hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
5b24b697d424bda194ffac371eaf99fb3efc6e84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zhemv_mgpu.cu, normal z -> s, Tue Aug 30 09:38:36 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_s.h"
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
__global__ void
ssymv_kernel_L_mgpu(
int n,
float const * __restrict__ A, int lda,
float const * __restrict__ x, int incx,
float * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset )
{
#if (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// GPUs are renumbered so that GPU 0 starts with block 0, GPU 1 starts with block 1, etc.
if ( blk < my_gpu_id ) {
return;
}
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
float psum, psum_t;
float total = MAGMA_S_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ float sA [quarter_NB_X][NB_X + 2]; // TODO +3 used in ssymv (single GPU); why?
__shared__ float sx_blk[NB_X]; // for x[ blk ]
__shared__ float sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
float rA[4];
float psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
// GPUs are renumbered so that GPU 0 has block 0, which is a partial block when there is an offset.
if ( (partial && tx >= partial) ||
(blk == 0 /*&& my_gpu_id == 0*/ && tx < block_offset) ) {
sx_blk[tx] = MAGMA_S_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_S_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_S_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_S_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=my_gpu_id; jj < blk; jj += ngpu) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
// only the first block column (jj=0, on GPU 0) deals with offset
if ( ty == 0 ) {
if ( jj == 0 && tx < block_offset ) {
sx_jj[tx] = MAGMA_S_ZERO;
}
else {
sx_jj[tx] = x[jj*NB_X*incx];
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_S_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
// end ssymv_kernel_L_mgpu
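// --------------------
// A minimal host-side sketch of the index mapping used in the kernel above:
// the same 64x4 thread block is re-linearized into a 32x8 view (tx2,ty2) for
// the diagonal tiles and a 16x16 view (tx4,ty4) for the transposed row sums.
// The helper name thread_views is hypothetical and for illustration only.
static inline void thread_views( int tx, int ty,
                                 int *tx2, int *ty2, int *tx4, int *ty4 )
{
    int td = NB_X * ty + tx;                              // linear id in the 64x4 block
    *tx2 = td % half_NB_X;     *ty2 = td / half_NB_X;     // 32x8 view
    *tx4 = td % quarter_NB_X;  *ty4 = td / quarter_NB_X;  // 16x16 view
}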
/***************************************************************************//**
Lower case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ]
[ (A31*x1 + A32*x2 + A33*x3) ]
Note beta*y is not included here; see magmablas_ssymv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * x x x x ] blk=0 * data for non-transposed row w_blk = A_{blk,1:blk} * x_{1:blk}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ * x x ] blk=2 blanks are not set
[ * ] blk=3
[ * ] blk=4
[ ] blk=0 (blank)
work[gpu=1] = [ * x x x ] blk=1
[ * ] blk=2
[ * x ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries left of the diagonal blocks are not accessed.
Blank rows, where a GPU has no data to contribute, are explicitly set to zero in y.
[ * + x + x + x ]
y[gpu=0] = [ * ]
[ * + x ]
[ * ]
[ 0 ] (explicitly set to 0)
y[gpu=1] = [ * + x + x ]
[ * ]
[ * ]
*******************************************************************************/
__global__ void
ssymv_kernel_L_mgpu_sum(
int n,
float alpha,
int lda,
float * __restrict__ y, int incy,
float const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
float Ax = MAGMA_S_ZERO;
// GPUs are renumbered so that GPU 0 starts with block 0,
// GPU 1 starts with block 1, etc.,
// therefore only blk >= my_gpu_id have non-zero data.
if ( blk >= my_gpu_id ) {
work += ind;
// if this GPU owns block-column blk, all blocks j=[blk, ..., blocks) contain data;
// else only block j=blk contains data.
int last = blocks-1;
if ( blk % ngpu != my_gpu_id ) {
last = blk;
}
for (int j = blk; j <= last; ++j) {
Ax += work[j*lda];
}
}
y[ind * incy] = alpha*Ax; // see magmablas_ssymv_sync for beta*y
}
}
// end ssymv_kernel_L_mgpu_sum
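// --------------------
// A minimal sketch of the summation range used in the kernel above, assuming
// the same renumbered-GPU convention: for block row blk, work(ind, j) is
// summed over j = blk, ..., last. The helper name mgpu_sum_range is hypothetical.
static inline void mgpu_sum_range( int blk, int blocks, int ngpu, int my_gpu_id,
                                   int *first, int *last )
{
    *first = blk;                                      // transposed contribution w_jj
    *last  = ( blk % ngpu == my_gpu_id ) ? blocks - 1  // owner also holds w_blk row data
                                         : blk;        // non-owner holds only w_jj
}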
/***************************************************************************//**
Purpose
-------
magmablas_ssymv_mgpu performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced. **Not currently supported.**
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha REAL.
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_lA Array of pointers, dimension (ngpu), to block-column distributed
matrix A, with block size nb.
d_lA[dev] is a REAL array on GPU dev, of
dimension (LDDA, nlocal), where
\n
{ floor(n/nb/ngpu)*nb + nb if dev < floor(n/nb) % ngpu,
nlocal = { floor(n/nb/ngpu)*nb + n%nb if dev == floor(n/nb) % ngpu,
{ floor(n/nb/ngpu)*nb otherwise.
\n
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
offset INTEGER.
Row & column offset to start of matrix A within the distributed d_lA
structure. Note that N is the size of this multiply, excluding the
offset, so the size of the original parent matrix is N+offset.
Also, x and y do not have an offset.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n + offset ).
It is recommended that ldda be a multiple of 16. Otherwise,
performance is degraded because the memory accesses
are not fully coalesced.
@param[in]
x REAL array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
y REAL array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param
hwork (workspace) REAL array on the CPU, of dimension (lhwork).
@param[in]
lhwork INTEGER.
The dimension of the array hwork. lhwork >= ngpu*nb.
@param
dwork (workspaces) Array of pointers, dimension (ngpu), to workspace on each GPU.
dwork[dev] is a REAL array on GPU dev, of dimension (ldwork).
@param[in]
ldwork INTEGER.
The dimension of each array dwork[dev].
ldwork >= ldda*( ceil((n + offset % nb) / nb) + 1 ).
@param[in]
ngpu INTEGER.
The number of GPUs to use.
@param[in]
nb INTEGER.
The block size used for distributing d_lA. Must be 64.
@param[in]
queues magma_queue_t array of dimension (ngpu).
queues[dev] is an execution queue on GPU dev.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_ssymv_mgpu(
magma_uplo_t uplo,
magma_int_t n,
float alpha,
magmaFloat_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset,
float const *x, magma_int_t incx,
float beta, // unused, see magmablas_ssymv_mgpu_sync
float *y, magma_int_t incy, // unused
float *hwork, magma_int_t lhwork,
magmaFloat_ptr dwork[], magma_int_t ldwork,
magma_int_t ngpu,
magma_int_t nb,
magma_queue_t queues[] )
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
bool upper = (uplo == MagmaUpper);
magma_int_t offset_block_id = offset / NB_X;
magma_int_t offset_gpu_id = offset_block_id % ngpu;
magma_int_t block_offset = offset % NB_X;
magma_int_t blocks = magma_ceildiv( n + block_offset, NB_X );
magma_int_t ldwmin = ldda*(blocks + 1);
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( (! upper) && (uplo != MagmaLower) ) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
} else if ( ldwork < ldwmin ) {
info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
magma_int_t dev;
for (dev=0; dev < ngpu; dev++) {
magma_setdevice( dev );
// blocks before the offset block
magma_int_t num_blocks_skipped = offset_block_id / ngpu;
if ( dev < offset_gpu_id ) {
num_blocks_skipped += 1;
}
// shift dA to first block >= offset block that is owned by this GPU
float const *dA_dev = d_lA[dev] + offset_block_id*NB_X + num_blocks_skipped*NB_X*ldda;
// the first column of dwork is used to broadcast x to all GPUs;
// the remaining `blocks` columns hold the partial sums from
// each block, as in the single-GPU version.
float *dx_dev = dwork[dev];
float *dwork_dev = dwork[dev] + ldda;
// renumber GPUs starting from the offset block
magma_int_t new_gpu_id = (dev + ngpu - offset_gpu_id) % ngpu;
dim3 grid( blocks, 1 );
// copy x to each GPU
magma_ssetvector_async( n, x, incx, dx_dev + block_offset, 1, queues[dev] );
// perform work = A*x, partial row sums
dim3 threads( NB_X, NB_Y );
// perform w = sum( work ), larger partial row sums
dim3 threads_sum( NB_X, 1 );
if ( upper ) {
hipLaunchKernelGGL(( ssymv_kernel_U_mgpu), dim3(grid), dim3(threads), 0, queues[dev]->cuda_stream() ,
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
hipLaunchKernelGGL(( ssymv_kernel_U_mgpu_sum), dim3(grid), dim3(threads_sum), 0, queues[dev]->cuda_stream() ,
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
else {
hipLaunchKernelGGL(( ssymv_kernel_L_mgpu), dim3(grid), dim3(threads), 0, queues[dev]->cuda_stream() ,
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
hipLaunchKernelGGL(( ssymv_kernel_L_mgpu_sum), dim3(grid), dim3(threads_sum), 0, queues[dev]->cuda_stream() ,
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
}
// 2nd loop in case hwork is not pinned, causing this to be sync instead of async.
for (dev=0; dev < ngpu; dev++) {
// copy w to CPU
magma_setdevice( dev );
float *dx_dev = dwork[dev];
magma_sgetvector_async( n, dx_dev + block_offset, 1, &hwork[dev*n], 1, queues[dev] );
}
// see magmablas_ssymv_mgpu_sync for final row sums
magma_setdevice( orig_dev );
return info;
}
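// --------------------
// A minimal restatement of the nlocal formula documented above, assuming
// integer (floor) division throughout. The helper name nlocal_for_dev is
// hypothetical and for illustration only.
static inline magma_int_t nlocal_for_dev(
    magma_int_t n, magma_int_t nb, magma_int_t ngpu, magma_int_t dev )
{
    magma_int_t nblock = n / nb;              // number of full blocks
    magma_int_t nlocal = (nblock/ngpu)*nb;    // full block-columns on every GPU
    if ( dev < nblock % ngpu )
        nlocal += nb;                         // one extra full block
    else if ( dev == nblock % ngpu )
        nlocal += n % nb;                     // the trailing partial block
    return nlocal;
}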
/***************************************************************************//**
Synchronizes and accumulates the final ssymv result.
For convenience, the parameters are identical to magmablas_ssymv_mgpu
(though some are unused here).
@see magmablas_ssymv_mgpu
@ingroup magma_hemv
*******************************************************************************/
extern "C" magma_int_t
magmablas_ssymv_mgpu_sync(
magma_uplo_t uplo, // unused, see magmablas_ssymv_mgpu
magma_int_t n,
float alpha, // unused
magmaFloat_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset, // unused
float const *x, magma_int_t incx, // unused
float beta,
float *y, magma_int_t incy, // unused
float *hwork, magma_int_t lhwork,
magmaFloat_ptr dwork[], magma_int_t ldwork, // unused
magma_int_t ngpu,
magma_int_t nb, // unused
magma_queue_t queues[] )
{
const float c_one = MAGMA_S_ONE;
const magma_int_t ione = 1;
magma_device_t dev;
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
//if ( (! upper) && (uplo != MagmaLower) ) { // unused
// info = -1;
//} else
if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
//} else if ( ldwork < ldwmin ) { // unused
// info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
// scale y = beta*y
blasf77_sscal( &n, &beta, y, &incy );
// sum reduce, y += sum( hwork )
for (dev=0; dev < ngpu; ++dev) {
magma_setdevice( dev );
magma_queue_sync( queues[dev] );
blasf77_saxpy( &n, &c_one, &hwork[dev*n], &ione, y, &ione );
}
magma_setdevice( orig_dev );
return info;
}
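// --------------------
// A minimal sketch of the intended call sequence: magmablas_ssymv_mgpu launches
// the per-GPU kernels and starts the async copies into hwork, and
// magmablas_ssymv_mgpu_sync then synchronizes the queues and accumulates y.
// The wrapper name ssymv_mgpu_example is hypothetical and for illustration only.
static magma_int_t ssymv_mgpu_example(
    magma_uplo_t uplo, magma_int_t n, float alpha,
    magmaFloat_const_ptr const d_lA[], magma_int_t ldda, magma_int_t offset,
    float const *x, magma_int_t incx,
    float beta, float *y, magma_int_t incy,
    float *hwork, magma_int_t lhwork,
    magmaFloat_ptr dwork[], magma_int_t ldwork,
    magma_int_t ngpu, magma_int_t nb, magma_queue_t queues[] )
{
    magma_int_t info = magmablas_ssymv_mgpu( uplo, n, alpha, d_lA, ldda, offset,
                                             x, incx, beta, y, incy,
                                             hwork, lhwork, dwork, ldwork,
                                             ngpu, nb, queues );
    if ( info != 0 )
        return info;
    return magmablas_ssymv_mgpu_sync( uplo, n, alpha, d_lA, ldda, offset,
                                      x, incx, beta, y, incy,
                                      hwork, lhwork, dwork, ldwork,
                                      ngpu, nb, queues );
}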
| 5b24b697d424bda194ffac371eaf99fb3efc6e84.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zhemv_mgpu.cu, normal z -> s, Tue Aug 30 09:38:36 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_s.h"
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
__global__ void
ssymv_kernel_L_mgpu(
int n,
float const * __restrict__ A, int lda,
float const * __restrict__ x, int incx,
float * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset )
{
#if (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// GPUs are renumbered so that GPU 0 starts with block 0, GPU 1 starts with block 1, etc.
if ( blk < my_gpu_id ) {
return;
}
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
float psum, psum_t;
float total = MAGMA_S_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ float sA [quarter_NB_X][NB_X + 2]; // TODO +3 used in ssymv (single GPU); why?
__shared__ float sx_blk[NB_X]; // for x[ blk ]
__shared__ float sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
float rA[4];
float psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
// GPUs are renumbered so that GPU 0 has block 0, which is partial of offset.
if ( (partial && tx >= partial) ||
(blk == 0 /*&& my_gpu_id == 0*/ && tx < block_offset) ) {
sx_blk[tx] = MAGMA_S_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_S_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_S_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_S_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=my_gpu_id; jj < blk; jj += ngpu) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
// only the first block column (jj=0, on GPU 0) deals with offset
if ( ty == 0 ) {
if ( jj == 0 && tx < block_offset ) {
sx_jj[tx] = MAGMA_S_ZERO;
}
else {
sx_jj[tx] = x[jj*NB_X*incx];
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_S_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
// end ssymv_kernel_L_mgpu
/***************************************************************************//**
Lower case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ]
[ (A31*x1 + A32*x2 + A33*x3) ]
Note beta*y is not included here; see magmablas_ssymv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * x x x x ] blk=0 * data for non-transposed row w_blk = A_{blk,1:blk} * x_{1:blk}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ * x x ] blk=2 blanks are not set
[ * ] blk=3
[ * ] blk=4
[ ] blk=0 (blank)
work[gpu=1] = [ * x x x ] blk=1
[ * ] blk=2
[ * x ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries left of the diagonal blocks are not accessed.
Blank rows, where a GPU has no data to contribute, are explicitly set to zero in y.
[ * + x + x + x ]
y[gpu=0] = [ * ]
[ * + x ]
[ * ]
[ 0 ] (explicitly set to 0)
y[gpu=1] = [ * + x + x ]
[ * ]
[ * ]
*******************************************************************************/
__global__ void
ssymv_kernel_L_mgpu_sum(
int n,
float alpha,
int lda,
float * __restrict__ y, int incy,
float const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
float Ax = MAGMA_S_ZERO;
// GPUs are renumbered so that GPU 0 starts with block 0,
// GPU 1 starts with block 1, etc.,
// therefore only blk >= my_gpu_id have non-zero data.
if ( blk >= my_gpu_id ) {
work += ind;
// if this GPU owns block-column blk, all blocks j=[blk, ..., blocks) contain data;
// else only block j=blk contains data.
int last = blocks-1;
if ( blk % ngpu != my_gpu_id ) {
last = blk;
}
for (int j = blk; j <= last; ++j) {
Ax += work[j*lda];
}
}
y[ind * incy] = alpha*Ax; // see magmablas_ssymv_sync for beta*y
}
}
// end ssymv_kernel_L_mgpu_sum
/***************************************************************************//**
Purpose
-------
magmablas_ssymv_mgpu performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced. **Not currently supported.**
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha REAL.
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_lA Array of pointers, dimension (ngpu), to block-column distributed
matrix A, with block size nb.
d_lA[dev] is a REAL array on GPU dev, of
dimension (LDDA, nlocal), where
\n
{ floor(n/nb/ngpu)*nb + nb if dev < floor(n/nb) % ngpu,
nlocal = { floor(n/nb/ngpu)*nb + n%nb if dev == floor(n/nb) % ngpu,
{ floor(n/nb/ngpu)*nb otherwise.
\n
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
offset INTEGER.
Row & column offset to start of matrix A within the distributed d_lA
structure. Note that N is the size of this multiply, excluding the
offset, so the size of the original parent matrix is N+offset.
Also, x and y do not have an offset.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n + offset ).
It is recommended that ldda be a multiple of 16. Otherwise,
performance is degraded because the memory accesses
are not fully coalesced.
@param[in]
x REAL array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
y REAL array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param
hwork (workspace) REAL array on the CPU, of dimension (lhwork).
@param[in]
lhwork INTEGER.
The dimension of the array hwork. lhwork >= ngpu*nb.
@param
dwork (workspaces) Array of pointers, dimension (ngpu), to workspace on each GPU.
dwork[dev] is a REAL array on GPU dev, of dimension (ldwork).
@param[in]
ldwork INTEGER.
The dimension of each array dwork[dev].
ldwork >= ldda*( ceil((n + offset % nb) / nb) + 1 ).
@param[in]
ngpu INTEGER.
The number of GPUs to use.
@param[in]
nb INTEGER.
The block size used for distributing d_lA. Must be 64.
@param[in]
queues magma_queue_t array of dimension (ngpu).
queues[dev] is an execution queue on GPU dev.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_ssymv_mgpu(
magma_uplo_t uplo,
magma_int_t n,
float alpha,
magmaFloat_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset,
float const *x, magma_int_t incx,
float beta, // unused, see magmablas_ssymv_mgpu_sync
float *y, magma_int_t incy, // unused
float *hwork, magma_int_t lhwork,
magmaFloat_ptr dwork[], magma_int_t ldwork,
magma_int_t ngpu,
magma_int_t nb,
magma_queue_t queues[] )
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
bool upper = (uplo == MagmaUpper);
magma_int_t offset_block_id = offset / NB_X;
magma_int_t offset_gpu_id = offset_block_id % ngpu;
magma_int_t block_offset = offset % NB_X;
magma_int_t blocks = magma_ceildiv( n + block_offset, NB_X );
magma_int_t ldwmin = ldda*(blocks + 1);
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( (! upper) && (uplo != MagmaLower) ) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
} else if ( ldwork < ldwmin ) {
info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
magma_int_t dev;
for (dev=0; dev < ngpu; dev++) {
magma_setdevice( dev );
// blocks before the offset block
magma_int_t num_blocks_skipped = offset_block_id / ngpu;
if ( dev < offset_gpu_id ) {
num_blocks_skipped += 1;
}
// shift dA to first block >= offset block that is owned by this GPU
float const *dA_dev = d_lA[dev] + offset_block_id*NB_X + num_blocks_skipped*NB_X*ldda;
// the first column of dwork is used to broadcast x to all GPUs;
// the remaining `blocks` columns hold the partial sums from
// each block, as in the single-GPU version.
float *dx_dev = dwork[dev];
float *dwork_dev = dwork[dev] + ldda;
// renumber GPUs starting from the offset block
magma_int_t new_gpu_id = (dev + ngpu - offset_gpu_id) % ngpu;
dim3 grid( blocks, 1 );
// copy x to each GPU
magma_ssetvector_async( n, x, incx, dx_dev + block_offset, 1, queues[dev] );
// perform work = A*x, partial row sums
dim3 threads( NB_X, NB_Y );
// perform w = sum( work ), larger partial row sums
dim3 threads_sum( NB_X, 1 );
if ( upper ) {
ssymv_kernel_U_mgpu<<< grid, threads, 0, queues[dev]->cuda_stream() >>>(
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
ssymv_kernel_U_mgpu_sum<<< grid, threads_sum, 0, queues[dev]->cuda_stream() >>>(
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
else {
ssymv_kernel_L_mgpu<<< grid, threads, 0, queues[dev]->cuda_stream() >>>(
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
ssymv_kernel_L_mgpu_sum<<< grid, threads_sum, 0, queues[dev]->cuda_stream() >>>(
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
}
// 2nd loop in case hwork is not pinned, causing this to be sync instead of async.
for (dev=0; dev < ngpu; dev++) {
// copy w to CPU
magma_setdevice( dev );
float *dx_dev = dwork[dev];
magma_sgetvector_async( n, dx_dev + block_offset, 1, &hwork[dev*n], 1, queues[dev] );
}
// see magmablas_ssymv_mgpu_sync for final row sums
magma_setdevice( orig_dev );
return info;
}
/***************************************************************************//**
Synchronizes and accumulates the final ssymv result.
For convenience, the parameters are identical to magmablas_ssymv_mgpu
(though some are unused here).
@see magmablas_ssymv_mgpu
@ingroup magma_hemv
*******************************************************************************/
extern "C" magma_int_t
magmablas_ssymv_mgpu_sync(
magma_uplo_t uplo, // unused, see magmablas_ssymv_mgpu
magma_int_t n,
float alpha, // unused
magmaFloat_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset, // unused
float const *x, magma_int_t incx, // unused
float beta,
float *y, magma_int_t incy, // unused
float *hwork, magma_int_t lhwork,
magmaFloat_ptr dwork[], magma_int_t ldwork, // unused
magma_int_t ngpu,
magma_int_t nb, // unused
magma_queue_t queues[] )
{
const float c_one = MAGMA_S_ONE;
const magma_int_t ione = 1;
magma_device_t dev;
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
//if ( (! upper) && (uplo != MagmaLower) ) { // unused
// info = -1;
//} else
if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
//} else if ( ldwork < ldwmin ) { // unused
// info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
// scale y = beta*y
blasf77_sscal( &n, &beta, y, &incy );
// sum reduce, y += sum( hwork )
for (dev=0; dev < ngpu; ++dev) {
magma_setdevice( dev );
magma_queue_sync( queues[dev] );
blasf77_saxpy( &n, &c_one, &hwork[dev*n], &ione, y, &ione );
}
magma_setdevice( orig_dev );
return info;
}
|
385e3938fe067b9a37f638091a845245ee4837c3.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (C) 2019-2021 Megvii Inc. All rights reserved. */
#include <thread>
#include <iostream>
#include <stdlib.h>
#include <fstream>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include "boruvka_rst.hpp"
static void forward_kernel(int * edge_index, float * edge_weight, int * edge_out, int vertex_count, int edge_count){
struct Graph * g = create_graph(vertex_count, edge_count);
for (int i = 0; i < edge_count; ++i){
g->edge[i].src = edge_index[i * 2];
g->edge[i].dest = edge_index[i * 2 + 1];
g->edge[i].weight = edge_weight[i];
}
boruvka_rst(g, edge_out);
delete[] g->edge;
delete[] g;
}
at::Tensor rst_forward(
const at::Tensor & edge_index_tensor,
const at::Tensor & edge_weight_tensor,
int vertex_count){
unsigned batch_size = edge_index_tensor.size(0);
unsigned edge_count = edge_index_tensor.size(1);
auto edge_index_cpu = edge_index_tensor.cpu();
auto edge_weight_cpu = edge_weight_tensor.cpu();
auto edge_out_cpu = at::empty({batch_size, vertex_count - 1, 2}, edge_index_cpu.options());
int * edge_out = edge_out_cpu.contiguous().data_ptr<int>();
int * edge_index = edge_index_cpu.contiguous().data_ptr<int>();
float * edge_weight = edge_weight_cpu.contiguous().data_ptr<float>();
// Loop for batch
std::thread pids[batch_size];
for (unsigned i = 0; i < batch_size; i++){
auto edge_index_iter = edge_index + i * edge_count * 2;
auto edge_weight_iter = edge_weight + i * edge_count;
auto edge_out_iter = edge_out + i * (vertex_count - 1) * 2;
pids[i] = std::thread(forward_kernel, edge_index_iter, edge_weight_iter, edge_out_iter, vertex_count, edge_count);
}
for (unsigned i = 0; i < batch_size; i++){
pids[i].join();
}
auto edge_out_tensor = edge_out_cpu.to(edge_index_tensor.device());
return edge_out_tensor;
}
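// A minimal sketch of how rst_forward is meant to be called, assuming
// edge_index of shape (batch, edges, 2) (int) and edge_weight of shape
// (batch, edges) (float). The wrapper name rst_forward_checked is
// hypothetical; it only makes those layout assumptions explicit.
static at::Tensor rst_forward_checked(
    const at::Tensor & edge_index_tensor,
    const at::Tensor & edge_weight_tensor,
    int vertex_count)
{
    TORCH_CHECK(edge_index_tensor.dim() == 3 && edge_index_tensor.size(2) == 2,
                "edge_index must have shape (batch, edges, 2)");
    TORCH_CHECK(edge_weight_tensor.dim() == 2 &&
                edge_weight_tensor.size(0) == edge_index_tensor.size(0) &&
                edge_weight_tensor.size(1) == edge_index_tensor.size(1),
                "edge_weight must have shape (batch, edges)");
    return rst_forward(edge_index_tensor, edge_weight_tensor, vertex_count);
}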
| 385e3938fe067b9a37f638091a845245ee4837c3.cu | /* Copyright (C) 2019-2021 Megvii Inc. All rights reserved. */
#include <thread>
#include <iostream>
#include <stdlib.h>
#include <fstream>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include "boruvka_rst.hpp"
static void forward_kernel(int * edge_index, float * edge_weight, int * edge_out, int vertex_count, int edge_count){
struct Graph * g = create_graph(vertex_count, edge_count);
for (int i = 0; i < edge_count; ++i){
g->edge[i].src = edge_index[i * 2];
g->edge[i].dest = edge_index[i * 2 + 1];
g->edge[i].weight = edge_weight[i];
}
boruvka_rst(g, edge_out);
delete[] g->edge;
delete[] g;
}
at::Tensor rst_forward(
const at::Tensor & edge_index_tensor,
const at::Tensor & edge_weight_tensor,
int vertex_count){
unsigned batch_size = edge_index_tensor.size(0);
unsigned edge_count = edge_index_tensor.size(1);
auto edge_index_cpu = edge_index_tensor.cpu();
auto edge_weight_cpu = edge_weight_tensor.cpu();
auto edge_out_cpu = at::empty({batch_size, vertex_count - 1, 2}, edge_index_cpu.options());
int * edge_out = edge_out_cpu.contiguous().data_ptr<int>();
int * edge_index = edge_index_cpu.contiguous().data_ptr<int>();
float * edge_weight = edge_weight_cpu.contiguous().data_ptr<float>();
// Loop for batch
std::thread pids[batch_size];
for (unsigned i = 0; i < batch_size; i++){
auto edge_index_iter = edge_index + i * edge_count * 2;
auto edge_weight_iter = edge_weight + i * edge_count;
auto edge_out_iter = edge_out + i * (vertex_count - 1) * 2;
pids[i] = std::thread(forward_kernel, edge_index_iter, edge_weight_iter, edge_out_iter, vertex_count, edge_count);
}
for (unsigned i = 0; i < batch_size; i++){
pids[i].join();
}
auto edge_out_tensor = edge_out_cpu.to(edge_index_tensor.device());
return edge_out_tensor;
}
|
e6780f5b909178d1ad0894e1d2663f4183b1d686.hip | // !!! This is a file automatically generated by hipify!!!
// CUDA 2D advection solver test program
// written by Peter Strazdins, Apr 21 for COMP4300/8300 Assignment 2
// v1.0 29 Apr
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h> //getopt()
#include <assert.h>
#include <sys/time.h> //gettimeofday()
#include <string> //std::string
#include "serAdvect.h"
#include "parAdvect.h"
#define USAGE "testAdvect [-h] [-s] [-g Gx[,Gy]] [-b Bx[,By]] [-o] [-w w] [-v v] [-d d] M N [r]"
#define DEFAULTS "Gx=Gy=Bx=By=r=1 v=w=d=0"
#define OPTCHARS "hsg:b:ow:v:d:"
static int M, N; // advection field size
static int Gx=1, Gy=1; // grid dimensions
static int Bx=1, By=1; // (thread) block dimensions
static int r = 1; // number of timesteps for the simulation
static int optH = 0; // set if -h specified
static int optS = 0; // set if -s specified
static int optO = 0; // set if -o specified
static int verbosity = 0; // v, above
static int w = 0; // optional extra tuning parameter
static int deviceNum = 0; // d, above. id of GPU to be used
// print a usage message for this program and exit with a status of 1
void usage(std::string msg) {
printf("testAdvect: %s\n", msg.c_str());
printf("usage: %s\n\tdefault values: %s\n", USAGE, DEFAULTS);
fflush(stdout);
exit(1);
}
void getArgs(int argc, char *argv[]) {
extern char *optarg; // points to the current option's argument
extern int optind; // index of last option parsed by getopt()
extern int opterr;
int optchar; // option character returned by getopt()
int optD = 0;
opterr = 0; // suppress getopt() error message for invalid option
while ((optchar = getopt(argc, argv, OPTCHARS)) != -1) {
// extract next option from the command line
switch (optchar) {
case 'h':
optH = 1;
break;
case 's':
optS = 1;
break;
case 'g':
if (sscanf(optarg, "%d,%d", &Gx, &Gy) < 1) // invalid integer
usage("bad value for Gx");
break;
case 'b':
if (sscanf(optarg, "%d,%d", &Bx, &By) < 1) // invalid integer
usage("bad value for Bx");
break;
case 'o':
optO = 1;
break;
case 'w':
if (sscanf(optarg, "%d", &w) != 1) // invalid integer
usage("bad value for w");
break;
case 'v':
if (sscanf(optarg, "%d", &verbosity) != 1) // invalid integer
usage("bad value for v");
break;
case 'd':
if (sscanf(optarg, "%d", &deviceNum) != 1) // invalid integer
usage("bad value for d");
optD = 1;
break;
default:
usage("unknown option");
break;
} //switch
} //while
if (optind < argc) {
if (sscanf(argv[optind], "%d", &M) != 1)
usage("bad value for M");
} else
usage("missing M");
N = M;
if (optind+1 < argc)
if (sscanf(argv[optind+1], "%d", &N) != 1)
usage("bad value for N");
if (optind+2 < argc)
if (sscanf(argv[optind+2], "%d", &r) != 1)
usage("bad value for r");
if (optH) //ignore -d
deviceNum = 0;
int maxDevices;
HANDLE_ERROR( hipGetDeviceCount(&maxDevices) );
if (deviceNum < 0 || deviceNum >= maxDevices) {
printf("warning: device id %d must be in range 0..%d. Using device 0.\n",
deviceNum, maxDevices-1);
deviceNum = 0;
}
if (optD)
HANDLE_ERROR( hipSetDevice(deviceNum) );
HANDLE_ERROR( hipGetDevice(&deviceNum) );
hipDeviceProp_t prop;
HANDLE_ERROR( hipGetDeviceProperties( &prop, deviceNum) );
if (prop.maxThreadsPerBlock < Bx * By)
printf("WARNING: Bx=%d By=%d too large for max threads per block = %d %s",
Bx, By, prop.maxThreadsPerBlock, "(EXPECT RUBBISH RESULTS)\n");
} //getArgs()
static void printAvgs(std::string name, double total, int nVals) {
printf("%s %.3e\n", name.c_str(), total / nVals);
}
//return wall time in seconds
static double Wtime() {
struct timeval tv;
gettimeofday(&tv, NULL);
return(1.0*tv.tv_sec + 1.0e-6*tv.tv_usec);
}
int main(int argc, char** argv) {
double *u, *u_d = NULL; int ldu, uSize; //advection field
double t, gflops, t_hd, t_dh; //times
getArgs(argc, argv);
printf("Advection of a %dx%d global field on %s %d"
" for %d steps.\n", M, N, optH? "host": "GPU", deviceNum, r);
if (optS)
printf("\tusing serial computation\n");
else if (optO)
printf("\tusing optimizations (Gx,Gy=%d,%d Bx,By=%d,%d w=%d)\n",
Gx, Gy, Bx, By, w);
else if (!optH)
printf("\tusing %dx%d blocks of %dx%d threads (2D decomposition)\n",
Gx, Gy, Bx, By);
initAdvectParams(M, N);
initParParams(M, N, Gx, Gy, Bx, By, verbosity);
ldu = N+2; uSize = (M+2)*ldu*sizeof(double);
u = (double *) calloc((M+2)*ldu, sizeof(double)); assert (u != NULL);
initAdvectField(M, N, &V(u,1,1), ldu);
if (verbosity > 1)
printAdvectField("init u", M, N, &V(u,1,1), ldu);
if (!optH) {
HANDLE_ERROR( hipMalloc(&u_d, uSize) );
t_hd = Wtime();
HANDLE_ERROR( hipMemcpy(u_d, u, uSize, hipMemcpyHostToDevice) );
t_hd = Wtime() - t_hd;
}
t = Wtime();
if (optH)
hostAdvectSerial(M, N, r, u, ldu);
else if (optS)
cudaAdvectSerial(M, N, r, u_d, ldu);
else if (optO)
cudaOptAdvect(r, u_d, ldu, w);
else
cuda2DAdvect(r, u_d, ldu);
HANDLE_ERROR( hipDeviceSynchronize() );
t = Wtime() - t;
gflops = 1.0e-09 * AdvFLOPsPerElt * M * N * r;
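// gflops holds the total GFLOP count over all r timesteps; the rate printed
// below divides it by the elapsed time t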
printf("Advection time %.2es, GFLOPs rate=%.2e\n", t, gflops / t);
if (!optH) {
t_dh = Wtime();
HANDLE_ERROR( hipMemcpy(u, u_d, uSize, hipMemcpyDeviceToHost) );
t_dh = Wtime() - t_dh;
HANDLE_ERROR( hipFree(u_d) );
printf("Copy times: host-device %.2es, device-host %.2es\n", t_hd, t_dh);
}
if (verbosity > 1)
printAdvectField("final u", M+2, N+2, u, ldu);
printAvgs("Avg error of final field: ",
errAdvectField(r, M, N, &V(u,1,1), ldu), M*N);
printAvgs("Max error of final field: ",
errMaxAdvectField(r, M, N, &V(u,1,1), ldu), 1);
free(u);
return 0;
} //main()
| e6780f5b909178d1ad0894e1d2663f4183b1d686.cu | // CUDA 2D advection solver test program
// written by Peter Strazdins, Apr 21 for COMP4300/8300 Assignment 2
// v1.0 29 Apr
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h> //getopt()
#include <assert.h>
#include <sys/time.h> //gettimeofday()
#include <string> //std::string
#include "serAdvect.h"
#include "parAdvect.h"
#define USAGE "testAdvect [-h] [-s] [-g Gx[,Gy]] [-b Bx[,By]] [-o] [-w w] [-v v] [-d d] M N [r]"
#define DEFAULTS "Gx=Gy=Bx=By=r=1 v=w=d=0"
#define OPTCHARS "hsg:b:ow:v:d:"
static int M, N; // advection field size
static int Gx=1, Gy=1; // grid dimensions
static int Bx=1, By=1; // (thread) block dimensions
static int r = 1; // number of timesteps for the simulation
static int optH = 0; // set if -h specified
static int optS = 0; // set if -s specified
static int optO = 0; // set if -o specified
static int verbosity = 0; // v, above
static int w = 0; // optional extra tuning parameter
static int deviceNum = 0; // d, above. id of GPU to be used
// print a usage message for this program and exit with a status of 1
void usage(std::string msg) {
printf("testAdvect: %s\n", msg.c_str());
printf("usage: %s\n\tdefault values: %s\n", USAGE, DEFAULTS);
fflush(stdout);
exit(1);
}
void getArgs(int argc, char *argv[]) {
extern char *optarg; // points to option argument (for options taking a value)
extern int optind; // index of the next argv element to be parsed by getopt()
extern int opterr;
int optchar; // option character returned by getopt()
int optD = 0;
opterr = 0; // suppress getopt() error message for invalid option
while ((optchar = getopt(argc, argv, OPTCHARS)) != -1) {
// extract next option from the command line
switch (optchar) {
case 'h':
optH = 1;
break;
case 's':
optS = 1;
break;
case 'g':
if (sscanf(optarg, "%d,%d", &Gx, &Gy) < 1) // invalid integer
usage("bad value for Gx");
break;
case 'b':
if (sscanf(optarg, "%d,%d", &Bx, &By) < 1) // invalid integer
usage("bad value for Bx");
break;
case 'o':
optO = 1;
break;
case 'w':
if (sscanf(optarg, "%d", &w) != 1) // invalid integer
usage("bad value for w");
break;
case 'v':
if (sscanf(optarg, "%d", &verbosity) != 1) // invalid integer
usage("bad value for v");
break;
case 'd':
if (sscanf(optarg, "%d", &deviceNum) != 1) // invalid integer
usage("bad value for d");
optD = 1;
break;
default:
usage("unknown option");
break;
} //switch
} //while
if (optind < argc) {
if (sscanf(argv[optind], "%d", &M) != 1)
usage("bad value for M");
} else
usage("missing M");
N = M;
if (optind+1 < argc)
if (sscanf(argv[optind+1], "%d", &N) != 1)
usage("bad value for N");
if (optind+2 < argc)
if (sscanf(argv[optind+2], "%d", &r) != 1)
usage("bad value for r");
if (optH) //ignore -d
deviceNum = 0;
int maxDevices;
HANDLE_ERROR( cudaGetDeviceCount(&maxDevices) );
if (deviceNum < 0 || deviceNum >= maxDevices) {
printf("warning: device id %d must be in range 0..%d. Using device 0.\n",
deviceNum, maxDevices-1);
deviceNum = 0;
}
if (optD)
HANDLE_ERROR( cudaSetDevice(deviceNum) );
HANDLE_ERROR( cudaGetDevice(&deviceNum) );
cudaDeviceProp prop;
HANDLE_ERROR( cudaGetDeviceProperties( &prop, deviceNum) );
if (prop.maxThreadsPerBlock < Bx * By)
printf("WARNING: Bx=%d By=%d too large for max threads per block = %d %s",
Bx, By, prop.maxThreadsPerBlock, "(EXPECT RUBBISH RESULTS)\n");
} //getArgs()
static void printAvgs(std::string name, double total, int nVals) {
printf("%s %.3e\n", name.c_str(), total / nVals);
}
//return wall time in seconds
static double Wtime() {
struct timeval tv;
gettimeofday(&tv, NULL);
return(1.0*tv.tv_sec + 1.0e-6*tv.tv_usec);
}
int main(int argc, char** argv) {
double *u, *u_d = NULL; int ldu, uSize; //advection field
double t, gflops, t_hd, t_dh; //times
getArgs(argc, argv);
printf("Advection of a %dx%d global field on %s %d"
" for %d steps.\n", M, N, optH? "host": "GPU", deviceNum, r);
if (optS)
printf("\tusing serial computation\n");
else if (optO)
printf("\tusing optimizations (Gx,Gy=%d,%d Bx,By=%d,%d w=%d)\n",
Gx, Gy, Bx, By, w);
else if (!optH)
printf("\tusing %dx%d blocks of %dx%d threads (2D decomposition)\n",
Gx, Gy, Bx, By);
initAdvectParams(M, N);
initParParams(M, N, Gx, Gy, Bx, By, verbosity);
ldu = N+2; uSize = (M+2)*ldu*sizeof(double);
u = (double *) calloc((M+2)*ldu, sizeof(double)); assert (u != NULL);
initAdvectField(M, N, &V(u,1,1), ldu);
if (verbosity > 1)
printAdvectField("init u", M, N, &V(u,1,1), ldu);
if (!optH) {
HANDLE_ERROR( cudaMalloc(&u_d, uSize) );
t_hd = Wtime();
HANDLE_ERROR( cudaMemcpy(u_d, u, uSize, cudaMemcpyHostToDevice) );
t_hd = Wtime() - t_hd;
}
t = Wtime();
if (optH)
hostAdvectSerial(M, N, r, u, ldu);
else if (optS)
cudaAdvectSerial(M, N, r, u_d, ldu);
else if (optO)
cudaOptAdvect(r, u_d, ldu, w);
else
cuda2DAdvect(r, u_d, ldu);
HANDLE_ERROR( cudaDeviceSynchronize() );
t = Wtime() - t;
gflops = 1.0e-09 * AdvFLOPsPerElt * M * N * r;
printf("Advection time %.2es, GFLOPs rate=%.2e\n", t, gflops / t);
if (!optH) {
t_dh = Wtime();
HANDLE_ERROR( cudaMemcpy(u, u_d, uSize, cudaMemcpyDeviceToHost) );
t_dh = Wtime() - t_dh;
HANDLE_ERROR( cudaFree(u_d) );
printf("Copy times: host-device %.2es, device-host %.2es\n", t_hd, t_dh);
}
if (verbosity > 1)
printAdvectField("final u", M+2, N+2, u, ldu);
printAvgs("Avg error of final field: ",
errAdvectField(r, M, N, &V(u,1,1), ldu), M*N);
printAvgs("Max error of final field: ",
errMaxAdvectField(r, M, N, &V(u,1,1), ldu), 1);
free(u);
return 0;
} //main()
|
f62804bb02bc914dfcaee07824fa18d4956a8c67.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/dictionary/encode.hpp>
#include <cudf/dictionary/detail/encode.hpp>
namespace cudf
{
namespace dictionary
{
namespace detail
{
/**
* @brief Decode a column from a dictionary.
*/
std::unique_ptr<column> decode( dictionary_column_view const& source,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
if( source.size()==0 || source.keys_size()==0 )
return make_empty_column( data_type{EMPTY} );
auto keys = source.keys();
auto indices = source.indices();
if( indices.size()==0 )
return make_empty_column( keys.type() );
// use gather to create the output column -- use ignore_out_of_bounds=true
auto table_column = experimental::detail::gather( table_view{{keys}}, indices, // no nulls here
false, true, false, mr, stream )->release();
auto output_column = std::unique_ptr<column>(std::move(table_column.front()));
// apply any nulls to the output column
output_column->set_null_mask( copy_bitmask(source.parent(),stream,mr), source.null_count() );
return output_column;
}
} // namespace detail
std::unique_ptr<column> decode( dictionary_column_view const& source,
rmm::mr::device_memory_resource* mr)
{
return detail::decode(source,mr);
}
} // namespace dictionary
} // namespace cudf
| f62804bb02bc914dfcaee07824fa18d4956a8c67.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/dictionary/encode.hpp>
#include <cudf/dictionary/detail/encode.hpp>
namespace cudf
{
namespace dictionary
{
namespace detail
{
/**
* @brief Decode a column from a dictionary.
*/
std::unique_ptr<column> decode( dictionary_column_view const& source,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
if( source.size()==0 || source.keys_size()==0 )
return make_empty_column( data_type{EMPTY} );
auto keys = source.keys();
auto indices = source.indices();
if( indices.size()==0 )
return make_empty_column( keys.type() );
// use gather to create the output column -- use ignore_out_of_bounds=true
auto table_column = experimental::detail::gather( table_view{{keys}}, indices, // no nulls here
false, true, false, mr, stream )->release();
auto output_column = std::unique_ptr<column>(std::move(table_column.front()));
// apply any nulls to the output column
output_column->set_null_mask( copy_bitmask(source.parent(),stream,mr), source.null_count() );
return output_column;
}
} // namespace detail
std::unique_ptr<column> decode( dictionary_column_view const& source,
rmm::mr::device_memory_resource* mr)
{
return detail::decode(source,mr);
}
} // namespace dictionary
} // namespace cudf
|
3518c46fa683b81785b64de33fb12c02126bce13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// clang-format off
{% set wdesc = "weighted" if weighted else "unweighted" %}
#include "fbgemm_gpu/embedding_backward_template_helpers.cuh"
#include "fbgemm_gpu/split_embeddings_utils.cuh"
{% if not dense %}
constexpr int32_t kCacheLocationMissing = -1;
{% endif %}
using Tensor = at::Tensor;
using namespace fbgemm_gpu;
template <
typename emb_t,
typename grad_t,
typename cache_t,
size_t kMaxVecsPerThread,
int32_t kThreadGroupSize = kWarpSize>
__global__ __launch_bounds__(kMaxThreads) void
split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1(
const at::PackedTensorAccessor64<grad_t, 2, at::RestrictPtrTraits> grad_output,
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> dev_weights,
{% if not dense %}
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> uvm_weights,
at::PackedTensorAccessor64<cache_t, 2, at::RestrictPtrTraits> lxu_cache_weights,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
weights_placements,
{% endif %}
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> weights_offsets,
{% if not nobag %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> D_offsets,
{% else %}
int32_t B,
int64_t D,
{% endif %}
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
hash_size_cumsum,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
sorted_linear_indices_run,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
sorted_linear_indices_cumulative_run_lengths,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
long_run_ids,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
num_long_run_ids,
{% if not nobag %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_infos,
{% else %}
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> sorted_infos,
{% endif %}
{% if not dense %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
sorted_lxu_cache_locations,
{% endif %}
{% if weighted %}
const at::PackedTensorAccessor32<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits> sorted_indice_weights,
{% endif %}
{% if not dense %}
bool stochastic_rounding,
at::PhiloxCudaState stochastic_rounding_philox_args,
{% else %}
at::PackedTensorAccessor64<cache_t, 1, at::RestrictPtrTraits> grad_dev_weights,
{% endif %}
{% if not nobag %}
FixedDivisor fd,
{% endif %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> long_run_id_to_really_long_run_ids,
at::PackedTensorAccessor32<at::acc_type<cache_t, true>, 2, at::RestrictPtrTraits> temp_grad_accum,
at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> grad_accum_counter,
const int32_t max_segment_length_per_cta,
const bool use_deterministic_algorithms,
{{ args.split_kernel_args | join(", ") }}) {
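// shfl_sync_mask covers only the lanes of this thread group when sub-warp
// shuffles are enabled; otherwise the full-warp mask 0xffffffff is used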
#ifdef FBGEMM_USE_SUBWARP_SHUFFLE
const unsigned int shfl_sync_mask =
((1L << kThreadGroupSize) - 1) <<
(threadIdx.y % (kWarpSize / kThreadGroupSize) * kThreadGroupSize);
#else
const unsigned int shfl_sync_mask = 0xffffffffu;
#endif
constexpr int VEC_WIDTH = 4;
int32_t T = weights_offsets.size(0);
{% if not nobag %}
const int32_t B = grad_output.size(0);
{% endif %}
const int32_t num_long_runs = num_long_run_ids[0];
for (int32_t long_run_id = blockIdx.x; long_run_id < num_long_runs; long_run_id += gridDim.x) {
// The first thread block in the really long run has run_id in long_run_ids
// and the rest have the negative of its offset (see find_long_segments kernel).
int32_t cta_rank_on_current_run = 0;
int32_t current_run_id = long_run_ids[long_run_id];
if (current_run_id < 0) {
cta_rank_on_current_run = -long_run_ids[long_run_id];
current_run_id = long_run_ids[long_run_id - cta_rank_on_current_run];
}
const int32_t run_length =
sorted_linear_indices_cumulative_run_lengths[current_run_id + 1] -
sorted_linear_indices_cumulative_run_lengths[current_run_id];
// This computation must agree with how we compute num_ctas_for_run in
// find_long_segments kernel!
const int32_t num_ctas_on_current_run =
use_deterministic_algorithms ? 1 : div_round_up(run_length, max_segment_length_per_cta);
const int64_t linear_index = sorted_linear_indices_run[current_run_id];
const int32_t segment_start =
sorted_linear_indices_cumulative_run_lengths[current_run_id] +
cta_rank_on_current_run * max_segment_length_per_cta;
const int32_t segment_end = ::min(
use_deterministic_algorithms ? INT_MAX : segment_start + max_segment_length_per_cta,
sorted_linear_indices_cumulative_run_lengths[current_run_id + 1]);
const int32_t SL = segment_end - segment_start;
const int32_t warp_id = threadIdx.y;
const int32_t lane_id = threadIdx.x;
// Note that with shared embedding tables we can have multiple tables
// (i.e. different values of `t` sharing the same segment).
//
const auto info_0 = sorted_infos[segment_start];
{% if not nobag %}
int32_t t_0 = fd.Div(info_0); //info_0 / B;
{% else %}
int32_t t_0 = info_0 % T;
{% endif %}
int64_t hash_size = hash_size_cumsum[t_0];
{% if not nobag %}
int32_t D = D_offsets[t_0 + 1] - D_offsets[t_0];
{% endif %}
int64_t idx = linear_index - hash_size;
const int32_t SL_per_warp = div_round_up(SL, blockDim.y);
const int32_t sl_start = SL_per_warp * warp_id;
const int32_t sl_end = min(SL_per_warp * (warp_id + 1), SL);
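// each warp accumulates a partial gradient sum over its SL_per_warp share of
// the segment; the partial sums are reduced through shared memory further down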
Vec4T<at::acc_type<cache_t, true>> grad_sum[kMaxVecsPerThread];
for (int32_t sl = sl_start; sl < sl_end; sl += kThreadGroupSize) {
int32_t sl_j = sl + threadIdx.x;
{% if not nobag %}
int32_t b_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0;
int32_t b; //= b_t % B;
int32_t t; //= b_t / B;
fd.DivMod(b_t, &t, &b);
int32_t D_start = sl_j < sl_end ? D_offsets[t] : 0;
{% else %}
int64_t l_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0;
int32_t l = l_t / T;
{% endif %}
{% if weighted %}
at::acc_type<cache_t, true> idx_weight = sl_j < sl_end ? sorted_indice_weights[segment_start + sl_j] : 0.0;
{% endif %}
for (int32_t j = 0; j < kThreadGroupSize && sl + j < sl_end; ++j) {
{% if not nobag %}
int32_t b_j = SHFL_SYNC(b, j);
int32_t D_start_j = SHFL_SYNC(D_start, j);
{% else %}
int32_t l_j = SHFL_SYNC(l, j);
{% endif %}
{% if weighted %}
at::acc_type<cache_t, true> idx_weight_j = SHFL_SYNC(idx_weight, j);
{% endif %}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
int32_t d = (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH;
{% if not nobag %}
Vec4T<at::acc_type<grad_t, true>> grad_out_vec(
&grad_output[b_j][0] + D_start_j + d);
{% else %}
Vec4T<at::acc_type<grad_t, true>> grad_out_vec(&grad_output[l_j][d]);
{% endif %}
{% if weighted %}
grad_sum[i].fma_(grad_out_vec, idx_weight_j);
{% else %}
grad_sum[i].add_(grad_out_vec);
{% endif %}
}
}
}
// do shared memory reduction only if we used multiple warps.
if (SL > SL_per_warp) {
struct SharedMemory<Vec4T<at::acc_type<cache_t, true>>> smem;
Vec4T<at::acc_type<cache_t, true>>* shared_grad_sums = smem.getPointer();
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize] = grad_sum[i];
}
__syncthreads();
if (blockDim.y >= 32) {
if (warp_id < 16) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0; i < kMaxVecsPerThread &&
(i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize] =
vec4_acc(
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize],
shared_grad_sums
[lane_id + i * kThreadGroupSize +
(warp_id + 16) * kMaxVecsPerThread * kThreadGroupSize]);
}
}
__syncthreads();
}
if (blockDim.y >= 16) {
if (warp_id < 8) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0; i < kMaxVecsPerThread &&
(i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize] =
vec4_acc(
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize],
shared_grad_sums
[lane_id + i * kThreadGroupSize +
(warp_id + 8) * kMaxVecsPerThread * kThreadGroupSize]);
}
}
__syncthreads();
}
if (blockDim.y >= 8) {
if (warp_id < 4) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0; i < kMaxVecsPerThread &&
(i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize] =
vec4_acc(
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize],
shared_grad_sums
[lane_id + i * kThreadGroupSize +
(warp_id + 4) * kMaxVecsPerThread * kThreadGroupSize]);
}
}
__syncthreads();
}
if (blockDim.y >= 4) {
if (warp_id < 2) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0; i < kMaxVecsPerThread &&
(i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize] =
vec4_acc(
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize],
shared_grad_sums
[lane_id + i * kThreadGroupSize +
(warp_id + 2) * kMaxVecsPerThread * kThreadGroupSize]);
}
}
__syncthreads();
}
if (warp_id == 0) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
grad_sum[i] = vec4_acc(
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize],
shared_grad_sums
[lane_id + i * kThreadGroupSize +
(warp_id + 1) * kMaxVecsPerThread * kThreadGroupSize]);
}
}
}
if (warp_id != 0) {
continue;
}
if (num_ctas_on_current_run > 1) {
int really_long_run_id = long_run_id_to_really_long_run_ids[long_run_id];
Vec4T<at::acc_type<cache_t, true>> *temp_grad_accum_ptr =
reinterpret_cast<Vec4T<at::acc_type<cache_t, true>>*>(&temp_grad_accum[really_long_run_id][0]);
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
gpuAtomicAdd(&temp_grad_accum_ptr[lane_id + i * kThreadGroupSize].acc.x, grad_sum[i].acc.x);
gpuAtomicAdd(&temp_grad_accum_ptr[lane_id + i * kThreadGroupSize].acc.y, grad_sum[i].acc.y);
gpuAtomicAdd(&temp_grad_accum_ptr[lane_id + i * kThreadGroupSize].acc.z, grad_sum[i].acc.z);
gpuAtomicAdd(&temp_grad_accum_ptr[lane_id + i * kThreadGroupSize].acc.w, grad_sum[i].acc.w);
}
int counter;
if (threadIdx.x == 0) {
__threadfence();
counter = gpuAtomicAdd(&grad_accum_counter[really_long_run_id], -1);
}
counter = SHFL_SYNC(counter, 0);
// Only the thread block accumulated the gradient last does the weight update.
if (counter > 1) {
continue;
}
CUDA_KERNEL_ASSERT(counter == 1 && "Invalid grad_accum_counter. Race condition?");
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
grad_sum[i] = temp_grad_accum_ptr[lane_id + i * kThreadGroupSize];
}
}
int64_t weights_offset = weights_offsets[t_0];
{% if not dense %}
emb_t* __restrict__ weights{nullptr};
cache_t* __restrict__ cache_weights{nullptr};
int32_t D_emb = D;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
const auto weights_placement = static_cast<PlacementType>(weights_placements[t_0]);
if (weights_placement == PlacementType::DEVICE) {
weights = &dev_weights[weights_offset + idx * D_emb];
} else {
weights = &uvm_weights[weights_offset + idx * D_emb];
}
if (weights_placement == PlacementType::MANAGED_CACHING) {
int32_t cache_idx = sorted_lxu_cache_locations[segment_start];
if (cache_idx != kCacheLocationMissing) {
cache_weights = &lxu_cache_weights[cache_idx][0];
}
}
{% for tensor in args.split_tensors %}
at::acc_type<cache_t, true>* __restrict__ {{ tensor }};
const auto {{ tensor }}_placement = static_cast<PlacementType>({{ tensor }}_placements[t_0]);
int64_t {{ tensor }}_offset = {{ tensor }}_offsets[t_0];
if ({{ tensor }}_placement == PlacementType::DEVICE) {
{{ tensor }} = &{{ tensor }}_dev[{{ tensor }}_offset];
} else {
{{ tensor }} = &{{ tensor }}_uvm[{{ tensor }}_offset];
}
{% endfor %}
struct SharedMemory<Vec4T<at::acc_type<cache_t, true>>> weight_update_buffer;
Vec4T<at::acc_type<cache_t, true>>* shared_weight_update_row = weight_update_buffer.getPointer();
auto weight_row_template = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(weights, cache_weights, D, nullptr);
if (!std::is_same<emb_t, float>::value && stochastic_rounding) {
StochasticRoundingRNGState state;
// different for every *run* and every *thread*.
auto stochastic_rounding_seeds =
at::cuda::philox::unpack(stochastic_rounding_philox_args);
stochastic_rounding_init(
std::get<0>(stochastic_rounding_seeds) ^
std::get<1>(stochastic_rounding_seeds),
threadIdx.x + current_run_id * blockDim.x,
&state);
weight_row_template.set_stoc_state(&state);
}
float2 qparams_template;
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
qparams_template = weight_row_template.load_qparams();
}
{{ split_precomputation }}
float2 qparams_new;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
int32_t d = (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH;
Vec4T<at::acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template);
auto& grad = grad_sum[i];
{{ split_weight_update }}
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
shared_weight_update_row[lane_id + i * kThreadGroupSize] = weight_new;
} else {
weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if embedding is not int8
}
}
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
// calculate qparams from updated weight row
qparams_new = thrust_find_qparams<at::acc_type<cache_t, true>>(shared_weight_update_row, D);
weight_row_template.store_qparams(qparams_new);
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
int32_t d = (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH;
weight_row_template.store(shared_weight_update_row[lane_id + i * kThreadGroupSize], d, qparams_new);
}
}
{% else %}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
int32_t d = (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH;
auto& grad = grad_sum[i];
grad.store(&grad_dev_weights[weights_offset + idx * D + d]);
}
{% endif %}
} // for each run
}
template <
typename emb_t,
typename grad_t,
typename cache_t,
size_t kMaxVecsPerThread,
int32_t kThreadGroupSize = kWarpSize>
__global__
__launch_bounds__(kBackwardMaxThreads)
void
split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1(
const at::PackedTensorAccessor64<grad_t, 2, at::RestrictPtrTraits>
grad_output,
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> dev_weights,
{% if not dense %}
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> uvm_weights,
at::PackedTensorAccessor64<cache_t, 2, at::RestrictPtrTraits> lxu_cache_weights,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
weights_placements,
{% endif %}
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> weights_offsets,
{% if not nobag %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> D_offsets,
{% else %}
int32_t B,
int64_t D,
{% endif %}
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
hash_size_cumsum,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
sorted_linear_indices_run,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
sorted_linear_indices_cumulative_run_lengths,
{% if not nobag %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_infos,
{% else %}
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> sorted_infos,
{% endif %}
{% if not dense %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
sorted_lxu_cache_locations,
{% endif %}
{% if weighted %}
const at::PackedTensorAccessor32<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits> sorted_indice_weights,
{% endif %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
sorted_linear_indices_num_runs,
int32_t max_segment_length_per_warp,
{% if not dense %}
bool stochastic_rounding,
at::PhiloxCudaState stochastic_rounding_philox_args,
{% else %}
at::PackedTensorAccessor64<cache_t, 1, at::RestrictPtrTraits> grad_dev_weights,
{% endif %}
{% if not nobag %}
FixedDivisor fd,
{% endif %}
{{ args.split_kernel_args | join(", ") }}) {
{% if not nobag %}
int32_t T = D_offsets.size(0) - 1;
const int32_t B = grad_output.size(0);
{% else %}
int32_t T = weights_offsets.size(0);
{% endif %}
const int32_t start_run_id = blockIdx.x * blockDim.y + threadIdx.y;
#ifdef FBGEMM_USE_SUBWARP_SHUFFLE
const unsigned int shfl_sync_mask =
((1L << kThreadGroupSize) - 1) <<
(threadIdx.y % (kWarpSize / kThreadGroupSize) * kThreadGroupSize);
#else
const unsigned int shfl_sync_mask = 0xffffffffu;
#endif
constexpr int VEC_WIDTH = 4;
for (uint32_t run_id = start_run_id;
run_id < sorted_linear_indices_run.size(0) && run_id < sorted_linear_indices_num_runs[0];
run_id += gridDim.x * blockDim.y) {
const int64_t linear_index = sorted_linear_indices_run[run_id];
const int32_t segment_start =
sorted_linear_indices_cumulative_run_lengths[run_id];
const int32_t segment_end =
sorted_linear_indices_cumulative_run_lengths[run_id + 1];
const int32_t SL = segment_end - segment_start;
if (SL >= max_segment_length_per_warp) {
continue;
}
// now, each segment corresponds to exactly one table `t` and row in
// that table (`idx`). Thus, we can hoist out some of the book-keeping.
const auto info_0 = sorted_infos[segment_start];
{% if not nobag %}
int32_t t_0 = fd.Div(info_0); // info_0 / B;
{% else %}
int32_t t_0 = info_0 % T;
{% endif %}
int64_t hash_size = hash_size_cumsum[t_0];
{% if not nobag %}
int32_t D = D_offsets[t_0 + 1] - D_offsets[t_0];
{% endif %}
int64_t idx = linear_index - hash_size;
const int32_t SL_per_warp = div_round_up(SL, blockDim.y);
const int32_t sl_start = 0;
const int32_t sl_end = SL;
Vec4T<at::acc_type<cache_t, true>> grad_sum[kMaxVecsPerThread];
for (int32_t sl = sl_start; sl < sl_end; sl += kThreadGroupSize) {
int32_t sl_j = sl + threadIdx.x;
{% if not nobag %}
int32_t b_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0;
int32_t b; //= b_t % B;
int32_t t; //= b_t / B;
fd.DivMod(b_t, &t, &b);
int32_t D_start = D_offsets[t];
{% else %}
int64_t l_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0;
int32_t l = l_t / T;
{% endif %}
{% if weighted %}
at::acc_type<cache_t, true> idx_weight = sl_j < sl_end ? sorted_indice_weights[segment_start + sl_j] : 0.0;
{% endif %}
for (int32_t j = 0; j < kThreadGroupSize && sl + j < sl_end; ++j) {
{% if not nobag %}
int32_t b_j = SHFL_SYNC(b, j);
int32_t D_start_j = SHFL_SYNC(D_start, j);
{% else %}
int32_t l_j = SHFL_SYNC(l, j);
{% endif %}
{% if weighted %}
at::acc_type<cache_t, true> idx_weight_j = SHFL_SYNC(idx_weight, j);
{% endif %}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
int32_t d = (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH;
{% if not nobag %}
Vec4T<at::acc_type<grad_t, true>> grad_out_vec(
&grad_output[b_j][0] + D_start_j + d);
{% else %}
Vec4T<at::acc_type<grad_t, true>> grad_out_vec(&grad_output[l_j][d]);
{% endif %}
{% if weighted %}
grad_sum[i].fma_(grad_out_vec, idx_weight_j);
{% else %}
grad_sum[i].add_(grad_out_vec);
{% endif %}
}
}
}
int64_t weights_offset = weights_offsets[t_0];
{% if not dense %}
emb_t* __restrict__ weights{nullptr};
cache_t* __restrict__ cache_weights{nullptr};
int32_t D_emb = D;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
const auto weights_placement = static_cast<PlacementType>(weights_placements[t_0]);
if (weights_placement == PlacementType::DEVICE) {
weights = &dev_weights[weights_offset + idx * D_emb];
} else {
weights = &uvm_weights[weights_offset + idx * D_emb];
}
if (weights_placement == PlacementType::MANAGED_CACHING) {
int32_t cache_idx = sorted_lxu_cache_locations[segment_start];
if (cache_idx != kCacheLocationMissing) {
cache_weights = &lxu_cache_weights[cache_idx][0];
}
}
{% for tensor in args.split_tensors %}
at::acc_type<cache_t, true>* __restrict__ {{ tensor }};
const auto {{ tensor }}_placement = static_cast<PlacementType>({{ tensor }}_placements[t_0]);
int64_t {{ tensor }}_offset = {{ tensor }}_offsets[t_0];
if ({{ tensor }}_placement == PlacementType::DEVICE) {
{{ tensor }} = &{{ tensor }}_dev[{{ tensor }}_offset];
} else {
{{ tensor }} = &{{ tensor }}_uvm[{{ tensor }}_offset];
}
{% endfor %}
struct SharedMemory<Vec4T<at::acc_type<cache_t, true>>> weight_update_buffer;
Vec4T<at::acc_type<cache_t, true>>* shared_weight_update_row = weight_update_buffer.getPointer();
auto weight_row_template = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(weights, cache_weights, D, nullptr);
if (!std::is_same<emb_t, float>::value && stochastic_rounding) {
StochasticRoundingRNGState state;
// different for every *run* and every *thread*.
auto stochastic_rounding_seeds =
at::cuda::philox::unpack(stochastic_rounding_philox_args);
stochastic_rounding_init(
std::get<0>(stochastic_rounding_seeds) ^
std::get<1>(stochastic_rounding_seeds),
threadIdx.x + run_id * blockDim.x,
&state);
weight_row_template.set_stoc_state(&state);
}
float2 qparams_template;
if (std::is_same<emb_t, uint8_t>::value && !cache_weights){
qparams_template = weight_row_template.load_qparams();
}
{{ split_precomputation }}
float2 qparams_new;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
int32_t d = (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH;
Vec4T<at::acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template);
auto& grad = grad_sum[i];
{{ split_weight_update }}
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
shared_weight_update_row[threadIdx.x + (i + threadIdx.y * kMaxVecsPerThread) * kThreadGroupSize] = weight_new;
} else {
weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if type is not int8
}
}
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
// calculate new qparams after row update
qparams_new = thrust_find_qparams<at::acc_type<cache_t, true>>(&shared_weight_update_row[threadIdx.y * kMaxVecsPerThread * kThreadGroupSize], D);
weight_row_template.store_qparams(qparams_new);
// fetch cached updated row from shared mem and quantize on-the-fly when saving to lowp embedding
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
int32_t d = (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH;
weight_row_template.store(shared_weight_update_row[threadIdx.x + (i + threadIdx.y * kMaxVecsPerThread) * kThreadGroupSize], d, qparams_new);
}
}
{% else %}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
int32_t d = (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH;
auto& grad = grad_sum[i];
grad.store(&grad_dev_weights[weights_offset + idx * D + d]);
}
{% endif %}
}
}
{{ "void" if not dense else "Tensor" }} split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_exact_cuda(
Tensor grad_output,
Tensor dev_weights,
{% if not dense %}
Tensor uvm_weights,
Tensor lxu_cache_weights,
Tensor weights_placements,
{% endif %}
Tensor weights_offsets,
{% if not nobag %}
Tensor D_offsets,
int64_t max_D,
{% else %}
int64_t D,
{% endif %}
Tensor hash_size_cumsum,
int64_t total_hash_size_bits,
Tensor indices,
Tensor offsets,
{% if not nobag %}
int64_t pooling_mode,
{% endif %}
{% if weighted %}
Tensor indice_weights,
{% endif %}
{% if not dense %}
Tensor lxu_cache_locations,
{% endif %}
int64_t unused_,
int64_t max_segment_length_per_warp,
{% if not dense %}
bool stochastic_rounding,
{% endif %}
{{ args.split_function_args | join(", ") }}) {
TENSOR_ON_CUDA_GPU(grad_output);
TENSOR_ON_CUDA_GPU(dev_weights);
{% if not dense %}
TENSOR_ON_CUDA_GPU(uvm_weights);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(weights_placements);
{% endif %}
TENSOR_ON_CUDA_GPU(weights_offsets);
{% if not nobag %}
TENSOR_ON_CUDA_GPU(D_offsets);
{% endif %}
TENSOR_ON_CUDA_GPU(hash_size_cumsum);
TENSOR_ON_CUDA_GPU(indices);
TENSOR_ON_CUDA_GPU(offsets);
{% if weighted %}
TENSOR_ON_CUDA_GPU(indice_weights);
{% endif %}
{% if not dense %}
TENSOR_ON_CUDA_GPU(lxu_cache_locations);
{% endif %}
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(dev_weights.get_device());
{% if dense %}
auto grad_dev_weights = zeros_like(dev_weights);
{% endif %}
// short-circuit if there are zero indices.
if (indices.numel() == 0) {
return {{ "grad_dev_weights" if dense else "" }};
}
{% if not nobag %}
int32_t T = D_offsets.numel() - 1;
{% else %}
int32_t T = weights_offsets.numel();
{% endif %}
TORCH_CHECK(T > 0);
// offsets = [B x T + 1]
const auto B = (offsets.size(0) - 1) / T;
TORCH_CHECK(B > 0);
auto BT_block_size = kMaxThreads / kWarpSize;
TORCH_CHECK(BT_block_size * kWarpSize <= kMaxThreads);
{% if nobag %}
auto max_D = D;
{% endif %}
TORCH_CHECK(max_D <= {{ max_embedding_dim }});
// V100: 96 KB; A100: 160 KB.
int max_shared_bytes = 0;
#ifndef __HIP_PLATFORM_HCC__
hipDeviceGetAttribute(&max_shared_bytes, hipDeviceAttributeSharedMemPerBlockOptin, dev_weights.get_device());
#else
// MI100 has 64 KB local memory (shared memory) per workgroup
max_shared_bytes = 64 << 10;
#endif
C10_HIP_KERNEL_LAUNCH_CHECK();
int shared_kb = max_shared_bytes >> 10;
// V100: 64 KB; A100: 96 KB.
#ifndef __HIP_PLATFORM_HCC__
// Use 2/3 of the available GPU shared mem; leave rooms for L1$.
int used_shared_kb = round_down(shared_kb * 2 / 3, 16);
TORCH_CHECK(used_shared_kb > 0);
#else
// MI100 has independent shared mem and L1
int used_shared_kb = shared_kb;
#endif
int used_shared_bytes = used_shared_kb << 10;
Tensor linear_indices, linear_indices_sorted;
Tensor infos_sorted;
Tensor sorted_linear_indices_run, sorted_linear_indices_run_lengths,
sorted_linear_indices_num_runs,
sorted_linear_indices_cumulative_run_lengths;
std::tie(
linear_indices,
linear_indices_sorted,
infos_sorted,
sorted_linear_indices_run,
sorted_linear_indices_run_lengths,
sorted_linear_indices_num_runs,
sorted_linear_indices_cumulative_run_lengths) =
transpose_embedding_input(
hash_size_cumsum,
total_hash_size_bits,
indices,
offsets,
{{"true" if nobag else "false"}});
{% if not dense %}
auto lxu_cache_locations_sorted = at::empty_like(lxu_cache_locations);
if (lxu_cache_locations.size(0) > 0) {
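// two-phase radix sort: the first call with a null temp pointer only queries
// temp_storage_bytes, the second call performs the actual key/value sort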
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(radix_sort_pairs(
nullptr,
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
lxu_cache_locations.data_ptr<int32_t>(),
lxu_cache_locations_sorted.data_ptr<int32_t>(),
linear_indices.numel(),
0,
total_hash_size_bits,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
auto temp_storage = at::empty(
{static_cast<int64_t>(temp_storage_bytes)},
indices.options().dtype(at::kByte));
AT_CUDA_CHECK(radix_sort_pairs(
temp_storage.data_ptr(),
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
lxu_cache_locations.data_ptr<int32_t>(),
lxu_cache_locations_sorted.data_ptr<int32_t>(),
linear_indices.numel(),
0,
total_hash_size_bits,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
}
{% endif %}
DISPATCH_EMB_GRAD_CACHE_TYPES(
dev_weights.scalar_type(),
grad_output.scalar_type(),
{% if not dense %}
lxu_cache_weights.scalar_type(),
{% else %}
dev_weights.scalar_type(),
{% endif %}
"split_embedding_backward_{{ optimizer }}_exact_kernel",
[&] {
{% if weighted %}
auto indice_weights_sorted = at::empty_like(indice_weights);
{
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(radix_sort_pairs(
nullptr,
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
indice_weights.data_ptr<at::acc_type<cache_t, true>>(),
indice_weights_sorted.data_ptr<at::acc_type<cache_t, true>>(),
linear_indices.numel(),
0,
total_hash_size_bits,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
auto temp_storage = at::empty(
{static_cast<int64_t>(temp_storage_bytes)},
indices.options().dtype(at::kByte));
AT_CUDA_CHECK(radix_sort_pairs(
temp_storage.data_ptr(),
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
indice_weights.data_ptr<at::acc_type<cache_t, true>>(),
indice_weights_sorted.data_ptr<at::acc_type<cache_t, true>>(),
linear_indices.numel(),
0,
total_hash_size_bits,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
}
{% endif %}
// early memory release
linear_indices.reset();
linear_indices_sorted.reset();
auto grad_output_accessor = grad_output.packed_accessor64<grad_t, 2, at::RestrictPtrTraits>();
{% if not nobag %}
Tensor grad_output_mean;
if (static_cast<PoolingMode>(pooling_mode) == PoolingMode::MEAN) {
grad_output_mean = at::empty_like(grad_output);
hipLaunchKernelGGL(( grad_mean_kernel<grad_t>)
, dim3(div_round_up((B * T), kMaxThreads / kWarpSize)),
dim3(dim3(kWarpSize, kMaxThreads / kWarpSize)),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_accessor,
D_offsets
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
offsets
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
grad_output_mean.packed_accessor64<
grad_t, 2, at::RestrictPtrTraits>());
C10_HIP_KERNEL_LAUNCH_CHECK();
grad_output_accessor = grad_output_mean.packed_accessor64<
grad_t, 2, at::RestrictPtrTraits>();
}
{% endif %}
{% if not dense %}
at::PhiloxCudaState rng_engine_inputs;
if (stochastic_rounding && !std::is_same<emb_t, float>::value) {
auto gen = at::cuda::detail::getDefaultCUDAGenerator();
std::lock_guard<std::mutex> lock(gen.mutex());
rng_engine_inputs =
at::check_generator<at::CUDAGeneratorImpl>(gen)
->philox_cuda_state(4);
}
{% endif %}
// kMaxElemPerThread is # of elements handled by thread if we use a full warp for a row
// We consider kMaxElemPerThread 1 and 2, and then a multiple of 4.
{% for kMaxElemPerThread in range(1, max_embedding_dim // (items_per_warp // 4) + 1) %}
{% if kMaxElemPerThread in [1, 2] or kMaxElemPerThread % 4 == 0 %}
if (max_D <= {{ items_per_warp // 4 * kMaxElemPerThread }}) {
// hipcc can't use max in constexpr
constexpr int kMaxVecsPerThread = {{ kMaxElemPerThread }} / 4 >= 1 ? {{ kMaxElemPerThread }} / 4 : 1;
// If max_D is small, use fewer number of threads than kWarpSize.
#ifdef FBGEMM_USE_SUBWARP_SHUFFLE
constexpr int kThreadGroupSize = kWarpSize / ::max(4 / {{ kMaxElemPerThread }}, 1);
#else
constexpr int kThreadGroupSize = kWarpSize;
#endif
// Stay under used_shared_kb of shared memory (V100: 64 KB; A100: 96 KB), BT_block_size must be a power of two.
while (BT_block_size * sizeof(at::acc_type<cache_t, true>) * 4 * kWarpSize * kMaxVecsPerThread >= used_shared_bytes) {
BT_block_size /= 2;
}
TORCH_CHECK(BT_block_size >= 1);
if (std::is_same<emb_t, double>::value) {
// Otherwise we see CUDA kernel launch failures despite the above checks.
BT_block_size = 1;
}
auto long_run_ids = at::empty_like(sorted_linear_indices_run_lengths);
auto num_long_run_ids = at::zeros({1}, indices.options().dtype(at::kInt));
const bool use_deterministic_algorithms = at::globalContext().deterministicAlgorithms();
const int max_segment_length_per_cta = use_deterministic_algorithms ? INT_MAX : 1024;
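// with deterministic algorithms every run is handled by a single CTA (the
// per-CTA segment length is effectively unbounded), so the cross-CTA atomic
// accumulation buffers below are left empty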
Tensor long_run_id_to_really_long_run_ids;
if (use_deterministic_algorithms) {
long_run_id_to_really_long_run_ids =
at::empty(0, sorted_linear_indices_run_lengths.options());
} else {
long_run_id_to_really_long_run_ids =
at::empty_like(sorted_linear_indices_run_lengths);
}
auto num_really_long_run_ids = at::zeros({1}, indices.options().dtype(at::kInt));
auto grad_accum_counter = at::empty(
use_deterministic_algorithms ? 0 : (indices.numel() / max_segment_length_per_cta),
indices.options().dtype(at::kInt));
hipLaunchKernelGGL(( split_embedding_backward_codegen_find_long_segments),
dim3(div_round_up(indices.numel(), kMaxThreads)),
dim3(kMaxThreads),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA()
,
sorted_linear_indices_num_runs.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
sorted_linear_indices_run_lengths.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
num_long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
long_run_id_to_really_long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
num_really_long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
grad_accum_counter.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
max_segment_length_per_warp,
max_segment_length_per_cta,
use_deterministic_algorithms);
C10_HIP_KERNEL_LAUNCH_CHECK();
// A temp buffer to accumulate gradients with atomics.
auto temp_grad_accum = at::zeros(
{use_deterministic_algorithms ? 0 : grad_accum_counter.numel(), max_D},
grad_output.options().dtype(std::is_same<cache_t, double>::value ? at::kDouble : at::kFloat));
int32_t grid_size = ::min(
div_round_up(long_run_ids.numel(), kMaxThreads),
get_max_thread_blocks_());
// Check https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory-7-x
// "Compute capability 7.x devices allow a single thread block to
// address the full capacity of shared memory: 96 KB on Volta,
// 64 KB on Turing. Kernels relying on shared memory allocations
// over 48 KB per block are architecture-specific, as such they
// must use dynamic shared memory (rather than statically sized
// arrays) and require an explicit opt-in using hipFuncSetAttribute()".
#ifndef __HIP_PLATFORM_HCC__
hipFuncSetAttribute(
split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1<
emb_t,
grad_t,
cache_t,
kMaxVecsPerThread,
kThreadGroupSize>,
hipFuncAttributeMaxDynamicSharedMemorySize,
used_shared_bytes); // V100: 64 KB; A100: 96 KB.
#endif
C10_HIP_KERNEL_LAUNCH_CHECK();
// dividing by kMaxThreads is a heuristic to avoid num of blocks far exceeding num_long_run_ids[0]
hipLaunchKernelGGL(( split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1<
emb_t,
grad_t,
cache_t,
kMaxVecsPerThread,
kThreadGroupSize>)
, dim3(grid_size),
dim3(dim3(kThreadGroupSize, BT_block_size)),
BT_block_size * sizeof(at::acc_type<cache_t, true>) * 4 * kWarpSize *
kMaxVecsPerThread,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_accessor,
{% if not dense %}
dev_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
uvm_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
lxu_cache_weights.packed_accessor64<cache_t, 2, at::RestrictPtrTraits>(),
weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% else %}
dev_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
{% endif %}
weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
{% if not nobag %}
D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% else %}
B,
D,
{% endif %}
hash_size_cumsum.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
sorted_linear_indices_run
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
sorted_linear_indices_cumulative_run_lengths
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
num_long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% if not nobag %}
infos_sorted.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% else %}
infos_sorted.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
{% endif %}
{% if not dense %}
lxu_cache_locations_sorted.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% endif %}
{% if weighted %}
indice_weights_sorted.packed_accessor32<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits>(),
{% endif %}
{% if not dense %}
stochastic_rounding,
rng_engine_inputs,
{% else %}
grad_dev_weights.packed_accessor64<cache_t, 1, at::RestrictPtrTraits>(),
{% endif %}
{% if not nobag %}
FixedDivisor(B),
{% endif %}
long_run_id_to_really_long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
temp_grad_accum.packed_accessor32<at::acc_type<cache_t, true>, 2, at::RestrictPtrTraits>(),
grad_accum_counter.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
max_segment_length_per_cta,
use_deterministic_algorithms,
{{ args.split_kernel_arg_constructors | join(", ") }});
C10_HIP_KERNEL_LAUNCH_CHECK();
grid_size = ::min(
div_round_up(sorted_linear_indices_run.numel(), kBackwardMaxThreads / kThreadGroupSize),
get_max_thread_blocks_());
// Shared memory is not needed for non uint8_t weights
size_t shmem_bytes = 0;
if (std::is_same<emb_t, uint8_t>::value) {
shmem_bytes = BT_block_size * sizeof(
at::acc_type<cache_t, true>) * 4 * kWarpSize * kMaxVecsPerThread;
#ifndef __HIP_PLATFORM_HCC__
hipFuncSetAttribute(
split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1<
emb_t,
grad_t,
cache_t,
kMaxVecsPerThread,
kThreadGroupSize>,
hipFuncAttributeMaxDynamicSharedMemorySize,
used_shared_bytes); // V100: 64 KB; A100: 96 KB.
#endif
}
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1<
emb_t,
grad_t,
cache_t,
kMaxVecsPerThread,
kThreadGroupSize>)
, dim3(grid_size),
dim3(dim3(kThreadGroupSize, kBackwardMaxThreads / kThreadGroupSize)),
shmem_bytes,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_accessor,
{% if not dense %}
dev_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
uvm_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
lxu_cache_weights.packed_accessor64<cache_t, 2, at::RestrictPtrTraits>(),
weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% else %}
dev_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
{% endif %}
weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
{% if not nobag %}
D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% else %}
B,
D,
{% endif %}
hash_size_cumsum.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
sorted_linear_indices_run
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
sorted_linear_indices_cumulative_run_lengths
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% if not nobag %}
infos_sorted.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% else %}
infos_sorted.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
{% endif %}
{% if not dense %}
lxu_cache_locations_sorted.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% endif %}
{% if weighted %}
indice_weights_sorted.packed_accessor32<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits>(),
{% endif %}
sorted_linear_indices_num_runs
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
max_segment_length_per_warp,
{% if not dense %}
stochastic_rounding,
rng_engine_inputs,
{% else %}
grad_dev_weights.packed_accessor64<cache_t, 1, at::RestrictPtrTraits>(),
{% endif %}
{% if not nobag %}
FixedDivisor(B),
{% endif %}
{{ args.split_kernel_arg_constructors | join(", ") }});
C10_HIP_KERNEL_LAUNCH_CHECK();
return;
}
{% endif %}
{% endfor %}
});
return {{ "grad_dev_weights" if dense else "" }};
}
// clang-format on
| 3518c46fa683b81785b64de33fb12c02126bce13.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// clang-format off
{% set wdesc = "weighted" if weighted else "unweighted" %}
#include "fbgemm_gpu/embedding_backward_template_helpers.cuh"
#include "fbgemm_gpu/split_embeddings_utils.cuh"
{% if not dense %}
constexpr int32_t kCacheLocationMissing = -1;
{% endif %}
using Tensor = at::Tensor;
using namespace fbgemm_gpu;
template <
typename emb_t,
typename grad_t,
typename cache_t,
size_t kMaxVecsPerThread,
int32_t kThreadGroupSize = kWarpSize>
__global__ __launch_bounds__(kMaxThreads) void
split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1(
const at::PackedTensorAccessor64<grad_t, 2, at::RestrictPtrTraits> grad_output,
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> dev_weights,
{% if not dense %}
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> uvm_weights,
at::PackedTensorAccessor64<cache_t, 2, at::RestrictPtrTraits> lxu_cache_weights,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
weights_placements,
{% endif %}
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> weights_offsets,
{% if not nobag %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> D_offsets,
{% else %}
int32_t B,
int64_t D,
{% endif %}
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
hash_size_cumsum,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
sorted_linear_indices_run,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
sorted_linear_indices_cumulative_run_lengths,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
long_run_ids,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
num_long_run_ids,
{% if not nobag %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_infos,
{% else %}
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> sorted_infos,
{% endif %}
{% if not dense %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
sorted_lxu_cache_locations,
{% endif %}
{% if weighted %}
const at::PackedTensorAccessor32<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits> sorted_indice_weights,
{% endif %}
{% if not dense %}
bool stochastic_rounding,
at::PhiloxCudaState stochastic_rounding_philox_args,
{% else %}
at::PackedTensorAccessor64<cache_t, 1, at::RestrictPtrTraits> grad_dev_weights,
{% endif %}
{% if not nobag %}
FixedDivisor fd,
{% endif %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> long_run_id_to_really_long_run_ids,
at::PackedTensorAccessor32<at::acc_type<cache_t, true>, 2, at::RestrictPtrTraits> temp_grad_accum,
at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> grad_accum_counter,
const int32_t max_segment_length_per_cta,
const bool use_deterministic_algorithms,
{{ args.split_kernel_args | join(", ") }}) {
#ifdef FBGEMM_USE_SUBWARP_SHUFFLE
const unsigned int shfl_sync_mask =
((1L << kThreadGroupSize) - 1) <<
(threadIdx.y % (kWarpSize / kThreadGroupSize) * kThreadGroupSize);
#else
const unsigned int shfl_sync_mask = 0xffffffffu;
#endif
constexpr int VEC_WIDTH = 4;
int32_t T = weights_offsets.size(0);
{% if not nobag %}
const int32_t B = grad_output.size(0);
{% endif %}
const int32_t num_long_runs = num_long_run_ids[0];
for (int32_t long_run_id = blockIdx.x; long_run_id < num_long_runs; long_run_id += gridDim.x) {
// The first thread block in the really long run has run_id in long_run_ids
// and the rest have the negative of its offset (see find_long_segments kernel).
int32_t cta_rank_on_current_run = 0;
int32_t current_run_id = long_run_ids[long_run_id];
if (current_run_id < 0) {
cta_rank_on_current_run = -long_run_ids[long_run_id];
current_run_id = long_run_ids[long_run_id - cta_rank_on_current_run];
}
const int32_t run_length =
sorted_linear_indices_cumulative_run_lengths[current_run_id + 1] -
sorted_linear_indices_cumulative_run_lengths[current_run_id];
// This computation must agree with how we compute num_ctas_for_run in
// find_long_segments kernel!
const int32_t num_ctas_on_current_run =
use_deterministic_algorithms ? 1 : div_round_up(run_length, max_segment_length_per_cta);
const int64_t linear_index = sorted_linear_indices_run[current_run_id];
const int32_t segment_start =
sorted_linear_indices_cumulative_run_lengths[current_run_id] +
cta_rank_on_current_run * max_segment_length_per_cta;
const int32_t segment_end = std::min(
use_deterministic_algorithms ? INT_MAX : segment_start + max_segment_length_per_cta,
sorted_linear_indices_cumulative_run_lengths[current_run_id + 1]);
const int32_t SL = segment_end - segment_start;
const int32_t warp_id = threadIdx.y;
const int32_t lane_id = threadIdx.x;
// Note that with shared embedding tables we can have multiple tables
// (i.e. different values of `t` sharing the same segment).
//
const auto info_0 = sorted_infos[segment_start];
{% if not nobag %}
int32_t t_0 = fd.Div(info_0); //info_0 / B;
{% else %}
int32_t t_0 = info_0 % T;
{% endif %}
int64_t hash_size = hash_size_cumsum[t_0];
{% if not nobag %}
int32_t D = D_offsets[t_0 + 1] - D_offsets[t_0];
{% endif %}
int64_t idx = linear_index - hash_size;
const int32_t SL_per_warp = div_round_up(SL, blockDim.y);
const int32_t sl_start = SL_per_warp * warp_id;
const int32_t sl_end = min(SL_per_warp * (warp_id + 1), SL);
Vec4T<at::acc_type<cache_t, true>> grad_sum[kMaxVecsPerThread];
for (int32_t sl = sl_start; sl < sl_end; sl += kThreadGroupSize) {
int32_t sl_j = sl + threadIdx.x;
{% if not nobag %}
int32_t b_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0;
int32_t b; //= b_t % B;
int32_t t; //= b_t / B;
fd.DivMod(b_t, &t, &b);
int32_t D_start = sl_j < sl_end ? D_offsets[t] : 0;
{% else %}
int64_t l_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0;
int32_t l = l_t / T;
{% endif %}
{% if weighted %}
at::acc_type<cache_t, true> idx_weight = sl_j < sl_end ? sorted_indice_weights[segment_start + sl_j] : 0.0;
{% endif %}
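// Each lane loaded one row's metadata above; broadcast it lane-by-lane with warp shuffles
// so the whole thread group cooperates on that row's gradient vector.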
for (int32_t j = 0; j < kThreadGroupSize && sl + j < sl_end; ++j) {
{% if not nobag %}
int32_t b_j = SHFL_SYNC(b, j);
int32_t D_start_j = SHFL_SYNC(D_start, j);
{% else %}
int32_t l_j = SHFL_SYNC(l, j);
{% endif %}
{% if weighted %}
at::acc_type<cache_t, true> idx_weight_j = SHFL_SYNC(idx_weight, j);
{% endif %}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
int32_t d = (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH;
{% if not nobag %}
Vec4T<at::acc_type<grad_t, true>> grad_out_vec(
&grad_output[b_j][0] + D_start_j + d);
{% else %}
Vec4T<at::acc_type<grad_t, true>> grad_out_vec(&grad_output[l_j][d]);
{% endif %}
{% if weighted %}
grad_sum[i].fma_(grad_out_vec, idx_weight_j);
{% else %}
grad_sum[i].add_(grad_out_vec);
{% endif %}
}
}
}
// do shared memory reduction only if we used multiple warps.
if (SL > SL_per_warp) {
struct SharedMemory<Vec4T<at::acc_type<cache_t, true>>> smem;
Vec4T<at::acc_type<cache_t, true>>* shared_grad_sums = smem.getPointer();
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize] = grad_sum[i];
}
__syncthreads();
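// Tree-reduce the per-warp partial sums in shared memory, halving the number of active
// warps at every step until warp 0 holds the total for this CTA.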
if (blockDim.y >= 32) {
if (warp_id < 16) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0; i < kMaxVecsPerThread &&
(i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize] =
vec4_acc(
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize],
shared_grad_sums
[lane_id + i * kThreadGroupSize +
(warp_id + 16) * kMaxVecsPerThread * kThreadGroupSize]);
}
}
__syncthreads();
}
if (blockDim.y >= 16) {
if (warp_id < 8) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0; i < kMaxVecsPerThread &&
(i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize] =
vec4_acc(
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize],
shared_grad_sums
[lane_id + i * kThreadGroupSize +
(warp_id + 8) * kMaxVecsPerThread * kThreadGroupSize]);
}
}
__syncthreads();
}
if (blockDim.y >= 8) {
if (warp_id < 4) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0; i < kMaxVecsPerThread &&
(i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize] =
vec4_acc(
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize],
shared_grad_sums
[lane_id + i * kThreadGroupSize +
(warp_id + 4) * kMaxVecsPerThread * kThreadGroupSize]);
}
}
__syncthreads();
}
if (blockDim.y >= 4) {
if (warp_id < 2) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0; i < kMaxVecsPerThread &&
(i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize] =
vec4_acc(
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize],
shared_grad_sums
[lane_id + i * kThreadGroupSize +
(warp_id + 2) * kMaxVecsPerThread * kThreadGroupSize]);
}
}
__syncthreads();
}
if (warp_id == 0) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
grad_sum[i] = vec4_acc(
shared_grad_sums
[lane_id + i * kThreadGroupSize +
warp_id * kMaxVecsPerThread * kThreadGroupSize],
shared_grad_sums
[lane_id + i * kThreadGroupSize +
(warp_id + 1) * kMaxVecsPerThread * kThreadGroupSize]);
}
}
}
if (warp_id != 0) {
continue;
}
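// If this run was split across multiple CTAs, combine the partial sums through a global
// scratch buffer; only the last CTA to arrive performs the weight update.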
if (num_ctas_on_current_run > 1) {
int really_long_run_id = long_run_id_to_really_long_run_ids[long_run_id];
Vec4T<at::acc_type<cache_t, true>> *temp_grad_accum_ptr =
reinterpret_cast<Vec4T<at::acc_type<cache_t, true>>*>(&temp_grad_accum[really_long_run_id][0]);
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
gpuAtomicAdd(&temp_grad_accum_ptr[lane_id + i * kThreadGroupSize].acc.x, grad_sum[i].acc.x);
gpuAtomicAdd(&temp_grad_accum_ptr[lane_id + i * kThreadGroupSize].acc.y, grad_sum[i].acc.y);
gpuAtomicAdd(&temp_grad_accum_ptr[lane_id + i * kThreadGroupSize].acc.z, grad_sum[i].acc.z);
gpuAtomicAdd(&temp_grad_accum_ptr[lane_id + i * kThreadGroupSize].acc.w, grad_sum[i].acc.w);
}
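// The fence guarantees this CTA's atomic adds above are visible to other CTAs before the
// counter is decremented.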
int counter;
if (threadIdx.x == 0) {
__threadfence();
counter = gpuAtomicAdd(&grad_accum_counter[really_long_run_id], -1);
}
counter = SHFL_SYNC(counter, 0);
// Only the thread block that accumulated the gradient last does the weight update.
if (counter > 1) {
continue;
}
CUDA_KERNEL_ASSERT(counter == 1 && "Invalid grad_accum_counter. Race condition?");
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
grad_sum[i] = temp_grad_accum_ptr[lane_id + i * kThreadGroupSize];
}
}
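// Locate the weight row for this index: plain device memory, UVM, or (for MANAGED_CACHING
// with a cache hit) the LXU cache.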
int64_t weights_offset = weights_offsets[t_0];
{% if not dense %}
emb_t* __restrict__ weights{nullptr};
cache_t* __restrict__ cache_weights{nullptr};
int32_t D_emb = D;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
const auto weights_placement = static_cast<PlacementType>(weights_placements[t_0]);
if (weights_placement == PlacementType::DEVICE) {
weights = &dev_weights[weights_offset + idx * D_emb];
} else {
weights = &uvm_weights[weights_offset + idx * D_emb];
}
if (weights_placement == PlacementType::MANAGED_CACHING) {
int32_t cache_idx = sorted_lxu_cache_locations[segment_start];
if (cache_idx != kCacheLocationMissing) {
cache_weights = &lxu_cache_weights[cache_idx][0];
}
}
{% for tensor in args.split_tensors %}
at::acc_type<cache_t, true>* __restrict__ {{ tensor }};
const auto {{ tensor }}_placement = static_cast<PlacementType>({{ tensor }}_placements[t_0]);
int64_t {{ tensor }}_offset = {{ tensor }}_offsets[t_0];
if ({{ tensor }}_placement == PlacementType::DEVICE) {
{{ tensor }} = &{{ tensor }}_dev[{{ tensor }}_offset];
} else {
{{ tensor }} = &{{ tensor }}_uvm[{{ tensor }}_offset];
}
{% endfor %}
struct SharedMemory<Vec4T<at::acc_type<cache_t, true>>> weight_update_buffer;
Vec4T<at::acc_type<cache_t, true>>* shared_weight_update_row = weight_update_buffer.getPointer();
auto weight_row_template = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(weights, cache_weights, D, nullptr);
if (!std::is_same<emb_t, float>::value && stochastic_rounding) {
StochasticRoundingRNGState state;
// different for every *run* and every *thread*.
auto stochastic_rounding_seeds =
at::cuda::philox::unpack(stochastic_rounding_philox_args);
stochastic_rounding_init(
std::get<0>(stochastic_rounding_seeds) ^
std::get<1>(stochastic_rounding_seeds),
threadIdx.x + current_run_id * blockDim.x,
&state);
weight_row_template.set_stoc_state(&state);
}
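// For int8 embeddings that are not cached, keep the row's existing quantization
// parameters for dequantizing loads during the update.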
float2 qparams_template;
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
qparams_template = weight_row_template.load_qparams();
}
{{ split_precomputation }}
float2 qparams_new;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
int32_t d = (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH;
Vec4T<at::acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template);
auto& grad = grad_sum[i];
{{ split_weight_update }}
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
shared_weight_update_row[lane_id + i * kThreadGroupSize] = weight_new;
} else {
weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if embedding is not int8
}
}
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
// calculate qparams from updated weight row
qparams_new = thrust_find_qparams<at::acc_type<cache_t, true>>(shared_weight_update_row, D);
weight_row_template.store_qparams(qparams_new);
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
int32_t d = (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH;
weight_row_template.store(shared_weight_update_row[lane_id + i * kThreadGroupSize], d, qparams_new);
}
}
{% else %}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
int32_t d = (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH;
auto& grad = grad_sum[i];
grad.store(&grad_dev_weights[weights_offset + idx * D + d]);
}
{% endif %}
} // for each run
}
template <
typename emb_t,
typename grad_t,
typename cache_t,
size_t kMaxVecsPerThread,
int32_t kThreadGroupSize = kWarpSize>
__global__
__launch_bounds__(kBackwardMaxThreads)
void
split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1(
const at::PackedTensorAccessor64<grad_t, 2, at::RestrictPtrTraits>
grad_output,
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> dev_weights,
{% if not dense %}
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> uvm_weights,
at::PackedTensorAccessor64<cache_t, 2, at::RestrictPtrTraits> lxu_cache_weights,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
weights_placements,
{% endif %}
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> weights_offsets,
{% if not nobag %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> D_offsets,
{% else %}
int32_t B,
int64_t D,
{% endif %}
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
hash_size_cumsum,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
sorted_linear_indices_run,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
sorted_linear_indices_cumulative_run_lengths,
{% if not nobag %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> sorted_infos,
{% else %}
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> sorted_infos,
{% endif %}
{% if not dense %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
sorted_lxu_cache_locations,
{% endif %}
{% if weighted %}
const at::PackedTensorAccessor32<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits> sorted_indice_weights,
{% endif %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
sorted_linear_indices_num_runs,
int32_t max_segment_length_per_warp,
{% if not dense %}
bool stochastic_rounding,
at::PhiloxCudaState stochastic_rounding_philox_args,
{% else %}
at::PackedTensorAccessor64<cache_t, 1, at::RestrictPtrTraits> grad_dev_weights,
{% endif %}
{% if not nobag %}
FixedDivisor fd,
{% endif %}
{{ args.split_kernel_args | join(", ") }}) {
{% if not nobag %}
int32_t T = D_offsets.size(0) - 1;
const int32_t B = grad_output.size(0);
{% else %}
int32_t T = weights_offsets.size(0);
{% endif %}
const int32_t start_run_id = blockIdx.x * blockDim.y + threadIdx.y;
#ifdef FBGEMM_USE_SUBWARP_SHUFFLE
const unsigned int shfl_sync_mask =
((1L << kThreadGroupSize) - 1) <<
(threadIdx.y % (kWarpSize / kThreadGroupSize) * kThreadGroupSize);
#else
const unsigned int shfl_sync_mask = 0xffffffffu;
#endif
constexpr int VEC_WIDTH = 4;
for (uint32_t run_id = start_run_id;
run_id < sorted_linear_indices_run.size(0) && run_id < sorted_linear_indices_num_runs[0];
run_id += gridDim.x * blockDim.y) {
const int64_t linear_index = sorted_linear_indices_run[run_id];
const int32_t segment_start =
sorted_linear_indices_cumulative_run_lengths[run_id];
const int32_t segment_end =
sorted_linear_indices_cumulative_run_lengths[run_id + 1];
const int32_t SL = segment_end - segment_start;
if (SL >= max_segment_length_per_warp) {
continue;
}
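// Segments at least max_segment_length_per_warp long were already handled by the
// cta_per_row kernel; this kernel only processes the short runs.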
// now, each segment corresponds to exactly one table `t` and row in
// that table (`idx`). Thus, we can hoist out some of the book-keeping.
const auto info_0 = sorted_infos[segment_start];
{% if not nobag %}
int32_t t_0 = fd.Div(info_0); // info_0 / B;
{% else %}
int32_t t_0 = info_0 % T;
{% endif %}
int64_t hash_size = hash_size_cumsum[t_0];
{% if not nobag %}
int32_t D = D_offsets[t_0 + 1] - D_offsets[t_0];
{% endif %}
int64_t idx = linear_index - hash_size;
const int32_t SL_per_warp = div_round_up(SL, blockDim.y);
const int32_t sl_start = 0;
const int32_t sl_end = SL;
Vec4T<at::acc_type<cache_t, true>> grad_sum[kMaxVecsPerThread];
for (int32_t sl = sl_start; sl < sl_end; sl += kThreadGroupSize) {
int32_t sl_j = sl + threadIdx.x;
{% if not nobag %}
int32_t b_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0;
int32_t b; //= b_t % B;
int32_t t; //= b_t / B;
fd.DivMod(b_t, &t, &b);
int32_t D_start = D_offsets[t];
{% else %}
int64_t l_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0;
int32_t l = l_t / T;
{% endif %}
{% if weighted %}
at::acc_type<cache_t, true> idx_weight = sl_j < sl_end ? sorted_indice_weights[segment_start + sl_j] : 0.0;
{% endif %}
for (int32_t j = 0; j < kThreadGroupSize && sl + j < sl_end; ++j) {
{% if not nobag %}
int32_t b_j = SHFL_SYNC(b, j);
int32_t D_start_j = SHFL_SYNC(D_start, j);
{% else %}
int32_t l_j = SHFL_SYNC(l, j);
{% endif %}
{% if weighted %}
at::acc_type<cache_t, true> idx_weight_j = SHFL_SYNC(idx_weight, j);
{% endif %}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
int32_t d = (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH;
{% if not nobag %}
Vec4T<at::acc_type<grad_t, true>> grad_out_vec(
&grad_output[b_j][0] + D_start_j + d);
{% else %}
Vec4T<at::acc_type<grad_t, true>> grad_out_vec(&grad_output[l_j][d]);
{% endif %}
{% if weighted %}
grad_sum[i].fma_(grad_out_vec, idx_weight_j);
{% else %}
grad_sum[i].add_(grad_out_vec);
{% endif %}
}
}
}
int64_t weights_offset = weights_offsets[t_0];
{% if not dense %}
emb_t* __restrict__ weights{nullptr};
cache_t* __restrict__ cache_weights{nullptr};
int32_t D_emb = D;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
const auto weights_placement = static_cast<PlacementType>(weights_placements[t_0]);
if (weights_placement == PlacementType::DEVICE) {
weights = &dev_weights[weights_offset + idx * D_emb];
} else {
weights = &uvm_weights[weights_offset + idx * D_emb];
}
if (weights_placement == PlacementType::MANAGED_CACHING) {
int32_t cache_idx = sorted_lxu_cache_locations[segment_start];
if (cache_idx != kCacheLocationMissing) {
cache_weights = &lxu_cache_weights[cache_idx][0];
}
}
{% for tensor in args.split_tensors %}
at::acc_type<cache_t, true>* __restrict__ {{ tensor }};
const auto {{ tensor }}_placement = static_cast<PlacementType>({{ tensor }}_placements[t_0]);
int64_t {{ tensor }}_offset = {{ tensor }}_offsets[t_0];
if ({{ tensor }}_placement == PlacementType::DEVICE) {
{{ tensor }} = &{{ tensor }}_dev[{{ tensor }}_offset];
} else {
{{ tensor }} = &{{ tensor }}_uvm[{{ tensor }}_offset];
}
{% endfor %}
struct SharedMemory<Vec4T<at::acc_type<cache_t, true>>> weight_update_buffer;
Vec4T<at::acc_type<cache_t, true>>* shared_weight_update_row = weight_update_buffer.getPointer();
auto weight_row_template = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(weights, cache_weights, D, nullptr);
if (!std::is_same<emb_t, float>::value && stochastic_rounding) {
StochasticRoundingRNGState state;
// different for every *run* and every *thread*.
auto stochastic_rounding_seeds =
at::cuda::philox::unpack(stochastic_rounding_philox_args);
stochastic_rounding_init(
std::get<0>(stochastic_rounding_seeds) ^
std::get<1>(stochastic_rounding_seeds),
threadIdx.x + run_id * blockDim.x,
&state);
weight_row_template.set_stoc_state(&state);
}
float2 qparams_template;
if (std::is_same<emb_t, uint8_t>::value && !cache_weights){
qparams_template = weight_row_template.load_qparams();
}
{{ split_precomputation }}
float2 qparams_new;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
int32_t d = (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH;
Vec4T<at::acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template);
auto& grad = grad_sum[i];
{{ split_weight_update }}
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
shared_weight_update_row[threadIdx.x + (i + threadIdx.y * kMaxVecsPerThread) * kThreadGroupSize] = weight_new;
} else {
weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if type is not int8
}
}
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
// calculate new qparams after row update
qparams_new = thrust_find_qparams<at::acc_type<cache_t, true>>(&shared_weight_update_row[threadIdx.y * kMaxVecsPerThread * kThreadGroupSize], D);
weight_row_template.store_qparams(qparams_new);
// fetch cached updated row from shared mem and quantize on-the-fly when saving to lowp embedding
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
int32_t d = (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH;
weight_row_template.store(shared_weight_update_row[threadIdx.x + (i + threadIdx.y * kMaxVecsPerThread) * kThreadGroupSize], d, qparams_new);
}
}
{% else %}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH < D;
++i) {
int32_t d = (i * kThreadGroupSize + threadIdx.x) * VEC_WIDTH;
auto& grad = grad_sum[i];
grad.store(&grad_dev_weights[weights_offset + idx * D + d]);
}
{% endif %}
}
}
{{ "void" if not dense else "Tensor" }} split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_exact_cuda(
Tensor grad_output,
Tensor dev_weights,
{% if not dense %}
Tensor uvm_weights,
Tensor lxu_cache_weights,
Tensor weights_placements,
{% endif %}
Tensor weights_offsets,
{% if not nobag %}
Tensor D_offsets,
int64_t max_D,
{% else %}
int64_t D,
{% endif %}
Tensor hash_size_cumsum,
int64_t total_hash_size_bits,
Tensor indices,
Tensor offsets,
{% if not nobag %}
int64_t pooling_mode,
{% endif %}
{% if weighted %}
Tensor indice_weights,
{% endif %}
{% if not dense %}
Tensor lxu_cache_locations,
{% endif %}
int64_t unused_,
int64_t max_segment_length_per_warp,
{% if not dense %}
bool stochastic_rounding,
{% endif %}
{{ args.split_function_args | join(", ") }}) {
TENSOR_ON_CUDA_GPU(grad_output);
TENSOR_ON_CUDA_GPU(dev_weights);
{% if not dense %}
TENSOR_ON_CUDA_GPU(uvm_weights);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(weights_placements);
{% endif %}
TENSOR_ON_CUDA_GPU(weights_offsets);
{% if not nobag %}
TENSOR_ON_CUDA_GPU(D_offsets);
{% endif %}
TENSOR_ON_CUDA_GPU(hash_size_cumsum);
TENSOR_ON_CUDA_GPU(indices);
TENSOR_ON_CUDA_GPU(offsets);
{% if weighted %}
TENSOR_ON_CUDA_GPU(indice_weights);
{% endif %}
{% if not dense %}
TENSOR_ON_CUDA_GPU(lxu_cache_locations);
{% endif %}
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(dev_weights.get_device());
{% if dense %}
auto grad_dev_weights = zeros_like(dev_weights);
{% endif %}
// short-circuit if there are zero indices.
if (indices.numel() == 0) {
return {{ "grad_dev_weights" if dense else "" }};
}
{% if not nobag %}
int32_t T = D_offsets.numel() - 1;
{% else %}
int32_t T = weights_offsets.numel();
{% endif %}
TORCH_CHECK(T > 0);
// offsets = [B x T + 1]
const auto B = (offsets.size(0) - 1) / T;
TORCH_CHECK(B > 0);
auto BT_block_size = kMaxThreads / kWarpSize;
TORCH_CHECK(BT_block_size * kWarpSize <= kMaxThreads);
{% if nobag %}
auto max_D = D;
{% endif %}
TORCH_CHECK(max_D <= {{ max_embedding_dim }});
// V100: 96 KB; A100: 160 KB.
int max_shared_bytes = 0;
#ifndef __HIP_PLATFORM_HCC__
cudaDeviceGetAttribute(&max_shared_bytes, cudaDevAttrMaxSharedMemoryPerBlockOptin, dev_weights.get_device());
#else
// MI100 has 64 KB local memory (shared memory) per workgroup
max_shared_bytes = 64 << 10;
#endif
C10_CUDA_KERNEL_LAUNCH_CHECK();
int shared_kb = max_shared_bytes >> 10;
// V100: 64 KB; A100: 96 KB.
#ifndef __HIP_PLATFORM_HCC__
    // Use 2/3 of the available GPU shared mem; leave room for L1$.
int used_shared_kb = round_down(shared_kb * 2 / 3, 16);
TORCH_CHECK(used_shared_kb > 0);
#else
// MI100 has independent shared mem and L1
int used_shared_kb = shared_kb;
#endif
int used_shared_bytes = used_shared_kb << 10;
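    // Sort the linearized indices so that all occurrences of the same embedding row form one
    // contiguous run; the backward kernels then process one run per warp or per CTA.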
Tensor linear_indices, linear_indices_sorted;
Tensor infos_sorted;
Tensor sorted_linear_indices_run, sorted_linear_indices_run_lengths,
sorted_linear_indices_num_runs,
sorted_linear_indices_cumulative_run_lengths;
std::tie(
linear_indices,
linear_indices_sorted,
infos_sorted,
sorted_linear_indices_run,
sorted_linear_indices_run_lengths,
sorted_linear_indices_num_runs,
sorted_linear_indices_cumulative_run_lengths) =
transpose_embedding_input(
hash_size_cumsum,
total_hash_size_bits,
indices,
offsets,
{{"true" if nobag else "false"}});
{% if not dense %}
auto lxu_cache_locations_sorted = at::empty_like(lxu_cache_locations);
if (lxu_cache_locations.size(0) > 0) {
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(radix_sort_pairs(
nullptr,
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
lxu_cache_locations.data_ptr<int32_t>(),
lxu_cache_locations_sorted.data_ptr<int32_t>(),
linear_indices.numel(),
0,
total_hash_size_bits,
at::cuda::getCurrentCUDAStream(),
false));
auto temp_storage = at::empty(
{static_cast<int64_t>(temp_storage_bytes)},
indices.options().dtype(at::kByte));
AT_CUDA_CHECK(radix_sort_pairs(
temp_storage.data_ptr(),
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
lxu_cache_locations.data_ptr<int32_t>(),
lxu_cache_locations_sorted.data_ptr<int32_t>(),
linear_indices.numel(),
0,
total_hash_size_bits,
at::cuda::getCurrentCUDAStream(),
false));
}
{% endif %}
DISPATCH_EMB_GRAD_CACHE_TYPES(
dev_weights.scalar_type(),
grad_output.scalar_type(),
{% if not dense %}
lxu_cache_weights.scalar_type(),
{% else %}
dev_weights.scalar_type(),
{% endif %}
"split_embedding_backward_{{ optimizer }}_exact_kernel",
[&] {
{% if weighted %}
auto indice_weights_sorted = at::empty_like(indice_weights);
{
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(radix_sort_pairs(
nullptr,
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
indice_weights.data_ptr<at::acc_type<cache_t, true>>(),
indice_weights_sorted.data_ptr<at::acc_type<cache_t, true>>(),
linear_indices.numel(),
0,
total_hash_size_bits,
at::cuda::getCurrentCUDAStream(),
false));
auto temp_storage = at::empty(
{static_cast<int64_t>(temp_storage_bytes)},
indices.options().dtype(at::kByte));
AT_CUDA_CHECK(radix_sort_pairs(
temp_storage.data_ptr(),
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
indice_weights.data_ptr<at::acc_type<cache_t, true>>(),
indice_weights_sorted.data_ptr<at::acc_type<cache_t, true>>(),
linear_indices.numel(),
0,
total_hash_size_bits,
at::cuda::getCurrentCUDAStream(),
false));
}
{% endif %}
// early memory release
linear_indices.reset();
linear_indices_sorted.reset();
auto grad_output_accessor = grad_output.packed_accessor64<grad_t, 2, at::RestrictPtrTraits>();
{% if not nobag %}
Tensor grad_output_mean;
if (static_cast<PoolingMode>(pooling_mode) == PoolingMode::MEAN) {
grad_output_mean = at::empty_like(grad_output);
grad_mean_kernel<grad_t>
<<<div_round_up((B * T), kMaxThreads / kWarpSize),
dim3(kWarpSize, kMaxThreads / kWarpSize),
0,
at::cuda::getCurrentCUDAStream()>>>(
grad_output_accessor,
D_offsets
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
offsets
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
grad_output_mean.packed_accessor64<
grad_t, 2, at::RestrictPtrTraits>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
grad_output_accessor = grad_output_mean.packed_accessor64<
grad_t, 2, at::RestrictPtrTraits>();
}
{% endif %}
{% if not dense %}
at::PhiloxCudaState rng_engine_inputs;
if (stochastic_rounding && !std::is_same<emb_t, float>::value) {
auto gen = at::cuda::detail::getDefaultCUDAGenerator();
std::lock_guard<std::mutex> lock(gen.mutex());
rng_engine_inputs =
at::check_generator<at::CUDAGeneratorImpl>(gen)
->philox_cuda_state(4);
}
{% endif %}
        // kMaxElemPerThread is the # of elements handled by each thread if we use a full warp for a row.
        // We consider kMaxElemPerThread = 1 and 2, and then multiples of 4.
{% for kMaxElemPerThread in range(1, max_embedding_dim // (items_per_warp // 4) + 1) %}
{% if kMaxElemPerThread in [1, 2] or kMaxElemPerThread % 4 == 0 %}
if (max_D <= {{ items_per_warp // 4 * kMaxElemPerThread }}) {
// hipcc can't use max in constexpr
constexpr int kMaxVecsPerThread = {{ kMaxElemPerThread }} / 4 >= 1 ? {{ kMaxElemPerThread }} / 4 : 1;
        // If max_D is small, use fewer threads than kWarpSize.
#ifdef FBGEMM_USE_SUBWARP_SHUFFLE
constexpr int kThreadGroupSize = kWarpSize / std::max(4 / {{ kMaxElemPerThread }}, 1);
#else
constexpr int kThreadGroupSize = kWarpSize;
#endif
        // Stay under used_shared_kb of shared memory (V100: 64 KB; A100: 96 KB); BT_block_size must be a power of two.
while (BT_block_size * sizeof(at::acc_type<cache_t, true>) * 4 * kWarpSize * kMaxVecsPerThread >= used_shared_bytes) {
BT_block_size /= 2;
}
TORCH_CHECK(BT_block_size >= 1);
if (std::is_same<emb_t, double>::value) {
// Otherwise we see CUDA kernel launch failures despite the above checks.
BT_block_size = 1;
}
auto long_run_ids = at::empty_like(sorted_linear_indices_run_lengths);
auto num_long_run_ids = at::zeros({1}, indices.options().dtype(at::kInt));
const bool use_deterministic_algorithms = at::globalContext().deterministicAlgorithms();
const int max_segment_length_per_cta = use_deterministic_algorithms ? INT_MAX : 1024;
Tensor long_run_id_to_really_long_run_ids;
if (use_deterministic_algorithms) {
long_run_id_to_really_long_run_ids =
at::empty(0, sorted_linear_indices_run_lengths.options());
} else {
long_run_id_to_really_long_run_ids =
at::empty_like(sorted_linear_indices_run_lengths);
}
auto num_really_long_run_ids = at::zeros({1}, indices.options().dtype(at::kInt));
auto grad_accum_counter = at::empty(
use_deterministic_algorithms ? 0 : (indices.numel() / max_segment_length_per_cta),
indices.options().dtype(at::kInt));
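        // Mark runs longer than max_segment_length_per_warp; these are handled by the cta_per_row
        // kernel, and really long ones are split across several CTAs (unless deterministic
        // algorithms are requested, in which case each run stays on a single CTA).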
split_embedding_backward_codegen_find_long_segments<<<
div_round_up(indices.numel(), kMaxThreads),
kMaxThreads,
0,
at::cuda::getCurrentCUDAStream()
>>>(
sorted_linear_indices_num_runs.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
sorted_linear_indices_run_lengths.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
num_long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
long_run_id_to_really_long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
num_really_long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
grad_accum_counter.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
max_segment_length_per_warp,
max_segment_length_per_cta,
use_deterministic_algorithms);
C10_CUDA_KERNEL_LAUNCH_CHECK();
// A temp buffer to accumulate gradients with atomics.
auto temp_grad_accum = at::zeros(
{use_deterministic_algorithms ? 0 : grad_accum_counter.numel(), max_D},
grad_output.options().dtype(std::is_same<cache_t, double>::value ? at::kDouble : at::kFloat));
int32_t grid_size = std::min(
div_round_up(long_run_ids.numel(), kMaxThreads),
get_max_thread_blocks_());
// Check https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory-7-x
// "Compute capability 7.x devices allow a single thread block to
// address the full capacity of shared memory: 96 KB on Volta,
// 64 KB on Turing. Kernels relying on shared memory allocations
// over 48 KB per block are architecture-specific, as such they
// must use dynamic shared memory (rather than statically sized
// arrays) and require an explicit opt-in using cudaFuncSetAttribute()".
#ifndef __HIP_PLATFORM_HCC__
cudaFuncSetAttribute(
split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1<
emb_t,
grad_t,
cache_t,
kMaxVecsPerThread,
kThreadGroupSize>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
used_shared_bytes); // V100: 64 KB; A100: 96 KB.
#endif
C10_CUDA_KERNEL_LAUNCH_CHECK();
        // Dividing by kMaxThreads is a heuristic to keep the number of blocks from far exceeding num_long_run_ids[0].
split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1<
emb_t,
grad_t,
cache_t,
kMaxVecsPerThread,
kThreadGroupSize>
<<<grid_size,
dim3(kThreadGroupSize, BT_block_size),
BT_block_size * sizeof(at::acc_type<cache_t, true>) * 4 * kWarpSize *
kMaxVecsPerThread,
at::cuda::getCurrentCUDAStream()>>>(
grad_output_accessor,
{% if not dense %}
dev_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
uvm_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
lxu_cache_weights.packed_accessor64<cache_t, 2, at::RestrictPtrTraits>(),
weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% else %}
dev_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
{% endif %}
weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
{% if not nobag %}
D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% else %}
B,
D,
{% endif %}
hash_size_cumsum.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
sorted_linear_indices_run
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
sorted_linear_indices_cumulative_run_lengths
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
num_long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% if not nobag %}
infos_sorted.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% else %}
infos_sorted.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
{% endif %}
{% if not dense %}
lxu_cache_locations_sorted.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% endif %}
{% if weighted %}
indice_weights_sorted.packed_accessor32<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits>(),
{% endif %}
{% if not dense %}
stochastic_rounding,
rng_engine_inputs,
{% else %}
grad_dev_weights.packed_accessor64<cache_t, 1, at::RestrictPtrTraits>(),
{% endif %}
{% if not nobag %}
FixedDivisor(B),
{% endif %}
long_run_id_to_really_long_run_ids.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
temp_grad_accum.packed_accessor32<at::acc_type<cache_t, true>, 2, at::RestrictPtrTraits>(),
grad_accum_counter.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
max_segment_length_per_cta,
use_deterministic_algorithms,
{{ args.split_kernel_arg_constructors | join(", ") }});
C10_CUDA_KERNEL_LAUNCH_CHECK();
grid_size = std::min(
div_round_up(sorted_linear_indices_run.numel(), kBackwardMaxThreads / kThreadGroupSize),
get_max_thread_blocks_());
        // Shared memory is not needed for non-uint8_t weights.
size_t shmem_bytes = 0;
if (std::is_same<emb_t, uint8_t>::value) {
shmem_bytes = BT_block_size * sizeof(
at::acc_type<cache_t, true>) * 4 * kWarpSize * kMaxVecsPerThread;
#ifndef __HIP_PLATFORM_HCC__
cudaFuncSetAttribute(
split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1<
emb_t,
grad_t,
cache_t,
kMaxVecsPerThread,
kThreadGroupSize>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
used_shared_bytes); // V100: 64 KB; A100: 96 KB.
#endif
}
C10_CUDA_KERNEL_LAUNCH_CHECK();
split_embedding{{ "_nobag" if nobag else "" }}_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1<
emb_t,
grad_t,
cache_t,
kMaxVecsPerThread,
kThreadGroupSize>
<<<grid_size,
dim3(kThreadGroupSize, kBackwardMaxThreads / kThreadGroupSize),
shmem_bytes,
at::cuda::getCurrentCUDAStream()>>>(
grad_output_accessor,
{% if not dense %}
dev_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
uvm_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
lxu_cache_weights.packed_accessor64<cache_t, 2, at::RestrictPtrTraits>(),
weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% else %}
dev_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
{% endif %}
weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
{% if not nobag %}
D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% else %}
B,
D,
{% endif %}
hash_size_cumsum.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
sorted_linear_indices_run
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
sorted_linear_indices_cumulative_run_lengths
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% if not nobag %}
infos_sorted.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% else %}
infos_sorted.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
{% endif %}
{% if not dense %}
lxu_cache_locations_sorted.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% endif %}
{% if weighted %}
indice_weights_sorted.packed_accessor32<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits>(),
{% endif %}
sorted_linear_indices_num_runs
.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
max_segment_length_per_warp,
{% if not dense %}
stochastic_rounding,
rng_engine_inputs,
{% else %}
grad_dev_weights.packed_accessor64<cache_t, 1, at::RestrictPtrTraits>(),
{% endif %}
{% if not nobag %}
FixedDivisor(B),
{% endif %}
{{ args.split_kernel_arg_constructors | join(", ") }});
C10_CUDA_KERNEL_LAUNCH_CHECK();
return;
}
{% endif %}
{% endfor %}
});
return {{ "grad_dev_weights" if dense else "" }};
}
// clang-format on
|
9653cda2f2f736f219392b0983d47a2fb53b1a57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Oleh Semeniv ([email protected])
//
#include <system/op_boilerplate.h>
#include <ops/declarable/helpers/updatersHelpers.h>
#include <helpers/PointersManager.h>
#include <math/platformmath.h>
#include <math/templatemath.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ void nesterovsUpdaterCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vin, const Nd4jLong* inShapeInfo,
void* vz, const Nd4jLong* zShapeInfo, void* vst, const Nd4jLong* stShapeInfo, const T lr, const T momentum) {
const auto grad = reinterpret_cast<const T*>(vx);
const auto init = reinterpret_cast<const T*>(vin);
auto up = reinterpret_cast<T*>(vz);
auto st = reinterpret_cast<T*>(vst);
__shared__ Nd4jLong xLen;
__shared__ T momentumT;
__shared__ bool bEWS, bOrdering, bXZsame, bXInSame, bXStSame;
if (threadIdx.x == 0) {
xLen = shape::length(xShapeInfo);
momentumT = (-momentum - 1);
bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) &&
1 == shape::elementWiseStride(stShapeInfo) && 1 == shape::elementWiseStride(inShapeInfo);
bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) && shape::order(xShapeInfo) == shape::order(inShapeInfo) &&
shape::order(xShapeInfo) == shape::order(stShapeInfo);
bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
bXInSame = shape::haveSameShapeAndStrides(xShapeInfo, inShapeInfo);
bXStSame = shape::haveSameShapeAndStrides(xShapeInfo, stShapeInfo);
}
__syncthreads();
int coords[MAX_RANK];
for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) {
auto xOffset = i, zOffset = i, initOffset = i, stOffset = i;
if (!bEWS || !bOrdering) {
shape::index2coords(i, xShapeInfo, coords);
xOffset = shape::getOffset(xShapeInfo, coords);
zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords);
initOffset = bXInSame ? xOffset : shape::getOffset(inShapeInfo, coords);
stOffset = bXStSame ? xOffset : shape::getOffset(stShapeInfo, coords);
}
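// Nesterov-momentum update as computed below:
// st = momentum * init - lr * grad; up = momentum * init - (momentum + 1) * st
// (momentumT == -momentum - 1).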
T prevState = momentum * init[initOffset];
st[stOffset] = prevState - lr * grad[xOffset];
up[zOffset] = prevState + momentumT * st[stOffset];
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
linkage void nesterovsUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t* stream,
const void* vx, const Nd4jLong* xShapeInfo, const void* vin, const Nd4jLong* inShapeInfo,
void* vz, const Nd4jLong* zShapeInfo, void* vst, const Nd4jLong* stShapeInfo,
const double dLr, const double dMomentum) {
const T lr = static_cast<T>(dLr);
const T momentum = static_cast<T>(dMomentum);
hipLaunchKernelGGL(( nesterovsUpdaterCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 256, * stream, vx, xShapeInfo, vin, inShapeInfo,
vz, zShapeInfo, vst, stShapeInfo, lr, momentum);
}
///////////////////////////////////////////////////////////////////
void updaterNesterovs(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initState,
NDArray& update, NDArray& stateV, const double dLr, const double dMomentum) {
PointersManager manager(context, "nesterovsUpdater");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (gradient.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
NDArray::prepareSpecialUse({ &update, &stateV }, { &gradient, &initState });
BUILD_SINGLE_SELECTOR(gradient.dataType(), nesterovsUpdaterCudaLauncher, (blocksPerGrid, threadsPerBlock,
context->getCudaStream(), gradient.specialBuffer(), gradient.specialShapeInfo(),
initState.specialBuffer(), initState.specialShapeInfo(),
update.specialBuffer(), update.specialShapeInfo(),
stateV.specialBuffer(), stateV.specialShapeInfo(), dLr, dMomentum), FLOAT_TYPES);
NDArray::registerSpecialUse({ &update, &stateV }, { &gradient, &initState });
manager.synchronize();
}
}
}
}
| 9653cda2f2f736f219392b0983d47a2fb53b1a57.cu | /*******************************************************************************
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Oleh Semeniv ([email protected])
//
#include <system/op_boilerplate.h>
#include <ops/declarable/helpers/updatersHelpers.h>
#include <helpers/PointersManager.h>
#include <math/platformmath.h>
#include <math/templatemath.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ void nesterovsUpdaterCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vin, const Nd4jLong* inShapeInfo,
void* vz, const Nd4jLong* zShapeInfo, void* vst, const Nd4jLong* stShapeInfo, const T lr, const T momentum) {
const auto grad = reinterpret_cast<const T*>(vx);
const auto init = reinterpret_cast<const T*>(vin);
auto up = reinterpret_cast<T*>(vz);
auto st = reinterpret_cast<T*>(vst);
__shared__ Nd4jLong xLen;
__shared__ T momentumT;
__shared__ bool bEWS, bOrdering, bXZsame, bXInSame, bXStSame;
if (threadIdx.x == 0) {
xLen = shape::length(xShapeInfo);
momentumT = (-momentum - 1);
bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) &&
1 == shape::elementWiseStride(stShapeInfo) && 1 == shape::elementWiseStride(inShapeInfo);
bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) && shape::order(xShapeInfo) == shape::order(inShapeInfo) &&
shape::order(xShapeInfo) == shape::order(stShapeInfo);
bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
bXInSame = shape::haveSameShapeAndStrides(xShapeInfo, inShapeInfo);
bXStSame = shape::haveSameShapeAndStrides(xShapeInfo, stShapeInfo);
}
__syncthreads();
int coords[MAX_RANK];
for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) {
auto xOffset = i, zOffset = i, initOffset = i, stOffset = i;
if (!bEWS || !bOrdering) {
shape::index2coords(i, xShapeInfo, coords);
xOffset = shape::getOffset(xShapeInfo, coords);
zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords);
initOffset = bXInSame ? xOffset : shape::getOffset(inShapeInfo, coords);
stOffset = bXStSame ? xOffset : shape::getOffset(stShapeInfo, coords);
}
T prevState = momentum * init[initOffset];
st[stOffset] = prevState - lr * grad[xOffset];
up[zOffset] = prevState + momentumT * st[stOffset];
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
linkage void nesterovsUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t* stream,
const void* vx, const Nd4jLong* xShapeInfo, const void* vin, const Nd4jLong* inShapeInfo,
void* vz, const Nd4jLong* zShapeInfo, void* vst, const Nd4jLong* stShapeInfo,
const double dLr, const double dMomentum) {
const T lr = static_cast<T>(dLr);
const T momentum = static_cast<T>(dMomentum);
nesterovsUpdaterCuda<T><<<blocksPerGrid, threadsPerBlock, 256, * stream>>>(vx, xShapeInfo, vin, inShapeInfo,
vz, zShapeInfo, vst, stShapeInfo, lr, momentum);
}
///////////////////////////////////////////////////////////////////
void updaterNesterovs(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initState,
NDArray& update, NDArray& stateV, const double dLr, const double dMomentum) {
PointersManager manager(context, "nesterovsUpdater");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (gradient.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
NDArray::prepareSpecialUse({ &update, &stateV }, { &gradient, &initState });
BUILD_SINGLE_SELECTOR(gradient.dataType(), nesterovsUpdaterCudaLauncher, (blocksPerGrid, threadsPerBlock,
context->getCudaStream(), gradient.specialBuffer(), gradient.specialShapeInfo(),
initState.specialBuffer(), initState.specialShapeInfo(),
update.specialBuffer(), update.specialShapeInfo(),
stateV.specialBuffer(), stateV.specialShapeInfo(), dLr, dMomentum), FLOAT_TYPES);
NDArray::registerSpecialUse({ &update, &stateV }, { &gradient, &initState });
manager.synchronize();
}
}
}
}
|
b7bede93de7613f982d00bf4fe0b1a44957ffd28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/upsample_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
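// Map a linear index in the upsampled output back to the corresponding input element by
// dividing the two innermost spatial coordinates by scale_factor.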
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor){
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
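// Inverse mapping: given an input element and a sub-pixel offset (off_x, off_y), compute
// the linear index of the corresponding upsampled output element.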
__device__ int translate_idx_inv(
int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y){
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w*scale_factor+off_x;
z = z*scale_factor+off_y;
d2 *= scale_factor;
d3 *= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
template <typename Dtype>
__global__ void upscale(const Dtype *input, Dtype *output,int no_elements, int scale_factor, int d1, int d2, int d3) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii >= no_elements) return;
int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
output[ii]=input[ipidx];
}
template <typename Dtype>
__global__ void downscale(Dtype *gradInput_data, const Dtype *gradOutput_data,int no_elements, int scale_factor, int d1, int d2,int d3) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii >= no_elements) return;
for (int i = 0; i < scale_factor; i++) {
for (int j = 0; j < scale_factor; j++) {
int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j);
gradInput_data[ii] += gradOutput_data[ipidx];
}
}
}
template <typename Dtype>
void UpsampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,const vector<Blob<Dtype>*>& top) {
int d1, d2, d3;
d1 = top[0]->shape(1);
d2 = top[0]->shape(2);
d3 = top[0]->shape(3);
int no_elements = top[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL((upscale<Dtype>), dim3(CAFFE_GET_BLOCKS(no_elements)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom[0]->gpu_data(),top[0]->mutable_gpu_data(), no_elements, scale_, d1, d2, d3);
}
template <typename Dtype>
void UpsampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
int d1, d2, d3;
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
d1 = bottom[0]->shape(1);
d2 = bottom[0]->shape(2);
d3 = bottom[0]->shape(3);
int no_elements = bottom[0]->count();
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom_diff);
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL((downscale<Dtype>), dim3(CAFFE_GET_BLOCKS(no_elements)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_diff, top[0]->gpu_diff(), no_elements, scale_, d1, d2, d3);
}
INSTANTIATE_LAYER_GPU_FUNCS(UpsampleLayer);
} // namespace caffe
| b7bede93de7613f982d00bf4fe0b1a44957ffd28.cu | #include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/upsample_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor){
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
__device__ int translate_idx_inv(
int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y){
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w*scale_factor+off_x;
z = z*scale_factor+off_y;
d2 *= scale_factor;
d3 *= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
template <typename Dtype>
__global__ void upscale(const Dtype *input, Dtype *output,int no_elements, int scale_factor, int d1, int d2, int d3) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii >= no_elements) return;
int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
output[ii]=input[ipidx];
}
template <typename Dtype>
__global__ void downscale(Dtype *gradInput_data, const Dtype *gradOutput_data,int no_elements, int scale_factor, int d1, int d2,int d3) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii >= no_elements) return;
for (int i = 0; i < scale_factor; i++) {
for (int j = 0; j < scale_factor; j++) {
int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j);
gradInput_data[ii] += gradOutput_data[ipidx];
}
}
}
template <typename Dtype>
void UpsampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,const vector<Blob<Dtype>*>& top) {
int d1, d2, d3;
d1 = top[0]->shape(1);
d2 = top[0]->shape(2);
d3 = top[0]->shape(3);
int no_elements = top[0]->count();
upscale<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(no_elements), CAFFE_CUDA_NUM_THREADS>>>(
bottom[0]->gpu_data(),top[0]->mutable_gpu_data(), no_elements, scale_, d1, d2, d3);
}
template <typename Dtype>
void UpsampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
int d1, d2, d3;
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
d1 = bottom[0]->shape(1);
d2 = bottom[0]->shape(2);
d3 = bottom[0]->shape(3);
int no_elements = bottom[0]->count();
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom_diff);
downscale<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(no_elements), CAFFE_CUDA_NUM_THREADS>>>(
bottom_diff, top[0]->gpu_diff(), no_elements, scale_, d1, d2, d3);
}
INSTANTIATE_LAYER_GPU_FUNCS(UpsampleLayer);
} // namespace caffe
|
9649ececc8a3b5bcedac5e7bc09f4d1ee279a161.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2015-2019 by Contributors
* \file elementwise_metric.cc
* \brief evaluation metrics for elementwise binary or regression.
* \author Kailong Chen, Tianqi Chen
*
* The expressions like wsum == 0 ? esum : esum / wsum is used to handle empty dataset.
*/
#include <cmath>
#include <rabit/rabit.h>
#include <xgboost/metric.h>
#include <dmlc/registry.h>
#include "metric_common.h"
#include "../common/math.h"
#include "../common/common.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::hip::par
#include <thrust/functional.h> // thrust::plus<>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
namespace xgboost {
namespace metric {
// tag the this file, used by force static link later.
DMLC_REGISTRY_FILE_TAG(elementwise_metric);
template <typename EvalRow>
class ElementWiseMetricsReduction {
public:
explicit ElementWiseMetricsReduction(EvalRow policy) : policy_(std::move(policy)) {}
PackedReduceResult CpuReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) const {
size_t ndata = labels.Size();
const auto& h_labels = labels.HostVector();
const auto& h_weights = weights.HostVector();
const auto& h_preds = preds.HostVector();
bst_float residue_sum = 0;
bst_float weights_sum = 0;
#pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static)
for (omp_ulong i = 0; i < ndata; ++i) {
const bst_float wt = h_weights.size() > 0 ? h_weights[i] : 1.0f;
residue_sum += policy_.EvalRow(h_labels[i], h_preds[i]) * wt;
weights_sum += wt;
}
PackedReduceResult res { residue_sum, weights_sum };
return res;
}
#if defined(XGBOOST_USE_CUDA)
PackedReduceResult DeviceReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
size_t n_data = preds.Size();
thrust::counting_iterator<size_t> begin(0);
thrust::counting_iterator<size_t> end = begin + n_data;
auto s_label = labels.DeviceSpan();
auto s_preds = preds.DeviceSpan();
auto s_weights = weights.DeviceSpan();
bool const is_null_weight = weights.Size() == 0;
auto d_policy = policy_;
dh::XGBCachingDeviceAllocator<char> alloc;
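    // Fused transform + reduction on the device: each element yields a (weighted residue, weight)
    // pair, and the pairs are summed into a single PackedReduceResult.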
PackedReduceResult result = thrust::transform_reduce(
thrust::hip::par(alloc),
begin, end,
[=] XGBOOST_DEVICE(size_t idx) {
bst_float weight = is_null_weight ? 1.0f : s_weights[idx];
bst_float residue = d_policy.EvalRow(s_label[idx], s_preds[idx]);
residue *= weight;
return PackedReduceResult{ residue, weight };
},
PackedReduceResult(),
thrust::plus<PackedReduceResult>());
return result;
}
#endif // XGBOOST_USE_CUDA
PackedReduceResult Reduce(
const GenericParameter &tparam,
int device,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
PackedReduceResult result;
if (device < 0) {
result = CpuReduceMetrics(weights, labels, preds);
}
#if defined(XGBOOST_USE_CUDA)
else { // NOLINT
device_ = device;
preds.SetDevice(device_);
labels.SetDevice(device_);
weights.SetDevice(device_);
dh::safe_cuda(hipSetDevice(device_));
result = DeviceReduceMetrics(weights, labels, preds);
}
#endif // defined(XGBOOST_USE_CUDA)
return result;
}
private:
EvalRow policy_;
#if defined(XGBOOST_USE_CUDA)
int device_{-1};
#endif // defined(XGBOOST_USE_CUDA)
};
struct EvalRowRMSE {
char const *Name() const {
return "rmse";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = label - pred;
return diff * diff;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum);
}
};
struct EvalRowRMSLE {
char const* Name() const {
return "rmsle";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = std::log1p(label) - std::log1p(pred);
return diff * diff;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum);
}
};
struct EvalRowMAE {
const char *Name() const {
return "mae";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs(label - pred);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowLogLoss {
const char *Name() const {
return "logloss";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
const bst_float pneg = 1.0f - py;
if (py < eps) {
return -y * ::log(eps) - (1.0f - y) * ::log(1.0f - eps);
} else if (pneg < eps) {
return -y * ::log(1.0f - eps) - (1.0f - y) * ::log(eps);
} else {
return -y * ::log(py) - (1.0f - y) * ::log(pneg);
}
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowMPHE {
char const *Name() const {
return "mphe";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = label - pred;
return std::sqrt( 1 + diff * diff) - 1;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalError {
explicit EvalError(const char* param) {
if (param != nullptr) {
CHECK_EQ(sscanf(param, "%f", &threshold_), 1)
<< "unable to parse the threshold value for the error metric";
has_param_ = true;
} else {
threshold_ = 0.5f;
has_param_ = false;
}
}
const char *Name() const {
static std::string name;
if (has_param_) {
std::ostringstream os;
os << "error";
if (threshold_ != 0.5f) os << '@' << threshold_;
name = os.str();
return name.c_str();
} else {
return "error";
}
}
XGBOOST_DEVICE bst_float EvalRow(
bst_float label, bst_float pred) const {
// assume label is in [0,1]
return pred > threshold_ ? 1.0f - label : label;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
private:
bst_float threshold_;
bool has_param_;
};
struct EvalPoissonNegLogLik {
const char *Name() const {
return "poisson-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
if (py < eps) py = eps;
return common::LogGamma(y + 1.0f) + py - ::log(py) * y;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalGammaDeviance {
const char *Name() const {
return "gamma-deviance";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float epsilon = 1.0e-9;
bst_float tmp = label / (pred + epsilon);
return tmp - ::log(tmp) - 1;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return 2 * esum;
}
};
struct EvalGammaNLogLik {
static const char *Name() {
return "gamma-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
bst_float psi = 1.0;
bst_float theta = -1. / py;
bst_float a = psi;
bst_float b = -::log(-theta);
bst_float c = 1. / psi * ::log(y/psi) - ::log(y) - common::LogGamma(1. / psi);
return -((y * theta - b) / a + c);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalTweedieNLogLik {
explicit EvalTweedieNLogLik(const char* param) {
CHECK(param != nullptr)
<< "tweedie-nloglik must be in format tweedie-nloglik@rho";
rho_ = atof(param);
CHECK(rho_ < 2 && rho_ >= 1)
<< "tweedie variance power must be in interval [1, 2)";
}
const char *Name() const {
static std::string name;
std::ostringstream os;
os << "tweedie-nloglik@" << rho_;
name = os.str();
return name.c_str();
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float p) const {
bst_float a = y * ::exp((1 - rho_) * ::log(p)) / (1 - rho_);
bst_float b = ::exp((2 - rho_) * ::log(p)) / (2 - rho_);
return -a + b;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
protected:
bst_float rho_;
};
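// Added illustration (not part of the original source): for rho in [1, 2) the
// row loss computed above is the negative Tweedie quasi-log-likelihood
//   loss(y, p) = -y * p^(1 - rho) / (1 - rho) + p^(2 - rho) / (2 - rho),
// which EvalRow evaluates through exp((1 - rho) * log(p)) and
// exp((2 - rho) * log(p)) for numerical convenience.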
/*!
* \brief base class of element-wise evaluation
 * \tparam Policy the element-wise evaluation policy providing EvalRow and GetFinal
*/
template<typename Policy>
struct EvalEWiseBase : public Metric {
EvalEWiseBase() = default;
explicit EvalEWiseBase(char const* policy_param) :
policy_{policy_param}, reducer_{policy_} {}
bst_float Eval(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
bool distributed) override {
if (info.labels_.Size() == 0) {
LOG(WARNING) << "label set is empty";
}
CHECK_EQ(preds.Size(), info.labels_.Size())
<< "label and prediction size not match, "
<< "hint: use merror or mlogloss for multi-class classification";
int device = tparam_->gpu_id;
auto result =
reducer_.Reduce(*tparam_, device, info.weights_, info.labels_, preds);
double dat[2] { result.Residue(), result.Weights() };
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
return Policy::GetFinal(dat[0], dat[1]);
}
const char* Name() const override {
return policy_.Name();
}
private:
Policy policy_;
ElementWiseMetricsReduction<Policy> reducer_{policy_};
};
XGBOOST_REGISTER_METRIC(RMSE, "rmse")
.describe("Rooted mean square error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSE>(); });
XGBOOST_REGISTER_METRIC(RMSLE, "rmsle")
.describe("Rooted mean square log error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSLE>(); });
XGBOOST_REGISTER_METRIC(MAE, "mae")
.describe("Mean absolute error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAE>(); });
XGBOOST_REGISTER_METRIC(MPHE, "mphe")
.describe("Mean Pseudo Huber error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMPHE>(); });
XGBOOST_REGISTER_METRIC(LogLoss, "logloss")
.describe("Negative loglikelihood for logistic regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowLogLoss>(); });
XGBOOST_REGISTER_METRIC(PossionNegLoglik, "poisson-nloglik")
.describe("Negative loglikelihood for poisson regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalPoissonNegLogLik>(); });
XGBOOST_REGISTER_METRIC(GammaDeviance, "gamma-deviance")
.describe("Residual deviance for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaDeviance>(); });
XGBOOST_REGISTER_METRIC(GammaNLogLik, "gamma-nloglik")
.describe("Negative log-likelihood for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaNLogLik>(); });
XGBOOST_REGISTER_METRIC(Error, "error")
.describe("Binary classification error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalError>(param); });
XGBOOST_REGISTER_METRIC(TweedieNLogLik, "tweedie-nloglik")
.describe("tweedie-nloglik@rho for tweedie regression.")
.set_body([](const char* param) {
return new EvalEWiseBase<EvalTweedieNLogLik>(param);
});
} // namespace metric
} // namespace xgboost
| 9649ececc8a3b5bcedac5e7bc09f4d1ee279a161.cu | /*!
* Copyright 2015-2019 by Contributors
* \file elementwise_metric.cc
* \brief evaluation metrics for elementwise binary or regression.
* \author Kailong Chen, Tianqi Chen
*
 * Expressions like wsum == 0 ? esum : esum / wsum are used to handle an empty dataset.
*/
#include <cmath>
#include <rabit/rabit.h>
#include <xgboost/metric.h>
#include <dmlc/registry.h>
#include "metric_common.h"
#include "../common/math.h"
#include "../common/common.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::cuda::par
#include <thrust/functional.h> // thrust::plus<>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
namespace xgboost {
namespace metric {
// tag this file, used to force static link later.
DMLC_REGISTRY_FILE_TAG(elementwise_metric);
template <typename EvalRow>
class ElementWiseMetricsReduction {
public:
explicit ElementWiseMetricsReduction(EvalRow policy) : policy_(std::move(policy)) {}
PackedReduceResult CpuReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) const {
size_t ndata = labels.Size();
const auto& h_labels = labels.HostVector();
const auto& h_weights = weights.HostVector();
const auto& h_preds = preds.HostVector();
bst_float residue_sum = 0;
bst_float weights_sum = 0;
#pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static)
for (omp_ulong i = 0; i < ndata; ++i) {
const bst_float wt = h_weights.size() > 0 ? h_weights[i] : 1.0f;
residue_sum += policy_.EvalRow(h_labels[i], h_preds[i]) * wt;
weights_sum += wt;
}
PackedReduceResult res { residue_sum, weights_sum };
return res;
}
#if defined(XGBOOST_USE_CUDA)
PackedReduceResult DeviceReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
size_t n_data = preds.Size();
thrust::counting_iterator<size_t> begin(0);
thrust::counting_iterator<size_t> end = begin + n_data;
auto s_label = labels.DeviceSpan();
auto s_preds = preds.DeviceSpan();
auto s_weights = weights.DeviceSpan();
bool const is_null_weight = weights.Size() == 0;
auto d_policy = policy_;
dh::XGBCachingDeviceAllocator<char> alloc;
PackedReduceResult result = thrust::transform_reduce(
thrust::cuda::par(alloc),
begin, end,
[=] XGBOOST_DEVICE(size_t idx) {
bst_float weight = is_null_weight ? 1.0f : s_weights[idx];
bst_float residue = d_policy.EvalRow(s_label[idx], s_preds[idx]);
residue *= weight;
return PackedReduceResult{ residue, weight };
},
PackedReduceResult(),
thrust::plus<PackedReduceResult>());
return result;
}
#endif // XGBOOST_USE_CUDA
PackedReduceResult Reduce(
const GenericParameter &tparam,
int device,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
PackedReduceResult result;
if (device < 0) {
result = CpuReduceMetrics(weights, labels, preds);
}
#if defined(XGBOOST_USE_CUDA)
else { // NOLINT
device_ = device;
preds.SetDevice(device_);
labels.SetDevice(device_);
weights.SetDevice(device_);
dh::safe_cuda(cudaSetDevice(device_));
result = DeviceReduceMetrics(weights, labels, preds);
}
#endif // defined(XGBOOST_USE_CUDA)
return result;
}
private:
EvalRow policy_;
#if defined(XGBOOST_USE_CUDA)
int device_{-1};
#endif // defined(XGBOOST_USE_CUDA)
};
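// Added illustration (not part of the original xgboost source): a minimal
// host-only sketch of the same reduction contract, using raw pointers instead
// of HostDeviceVector. The name ToyReduce and its arguments are hypothetical
// and exist only to show how EvalRow/GetFinal compose into a weighted mean.
template <typename Policy>
bst_float ToyReduce(const bst_float* labels, const bst_float* preds,
                    const bst_float* weights, size_t n, Policy policy) {
  bst_float residue_sum = 0.0f;
  bst_float weights_sum = 0.0f;
  for (size_t i = 0; i < n; ++i) {
    // missing weights mean every row counts with weight 1, as in CpuReduceMetrics
    const bst_float wt = (weights == nullptr) ? 1.0f : weights[i];
    residue_sum += policy.EvalRow(labels[i], preds[i]) * wt;
    weights_sum += wt;
  }
  return Policy::GetFinal(residue_sum, weights_sum);
}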
struct EvalRowRMSE {
char const *Name() const {
return "rmse";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = label - pred;
return diff * diff;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum);
}
};
struct EvalRowRMSLE {
char const* Name() const {
return "rmsle";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = std::log1p(label) - std::log1p(pred);
return diff * diff;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum);
}
};
struct EvalRowMAE {
const char *Name() const {
return "mae";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs(label - pred);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowLogLoss {
const char *Name() const {
return "logloss";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
const bst_float pneg = 1.0f - py;
if (py < eps) {
return -y * std::log(eps) - (1.0f - y) * std::log(1.0f - eps);
} else if (pneg < eps) {
return -y * std::log(1.0f - eps) - (1.0f - y) * std::log(eps);
} else {
return -y * std::log(py) - (1.0f - y) * std::log(pneg);
}
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowMPHE {
char const *Name() const {
return "mphe";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = label - pred;
return std::sqrt( 1 + diff * diff) - 1;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalError {
explicit EvalError(const char* param) {
if (param != nullptr) {
CHECK_EQ(sscanf(param, "%f", &threshold_), 1)
<< "unable to parse the threshold value for the error metric";
has_param_ = true;
} else {
threshold_ = 0.5f;
has_param_ = false;
}
}
const char *Name() const {
static std::string name;
if (has_param_) {
std::ostringstream os;
os << "error";
if (threshold_ != 0.5f) os << '@' << threshold_;
name = os.str();
return name.c_str();
} else {
return "error";
}
}
XGBOOST_DEVICE bst_float EvalRow(
bst_float label, bst_float pred) const {
// assume label is in [0,1]
return pred > threshold_ ? 1.0f - label : label;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
private:
bst_float threshold_;
bool has_param_;
};
struct EvalPoissonNegLogLik {
const char *Name() const {
return "poisson-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
if (py < eps) py = eps;
return common::LogGamma(y + 1.0f) + py - std::log(py) * y;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalGammaDeviance {
const char *Name() const {
return "gamma-deviance";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float epsilon = 1.0e-9;
bst_float tmp = label / (pred + epsilon);
return tmp - std::log(tmp) - 1;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return 2 * esum;
}
};
struct EvalGammaNLogLik {
static const char *Name() {
return "gamma-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
bst_float psi = 1.0;
bst_float theta = -1. / py;
bst_float a = psi;
bst_float b = -std::log(-theta);
bst_float c = 1. / psi * std::log(y/psi) - std::log(y) - common::LogGamma(1. / psi);
return -((y * theta - b) / a + c);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalTweedieNLogLik {
explicit EvalTweedieNLogLik(const char* param) {
CHECK(param != nullptr)
<< "tweedie-nloglik must be in format tweedie-nloglik@rho";
rho_ = atof(param);
CHECK(rho_ < 2 && rho_ >= 1)
<< "tweedie variance power must be in interval [1, 2)";
}
const char *Name() const {
static std::string name;
std::ostringstream os;
os << "tweedie-nloglik@" << rho_;
name = os.str();
return name.c_str();
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float p) const {
bst_float a = y * std::exp((1 - rho_) * std::log(p)) / (1 - rho_);
bst_float b = std::exp((2 - rho_) * std::log(p)) / (2 - rho_);
return -a + b;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
protected:
bst_float rho_;
};
/*!
* \brief base class of element-wise evaluation
 * \tparam Policy the element-wise evaluation policy providing EvalRow and GetFinal
*/
template<typename Policy>
struct EvalEWiseBase : public Metric {
EvalEWiseBase() = default;
explicit EvalEWiseBase(char const* policy_param) :
policy_{policy_param}, reducer_{policy_} {}
bst_float Eval(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
bool distributed) override {
if (info.labels_.Size() == 0) {
LOG(WARNING) << "label set is empty";
}
CHECK_EQ(preds.Size(), info.labels_.Size())
<< "label and prediction size not match, "
<< "hint: use merror or mlogloss for multi-class classification";
int device = tparam_->gpu_id;
auto result =
reducer_.Reduce(*tparam_, device, info.weights_, info.labels_, preds);
double dat[2] { result.Residue(), result.Weights() };
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
return Policy::GetFinal(dat[0], dat[1]);
}
const char* Name() const override {
return policy_.Name();
}
private:
Policy policy_;
ElementWiseMetricsReduction<Policy> reducer_{policy_};
};
XGBOOST_REGISTER_METRIC(RMSE, "rmse")
.describe("Rooted mean square error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSE>(); });
XGBOOST_REGISTER_METRIC(RMSLE, "rmsle")
.describe("Rooted mean square log error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSLE>(); });
XGBOOST_REGISTER_METRIC(MAE, "mae")
.describe("Mean absolute error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAE>(); });
XGBOOST_REGISTER_METRIC(MPHE, "mphe")
.describe("Mean Pseudo Huber error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMPHE>(); });
XGBOOST_REGISTER_METRIC(LogLoss, "logloss")
.describe("Negative loglikelihood for logistic regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowLogLoss>(); });
XGBOOST_REGISTER_METRIC(PossionNegLoglik, "poisson-nloglik")
.describe("Negative loglikelihood for poisson regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalPoissonNegLogLik>(); });
XGBOOST_REGISTER_METRIC(GammaDeviance, "gamma-deviance")
.describe("Residual deviance for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaDeviance>(); });
XGBOOST_REGISTER_METRIC(GammaNLogLik, "gamma-nloglik")
.describe("Negative log-likelihood for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaNLogLik>(); });
XGBOOST_REGISTER_METRIC(Error, "error")
.describe("Binary classification error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalError>(param); });
XGBOOST_REGISTER_METRIC(TweedieNLogLik, "tweedie-nloglik")
.describe("tweedie-nloglik@rho for tweedie regression.")
.set_body([](const char* param) {
return new EvalEWiseBase<EvalTweedieNLogLik>(param);
});
} // namespace metric
} // namespace xgboost
|
bed30922a322bfbc9a8f51467bbfc2910857b86a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@author Mark Gates
@author Azzam Haidar
@generated from zlaset.cu normal z -> s, Fri Mar 13 15:22:19 2015
*/
#include "common_magma.h"
#include "batched_kernel_param.h"
// To deal with really large matrices, this launches multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64.
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
// BLK_X and BLK_Y need to be equal for slaset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to slaset, slacpy, slag2d, clag2z, sgeadd.
*/
static __device__
void slaset_full_device(
int m, int n,
float offdiag, float diag,
float *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag || above diag || offdiag == diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || MAGMA_S_EQUAL( offdiag, diag )));
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block or offdiag == diag
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else
A[j*lda] = offdiag;
}
}
}
}
/*
Similar to slaset_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slaset_lower_device(
int m, int n,
float offdiag, float diag,
float *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind > iby+j )
A[j*lda] = offdiag;
}
}
}
}
/*
Similar to slaset_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slaset_upper_device(
int m, int n,
float offdiag, float diag,
float *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind < iby+j )
A[j*lda] = offdiag;
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions.
*/
__global__
void slaset_full_kernel(
int m, int n,
float offdiag, float diag,
float *dA, int ldda )
{
slaset_full_device(m, n, offdiag, diag, dA, ldda);
}
__global__
void slaset_lower_kernel(
int m, int n,
float offdiag, float diag,
float *dA, int ldda )
{
slaset_lower_device(m, n, offdiag, diag, dA, ldda);
}
__global__
void slaset_upper_kernel(
int m, int n,
float offdiag, float diag,
float *dA, int ldda )
{
slaset_upper_device(m, n, offdiag, diag, dA, ldda);
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void slaset_full_kernel_batched(
int m, int n,
float offdiag, float diag,
float **dAarray, int ldda )
{
int batchid = blockIdx.z;
slaset_full_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
__global__
void slaset_lower_kernel_batched(
int m, int n,
float offdiag, float diag,
float **dAarray, int ldda )
{
int batchid = blockIdx.z;
slaset_lower_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
__global__
void slaset_upper_kernel_batched(
int m, int n,
float offdiag, float diag,
float **dAarray, int ldda )
{
int batchid = blockIdx.z;
slaset_upper_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
//////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SLASET_Q initializes a 2-D array A to DIAG on the diagonal and
OFFDIAG on the off-diagonals.
This is the same as SLASET, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
offdiag REAL
The scalar OFFDIAG. (In LAPACK this is called ALPHA.)
@param[in]
diag REAL
The scalar DIAG. (In LAPACK this is called BETA.)
@param[in]
dA REAL array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j;
A(i,i) = DIAG, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C"
void magmablas_slaset_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
float offdiag, float diag,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue)
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if (uplo == MagmaLower) {
for( int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( slaset_lower_kernel), dim3(grid), dim3(threads), 0, queue ,
mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
hipLaunchKernelGGL(( slaset_full_kernel), dim3(grid), dim3(threads), 0, queue ,
mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
else if (uplo == MagmaUpper) {
for( int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( slaset_upper_kernel), dim3(grid), dim3(threads), 0, queue ,
mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
hipLaunchKernelGGL(( slaset_full_kernel), dim3(grid), dim3(threads), 0, queue ,
mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
else {
        // if contiguous in memory & set to zero, hipMemset is faster.
// TODO: use hipMemset2D ?
if ( m == ldda &&
MAGMA_S_EQUAL( offdiag, MAGMA_S_ZERO ) &&
MAGMA_S_EQUAL( diag, MAGMA_S_ZERO ) )
{
size_t size = m*n;
hipError_t err = hipMemsetAsync( dA, 0, size*sizeof(float), queue );
assert( err == hipSuccess );
}
else {
for( int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( slaset_full_kernel), dim3(grid), dim3(threads), 0, queue ,
mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
hipLaunchKernelGGL(( slaset_full_kernel), dim3(grid), dim3(threads), 0, queue ,
mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
}
}
/**
@see magmablas_slaset_q
@ingroup magma_saux2
********************************************************************/
extern "C"
void magmablas_slaset(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
float offdiag, float diag,
magmaFloat_ptr dA, magma_int_t ldda )
{
magmablas_slaset_q( uplo, m, n, offdiag, diag, dA, ldda, magma_stream );
}
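// Added usage sketch (not part of the original MAGMA source): fills a device
// matrix with zeros off the diagonal and ones on the diagonal, i.e. an
// identity-like rectangular matrix. The caller supplies dA and ldda; the
// wrapper name below is hypothetical.
extern "C"
void magmablas_slaset_identity_example(
    magma_int_t m, magma_int_t n,
    magmaFloat_ptr dA, magma_int_t ldda )
{
    magmablas_slaset( MagmaFull, m, n, MAGMA_S_ZERO, MAGMA_S_ONE, dA, ldda );
}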
////////////////////////////////////////////////////////////////////////////////////////
extern "C"
void magmablas_slaset_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
float offdiag, float diag,
magmaFloat_ptr dAarray[], magma_int_t ldda,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount );
if (uplo == MagmaLower) {
hipLaunchKernelGGL(( slaset_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, offdiag, diag, dAarray, ldda);
}
else if (uplo == MagmaUpper) {
hipLaunchKernelGGL(( slaset_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, offdiag, diag, dAarray, ldda);
}
else {
hipLaunchKernelGGL(( slaset_full_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, offdiag, diag, dAarray, ldda);
}
}
| bed30922a322bfbc9a8f51467bbfc2910857b86a.cu | /*
-- MAGMA (version 1.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@author Mark Gates
@author Azzam Haidar
@generated from zlaset.cu normal z -> s, Fri Mar 13 15:22:19 2015
*/
#include "common_magma.h"
#include "batched_kernel_param.h"
// To deal with really large matrices, this launchs multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64.
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
// BLK_X and BLK_Y need to be equal for slaset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to slaset, slacpy, slag2d, clag2z, sgeadd.
*/
static __device__
void slaset_full_device(
int m, int n,
float offdiag, float diag,
float *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag || above diag || offdiag == diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || MAGMA_S_EQUAL( offdiag, diag )));
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block or offdiag == diag
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else
A[j*lda] = offdiag;
}
}
}
}
/*
Similar to slaset_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slaset_lower_device(
int m, int n,
float offdiag, float diag,
float *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind > iby+j )
A[j*lda] = offdiag;
}
}
}
}
/*
Similar to slaset_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slaset_upper_device(
int m, int n,
float offdiag, float diag,
float *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind < iby+j )
A[j*lda] = offdiag;
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions.
*/
__global__
void slaset_full_kernel(
int m, int n,
float offdiag, float diag,
float *dA, int ldda )
{
slaset_full_device(m, n, offdiag, diag, dA, ldda);
}
__global__
void slaset_lower_kernel(
int m, int n,
float offdiag, float diag,
float *dA, int ldda )
{
slaset_lower_device(m, n, offdiag, diag, dA, ldda);
}
__global__
void slaset_upper_kernel(
int m, int n,
float offdiag, float diag,
float *dA, int ldda )
{
slaset_upper_device(m, n, offdiag, diag, dA, ldda);
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void slaset_full_kernel_batched(
int m, int n,
float offdiag, float diag,
float **dAarray, int ldda )
{
int batchid = blockIdx.z;
slaset_full_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
__global__
void slaset_lower_kernel_batched(
int m, int n,
float offdiag, float diag,
float **dAarray, int ldda )
{
int batchid = blockIdx.z;
slaset_lower_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
__global__
void slaset_upper_kernel_batched(
int m, int n,
float offdiag, float diag,
float **dAarray, int ldda )
{
int batchid = blockIdx.z;
slaset_upper_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
//////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SLASET_Q initializes a 2-D array A to DIAG on the diagonal and
OFFDIAG on the off-diagonals.
This is the same as SLASET, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
offdiag REAL
The scalar OFFDIAG. (In LAPACK this is called ALPHA.)
@param[in]
diag REAL
The scalar DIAG. (In LAPACK this is called BETA.)
@param[in]
dA REAL array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j;
A(i,i) = DIAG, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C"
void magmablas_slaset_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
float offdiag, float diag,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue)
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if (uplo == MagmaLower) {
for( int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
slaset_lower_kernel<<< grid, threads, 0, queue >>>
( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
slaset_full_kernel<<< grid, threads, 0, queue >>>
( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
else if (uplo == MagmaUpper) {
for( int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
slaset_upper_kernel<<< grid, threads, 0, queue >>>
( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
slaset_full_kernel<<< grid, threads, 0, queue >>>
( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
else {
        // if contiguous in memory & set to zero, cudaMemset is faster.
// TODO: use cudaMemset2D ?
if ( m == ldda &&
MAGMA_S_EQUAL( offdiag, MAGMA_S_ZERO ) &&
MAGMA_S_EQUAL( diag, MAGMA_S_ZERO ) )
{
size_t size = m*n;
cudaError_t err = cudaMemsetAsync( dA, 0, size*sizeof(float), queue );
assert( err == cudaSuccess );
}
else {
for( int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
slaset_full_kernel<<< grid, threads, 0, queue >>>
( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
slaset_full_kernel<<< grid, threads, 0, queue >>>
( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
}
}
/**
@see magmablas_slaset_q
@ingroup magma_saux2
********************************************************************/
extern "C"
void magmablas_slaset(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
float offdiag, float diag,
magmaFloat_ptr dA, magma_int_t ldda )
{
magmablas_slaset_q( uplo, m, n, offdiag, diag, dA, ldda, magma_stream );
}
////////////////////////////////////////////////////////////////////////////////////////
extern "C"
void magmablas_slaset_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
float offdiag, float diag,
magmaFloat_ptr dAarray[], magma_int_t ldda,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount );
if (uplo == MagmaLower) {
slaset_lower_kernel_batched<<< grid, threads, 0, queue >>> (m, n, offdiag, diag, dAarray, ldda);
}
else if (uplo == MagmaUpper) {
slaset_upper_kernel_batched<<< grid, threads, 0, queue >>> (m, n, offdiag, diag, dAarray, ldda);
}
else {
slaset_full_kernel_batched<<< grid, threads, 0, queue >>> (m, n, offdiag, diag, dAarray, ldda);
}
}
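// Added usage sketch (not part of the original MAGMA source): zeroes every
// matrix in a batch with the batched setter above. dAarray, ldda, batchCount
// and queue are caller-provided; the wrapper name is hypothetical.
extern "C"
void magmablas_slaset_batched_zero_example(
    magma_uplo_t uplo, magma_int_t m, magma_int_t n,
    magmaFloat_ptr dAarray[], magma_int_t ldda,
    magma_int_t batchCount, magma_queue_t queue)
{
    magmablas_slaset_batched( uplo, m, n, MAGMA_S_ZERO, MAGMA_S_ZERO,
                              dAarray, ldda, batchCount, queue );
}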
|
e3c2edeadb728ab01258085c9662fbdd397b59d4.hip | // !!! This is a file automatically generated by hipify!!!
/************************************************************************************************/
/************ Arturo Gonzalez Bencomo 172906, Arturo Torre Gonzalez 90226 ***********************/
/************************************************************************************************/
/********************************** MatrixMul.cu ************************************************/
/************************************************************************************************/
/** This program, written in C++, performs a matrix multiplication in parallel using ***********/
/** CUDA. ***************************************************************************************/
/* Import libraries */
#include <iostream> //Equivalent to stdio.h
#include <vector> //vector class that allows creating dynamic arrays.
#include <stdlib.h> //C stdlib
#include <time.h> //Time function handling
#include <hip/hip_runtime.h> //CUDA/HIP runtime manager
#include "kernel.h" //Import the kernel prototype header
#include "kernel.hip" //Kernel implementation
#include "dev_array.h" //Array-type data structure
#include <math.h> //Math functions, needed to fill the initial matrix
using namespace std;
int main()
{
    // This program will carry out the matrix multiplication A*B and assign the result to C
    int N = 16;
    int SIZE = N*N; //Size of the matrices: 16x16
    // Allocate memory space for the matrices in main memory
vector<float> h_A(SIZE);
vector<float> h_B(SIZE);
vector<float> h_C(SIZE);
    // Initialize the matrices in the main program with initial sine and cosine values (functions from the math.h library)
for (int i=0; i<N; i++){
for (int j=0; j<N; j++){
h_A[i*N+j] = sin(i);
h_B[i*N+j] = cos(i);
}
}
    // Reserve memory space on the GPU
dev_array<float> d_A(SIZE);
dev_array<float> d_B(SIZE);
dev_array<float> d_C(SIZE);
    // Copy the variables from main memory to device memory
d_A.set(&h_A[0], SIZE);
d_B.set(&h_B[0], SIZE);
    //Call the matrixMultiplication function defined in kernel.cu, which carries out the parallel processing.
matrixMultiplication(d_A.getData(), d_B.getData(), d_C.getData(), N);
//
hipDeviceSynchronize();
d_C.get(&h_C[0], SIZE);
hipDeviceSynchronize();
    // Print the original matrices and the output to verify the operation
    // h_A
    cout << "MATRIX A\n";
for (int ROW=0; ROW < N; ROW++){
for (int COL=0; COL < N; COL++){
cout << h_A[ROW * N + COL];
cout << " ";
}
cout << "\n";
}
// h_B
cout << "MATRIZ B\n";
for (int ROW=0; ROW < N; ROW++){
for (int COL=0; COL < N; COL++){
cout << h_B[ROW * N + COL];
cout << " ";
}
cout << "\n";
}
// h_C
cout << "MATRIZ C\n";
for (int ROW=0; ROW < N; ROW++){
for (int COL=0; COL < N; COL++){
cout << h_C[ROW * N + COL];
cout << " ";
}
cout << "\n";
}
return 0;
}
| e3c2edeadb728ab01258085c9662fbdd397b59d4.cu | /************************************************************************************************/
/************ Arturo Gonzalez Bencomo 172906, Arturo Torre Gonzalez 90226 ***********************/
/************************************************************************************************/
/********************************** MatrixMul.cu ************************************************/
/************************************************************************************************/
/** This program, written in C++, performs a matrix multiplication in parallel using ***********/
/** CUDA. ***************************************************************************************/
/* Import libraries */
#include <iostream> //Equivalent to stdio.h
#include <vector> //vector class that allows creating dynamic arrays.
#include <stdlib.h> //C stdlib
#include <time.h> //Time function handling
#include <cuda_runtime.h> //CUDA runtime manager
#include "kernel.h" //Import the kernel prototype header
#include "kernel.cu" //Kernel implementation
#include "dev_array.h" //Array-type data structure
#include <math.h> //Math functions, needed to fill the initial matrix
using namespace std;
int main()
{
    // This program will carry out the matrix multiplication A*B and assign the result to C
    int N = 16;
    int SIZE = N*N; //Size of the matrices: 16x16
    // Allocate memory space for the matrices in main memory
vector<float> h_A(SIZE);
vector<float> h_B(SIZE);
vector<float> h_C(SIZE);
    // Initialize the matrices in the main program with initial sine and cosine values (functions from the math.h library)
for (int i=0; i<N; i++){
for (int j=0; j<N; j++){
h_A[i*N+j] = sin(i);
h_B[i*N+j] = cos(i);
}
}
    // Reserve memory space on the GPU
dev_array<float> d_A(SIZE);
dev_array<float> d_B(SIZE);
dev_array<float> d_C(SIZE);
    // Copy the variables from main memory to device memory
d_A.set(&h_A[0], SIZE);
d_B.set(&h_B[0], SIZE);
    //Call the matrixMultiplication function defined in kernel.cu, which carries out the parallel processing.
matrixMultiplication(d_A.getData(), d_B.getData(), d_C.getData(), N);
//
cudaDeviceSynchronize();
d_C.get(&h_C[0], SIZE);
cudaDeviceSynchronize();
    // Print the original matrices and the output to verify the operation
    // h_A
    cout << "MATRIX A\n";
for (int ROW=0; ROW < N; ROW++){
for (int COL=0; COL < N; COL++){
cout << h_A[ROW * N + COL];
cout << " ";
}
cout << "\n";
}
// h_B
cout << "MATRIZ B\n";
for (int ROW=0; ROW < N; ROW++){
for (int COL=0; COL < N; COL++){
cout << h_B[ROW * N + COL];
cout << " ";
}
cout << "\n";
}
// h_C
cout << "MATRIZ C\n";
for (int ROW=0; ROW < N; ROW++){
for (int COL=0; COL < N; COL++){
cout << h_C[ROW * N + COL];
cout << " ";
}
cout << "\n";
}
return 0;
}
|
abd0d578d01b0207d0b626e337dacdf4606712df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "basic_kernels_cuda.cuh"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
__global__ void DFSPH_setVector3dBufferToZero_kernel(Vector3d* buff, unsigned int buff_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= buff_size) { return; }
buff[i] = Vector3d(0, 0, 0);
}
template<class T> __global__ void cuda_setBufferToValue_kernel(T* buff, T value, unsigned int buff_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= buff_size) { return; }
buff[i] = value;
}
template __global__ void cuda_setBufferToValue_kernel<Vector3d>(Vector3d* buff, Vector3d value, unsigned int buff_size);
template __global__ void cuda_setBufferToValue_kernel<int>(int* buff, int value, unsigned int buff_size);
template __global__ void cuda_setBufferToValue_kernel<RealCuda>(RealCuda* buff, RealCuda value, unsigned int buff_size);
template<class T> __global__ void cuda_applyFactorToBuffer_kernel(T* buff, T value, unsigned int buff_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= buff_size) { return; }
buff[i] *= value;
}
template __global__ void cuda_applyFactorToBuffer_kernel<Vector3d>(Vector3d* buff, Vector3d value, unsigned int buff_size);
template __global__ void cuda_applyFactorToBuffer_kernel<int>(int* buff, int value, unsigned int buff_size);
template __global__ void cuda_applyFactorToBuffer_kernel<RealCuda>(RealCuda* buff, RealCuda value, unsigned int buff_size);
//note on the clamping type
//the main problem is that there are multiple ways to clamp a number and, depending on the type of data, there may be more
//for now I need the Vector3d type, so there are the 4 obvious ones (min, max, absolute-value min and max), but also a clamping on the length of the vector
//also making it generic is probably impossible since the setters are different for the Vector3d
//so I need a specialized kernel for the Vector3d
//so for now here is how that parameter works
// 0 : keep anything below the parameter
// 1 : keep anything above the parameter
// 2 : keep anything below the absolute value
// 3 : keep anything above the absolute value
// 4 : vector special: if the norm is above the value, normalize it to the value; I'll read the first cell of the vector to know the clamping value
/// TODO implement it all; for now I only need 2 and 4, so the others are tagged with an asm("trap")
template<int clamping_type> __global__ void cuda_clampV3dBufferToValue_kernel(Vector3d* buff, Vector3d value, unsigned int buff_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= buff_size) { return; }
if (clamping_type==2) {
Vector3d v = buff[i];
v.toMin(value);
value *= -1;
v.toMax(value);
buff[i] = v;
}else if (clamping_type == 4) {
RealCuda l = buff[i].norm();
if (l > value.x) {
buff[i] *= value.x / l;
}
}else {
asm("trap;");
}
}
template __global__ void cuda_clampV3dBufferToValue_kernel<0>(Vector3d* buff, Vector3d value, unsigned int buff_size);
template __global__ void cuda_clampV3dBufferToValue_kernel<1>(Vector3d* buff, Vector3d value, unsigned int buff_size);
template __global__ void cuda_clampV3dBufferToValue_kernel<2>(Vector3d* buff, Vector3d value, unsigned int buff_size);
template __global__ void cuda_clampV3dBufferToValue_kernel<3>(Vector3d* buff, Vector3d value, unsigned int buff_size);
template __global__ void cuda_clampV3dBufferToValue_kernel<4>(Vector3d* buff, Vector3d value, unsigned int buff_size);
template<class T, class T2>
__global__ void cuda_copyBufferCrossType_kernel(T* out, T2* in, unsigned int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= size) { return; }
out[i] = in[i];
}
template __global__ void cuda_copyBufferCrossType_kernel<RealCuda, int>(RealCuda* out, int* in, unsigned int size);
template __global__ void cuda_copyBufferCrossType_kernel<RealCuda, unsigned int>(RealCuda* out, unsigned int* in, unsigned int size);
template __global__ void cuda_copyBufferCrossType_kernel<unsigned int, RealCuda>(unsigned int* out, RealCuda* in, unsigned int size);
template __global__ void cuda_copyBufferCrossType_kernel<int, RealCuda>(int* out, RealCuda* in, unsigned int size);
__global__ void DFSPH_Histogram_kernel(unsigned int* in, unsigned int* out, unsigned int num_particles) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= num_particles) { return; }
atomicAdd(&(out[in[i]]), 1);
}
__global__ void DFSPH_setBufferValueToItself_kernel(unsigned int* buff, unsigned int buff_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= buff_size) { return; }
buff[i] = i;
}
__global__ void apply_delta_to_buffer_kernel(Vector3d* buffer, Vector3d delta, const unsigned int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= size) { return; }
buffer[i] += delta;
}
template<class T>
__global__ void fillRandom_kernel(unsigned int *buff, unsigned int nbElements, T min, T max, hiprandState_t *state) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= 1) { return; }
hiprandState_t localState = *state;
for (int j = 0; j < nbElements; ++j) {
T x = hiprand(&localState);
x *= (max - min);
x += min;
		buff[j] = x;  // index with j so every element is filled; i is always 0 in this single-thread kernel
}
*state = localState;
}
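// Added illustration (not part of the original file): a companion kernel that
// seeds the hiprandState consumed by fillRandom_kernel above; nothing in this
// file initializes it otherwise. The kernel name and the single-thread launch
// convention mirror fillRandom_kernel and are assumptions, not project API.
__global__ void initRandomState_example_kernel(hiprandState_t *state, unsigned long long seed) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= 1) { return; }
	hiprand_init(seed, 0, 0, state);
}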
| abd0d578d01b0207d0b626e337dacdf4606712df.cu | #include "basic_kernels_cuda.cuh"
#include <curand.h>
#include <curand_kernel.h>
__global__ void DFSPH_setVector3dBufferToZero_kernel(Vector3d* buff, unsigned int buff_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= buff_size) { return; }
buff[i] = Vector3d(0, 0, 0);
}
template<class T> __global__ void cuda_setBufferToValue_kernel(T* buff, T value, unsigned int buff_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= buff_size) { return; }
buff[i] = value;
}
template __global__ void cuda_setBufferToValue_kernel<Vector3d>(Vector3d* buff, Vector3d value, unsigned int buff_size);
template __global__ void cuda_setBufferToValue_kernel<int>(int* buff, int value, unsigned int buff_size);
template __global__ void cuda_setBufferToValue_kernel<RealCuda>(RealCuda* buff, RealCuda value, unsigned int buff_size);
template<class T> __global__ void cuda_applyFactorToBuffer_kernel(T* buff, T value, unsigned int buff_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= buff_size) { return; }
buff[i] *= value;
}
template __global__ void cuda_applyFactorToBuffer_kernel<Vector3d>(Vector3d* buff, Vector3d value, unsigned int buff_size);
template __global__ void cuda_applyFactorToBuffer_kernel<int>(int* buff, int value, unsigned int buff_size);
template __global__ void cuda_applyFactorToBuffer_kernel<RealCuda>(RealCuda* buff, RealCuda value, unsigned int buff_size);
//note on the clamping type
//the main problem is that there are multiple ways to clamp a number and, depending on the type of data, there may be more
//for now I need the Vector3d type, so there are the 4 obvious ones (min, max, absolute-value min and max), but also a clamping on the length of the vector
//also making it generic is probably impossible since the setters are different for the Vector3d
//so I need a specialized kernel for the Vector3d
//so for now here is how that parameter works
// 0 : keep anything below the parameter
// 1 : keep anything above the parameter
// 2 : keep anything below the absolute value
// 3 : keep anything above the absolute value
// 4 : vector special: if the norm is above the value, normalize it to the value; I'll read the first cell of the vector to know the clamping value
/// TODO implement it all; for now I only need 2 and 4, so the others are tagged with an asm("trap")
template<int clamping_type> __global__ void cuda_clampV3dBufferToValue_kernel(Vector3d* buff, Vector3d value, unsigned int buff_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= buff_size) { return; }
if (clamping_type==2) {
Vector3d v = buff[i];
v.toMin(value);
value *= -1;
v.toMax(value);
buff[i] = v;
}else if (clamping_type == 4) {
RealCuda l = buff[i].norm();
if (l > value.x) {
buff[i] *= value.x / l;
}
}else {
asm("trap;");
}
}
template __global__ void cuda_clampV3dBufferToValue_kernel<0>(Vector3d* buff, Vector3d value, unsigned int buff_size);
template __global__ void cuda_clampV3dBufferToValue_kernel<1>(Vector3d* buff, Vector3d value, unsigned int buff_size);
template __global__ void cuda_clampV3dBufferToValue_kernel<2>(Vector3d* buff, Vector3d value, unsigned int buff_size);
template __global__ void cuda_clampV3dBufferToValue_kernel<3>(Vector3d* buff, Vector3d value, unsigned int buff_size);
template __global__ void cuda_clampV3dBufferToValue_kernel<4>(Vector3d* buff, Vector3d value, unsigned int buff_size);
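// Added usage sketch (not part of the original file): host-side launch of the
// clamping_type == 4 specialization above, capping the norm of every vector in
// buff at max_norm. The helper name and the 256-thread block size are
// illustrative assumptions, not project API.
inline void clampV3dBufferNorm_example(Vector3d* buff, RealCuda max_norm, unsigned int buff_size) {
	unsigned int numBlocks = (buff_size + 255) / 256;
	// only value.x is read for clamping_type == 4, so the other components are irrelevant
	cuda_clampV3dBufferToValue_kernel<4> <<<numBlocks, 256>>> (buff, Vector3d(max_norm, 0, 0), buff_size);
}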
template<class T, class T2>
__global__ void cuda_copyBufferCrossType_kernel(T* out, T2* in, unsigned int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= size) { return; }
out[i] = in[i];
}
template __global__ void cuda_copyBufferCrossType_kernel<RealCuda, int>(RealCuda* out, int* in, unsigned int size);
template __global__ void cuda_copyBufferCrossType_kernel<RealCuda, unsigned int>(RealCuda* out, unsigned int* in, unsigned int size);
template __global__ void cuda_copyBufferCrossType_kernel<unsigned int, RealCuda>(unsigned int* out, RealCuda* in, unsigned int size);
template __global__ void cuda_copyBufferCrossType_kernel<int, RealCuda>(int* out, RealCuda* in, unsigned int size);
__global__ void DFSPH_Histogram_kernel(unsigned int* in, unsigned int* out, unsigned int num_particles) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= num_particles) { return; }
atomicAdd(&(out[in[i]]), 1);
}
__global__ void DFSPH_setBufferValueToItself_kernel(unsigned int* buff, unsigned int buff_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= buff_size) { return; }
buff[i] = i;
}
__global__ void apply_delta_to_buffer_kernel(Vector3d* buffer, Vector3d delta, const unsigned int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= size) { return; }
buffer[i] += delta;
}
template<class T>
__global__ void fillRandom_kernel(unsigned int *buff, unsigned int nbElements, T min, T max, curandState *state) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= 1) { return; }
curandState localState = *state;
for (int j = 0; j < nbElements; ++j) {
T x = curand(&localState);
x *= (max - min);
x += min;
		buff[j] = x;  // index with j so every element is filled; i is always 0 in this single-thread kernel
}
*state = localState;
}
|
16e9e24eabd8e9240896b890abeb2b707d0fe41d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <quda_internal.h>
#include <blas_quda.h>
#include <color_spinor_field.h>
#include <face_quda.h> // this is where the MPI / QMP depdendent code is
#include <hip/hip_complex.h>
#define REDUCE_MAX_BLOCKS 65536
#define REDUCE_DOUBLE 64
#define REDUCE_KAHAN 32
#if (__CUDA_ARCH__ >= 130)
#define REDUCE_TYPE REDUCE_DOUBLE
#define QudaSumFloat double
#define QudaSumComplex hipDoubleComplex
#define QudaSumFloat3 double3
#else
#define REDUCE_TYPE REDUCE_KAHAN
#define QudaSumFloat float
#define QudaSumComplex hipComplex
#define QudaSumFloat3 float3
#endif
// These are used for reduction kernels
static QudaSumFloat *d_reduceFloat=0;
static QudaSumComplex *d_reduceComplex=0;
static QudaSumFloat3 *d_reduceFloat3=0;
static QudaSumFloat *h_reduceFloat=0;
static QudaSumComplex *h_reduceComplex=0;
static QudaSumFloat3 *h_reduceFloat3=0;
namespace quda {
unsigned long long blas_flops;
unsigned long long blas_bytes;
}
static dim3 blasBlock;
static dim3 blasGrid;
// generated by blas_test
#include <blas_param.h>
double2 operator+(const double2& x, const double2 &y) {
return make_double2(x.x + y.x, x.y + y.y);
}
double3 operator+(const double3& x, const double3 &y) {
double3 z;
z.x = x.x + y.x; z.y = x.y + y.y; z.z = x.z + y.z;
return z;
}
__device__ float2 operator*(const float a, const float2 x) {
float2 y;
y.x = a*x.x;
y.y = a*x.y;
return y;
}
template <typename Float2>
__device__ Float2 operator+(const Float2 x, const Float2 y) {
Float2 z;
z.x = x.x + y.x;
z.y = x.y + y.y;
return z;
}
template <typename Float2>
__device__ Float2 operator+=(Float2 &x, const Float2 y) {
x.x += y.x;
x.y += y.y;
return x;
}
template <typename Float2>
__device__ Float2 operator-=(Float2 &x, const Float2 y) {
x.x -= y.x;
x.y -= y.y;
return x;
}
template <typename Float, typename Float2>
__device__ Float2 operator*=(Float2 &x, const Float a) {
x.x *= a;
x.y *= a;
return x;
}
template <typename Float>
__device__ float4 operator*=(float4 &a, const Float &b) {
a.x *= b;
a.y *= b;
a.z *= b;
a.w *= b;
return a;
}
void zeroCuda(cudaColorSpinorField &a) { a.zero(); }
// blasTuning = 1 turns off error checking
static QudaTune blasTuning = QUDA_TUNE_NO;
namespace quda {
void initBlas(void)
{
if (!d_reduceFloat) {
if (hipMalloc((void**) &d_reduceFloat, REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat)) == hipErrorMemoryAllocation) {
errorQuda("Error allocating device reduction array");
}
}
if (!d_reduceComplex) {
if (hipMalloc((void**) &d_reduceComplex, REDUCE_MAX_BLOCKS*sizeof(QudaSumComplex)) == hipErrorMemoryAllocation) {
errorQuda("Error allocating device reduction array");
}
}
if (!d_reduceFloat3) {
if (hipMalloc((void**) &d_reduceFloat3, REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat3)) == hipErrorMemoryAllocation) {
errorQuda("Error allocating device reduction array");
}
}
if (!h_reduceFloat) {
if (hipHostMalloc((void**) &h_reduceFloat, REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat)) == hipErrorMemoryAllocation) {
errorQuda("Error allocating host reduction array");
}
}
if (!h_reduceComplex) {
if (hipHostMalloc((void**) &h_reduceComplex, REDUCE_MAX_BLOCKS*sizeof(QudaSumComplex)) == hipErrorMemoryAllocation) {
errorQuda("Error allocating host reduction array");
}
}
if (!h_reduceFloat3) {
if (hipHostMalloc((void**) &h_reduceFloat3, REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat3)) == hipErrorMemoryAllocation) {
errorQuda("Error allocating host reduction array");
}
}
}
void endBlas(void)
{
if (d_reduceFloat) {
hipFree(d_reduceFloat);
d_reduceFloat = 0;
}
if (d_reduceComplex) {
hipFree(d_reduceComplex);
d_reduceComplex = 0;
}
if (d_reduceFloat3) {
hipFree(d_reduceFloat3);
d_reduceFloat3 = 0;
}
if (h_reduceFloat) {
hipHostFree(h_reduceFloat);
h_reduceFloat = 0;
}
if (h_reduceComplex) {
hipHostFree(h_reduceComplex);
h_reduceComplex = 0;
}
if (h_reduceFloat3) {
hipHostFree(h_reduceFloat3);
h_reduceFloat3 = 0;
}
}
void setBlasTuning(QudaTune tune)
{
blasTuning = tune;
}
void setBlasParam(int kernel, int prec, int threads, int blocks)
{
blas_threads[kernel][prec] = threads;
blas_blocks[kernel][prec] = blocks;
}
}
void setBlock(int kernel, int length, QudaPrecision precision)
{
int prec;
switch(precision) {
case QUDA_HALF_PRECISION:
prec = 0;
break;
case QUDA_SINGLE_PRECISION:
prec = 1;
break;
case QUDA_DOUBLE_PRECISION:
prec = 2;
break;
}
int blocks = min(blas_blocks[kernel][prec], max(length/blas_threads[kernel][prec], 1));
blasBlock.x = blas_threads[kernel][prec];
blasBlock.y = 1;
blasBlock.z = 1;
blasGrid.x = blocks;
blasGrid.y = 1;
blasGrid.z = 1;
}
#if (__CUDA_ARCH__ >= 130)
static __inline__ __device__ double2 fetch_double2(texture<int4, 1> t, int i)
{
int4 v = tex1Dfetch(t,i);
return make_double2(__hiloint2double(v.y, v.x), __hiloint2double(v.w, v.z));
}
#else
static __inline__ __device__ double2 fetch_double2(texture<int4, 1> t, int i)
{
// do nothing
return make_double2(0.0, 0.0);
}
#endif
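// Added illustration (not part of the original QUDA source): the inverse of
// fetch_double2 above -- splitting a double into the low/high 32-bit halves
// that one half of an int4 texel stores. __double2loint and __double2hiint
// are standard device intrinsics; the helper name is hypothetical.
static __inline__ __device__ int2 split_double(double a)
{
  return make_int2(__double2loint(a), __double2hiint(a));
}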
float2 __device__ read_Float2(float2 *x, int i) {
return make_float2(x[i].x, x[i].y);
}
double2 __device__ read_Float2(double2 *x, int i) {
return make_double2(x[i].x, x[i].y);
}
#if (__CUDA_ARCH__ >= 200)
#define READ_DOUBLE2_TEXTURE(x, i) \
read_Float2(x, i)
#else
#define READ_DOUBLE2_TEXTURE(x, i) \
fetch_double2(x##TexDouble2, i)
#endif
#define READ_FLOAT2_TEXTURE(x, i) \
tex1Dfetch(x##TexSingle2, i)
float2 __device__ make_Float2(float2 x) {
return make_float2(x.x, x.y);
}
double2 __device__ make_Float2(double2 x) {
return make_double2(x.x, x.y);
}
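// Half-precision spinors are stored as short components plus one float norm per site.
// The shorts are read back through normalized-float textures (values scaled into
// [-1,1]) and multiplied by the per-site norm a##c to recover single precision.
// The plain macros handle the Wilson (nSpin=4) layout of six float4 per site; the
// _ST variants handle the staggered (nSpin=1) layout of three float2 per site.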
#define RECONSTRUCT_HALF_SPINOR(a, texHalf, texNorm, length) \
float a##c = tex1Dfetch(texNorm, i); \
float4 a##0 = tex1Dfetch(texHalf, i + 0*length); \
float4 a##1 = tex1Dfetch(texHalf, i + 1*length); \
float4 a##2 = tex1Dfetch(texHalf, i + 2*length); \
float4 a##3 = tex1Dfetch(texHalf, i + 3*length); \
float4 a##4 = tex1Dfetch(texHalf, i + 4*length); \
float4 a##5 = tex1Dfetch(texHalf, i + 5*length); \
a##0 *= a##c; \
a##1 *= a##c; \
a##2 *= a##c; \
a##3 *= a##c; \
a##4 *= a##c; \
a##5 *= a##c;
#define RECONSTRUCT_HALF_SPINOR_ST(a, texHalf, texNorm, length) \
float a##c = tex1Dfetch(texNorm, i); \
float2 a##0 = tex1Dfetch(texHalf, i + 0*length); \
float2 a##1 = tex1Dfetch(texHalf, i + 1*length); \
float2 a##2 = tex1Dfetch(texHalf, i + 2*length); \
(a##0) *= a##c; \
(a##1) *= a##c; \
(a##2) *= a##c;
// Some musings on how to clean up the blas code using Boost
/*#define BOOST_RECONSTRUCT_HALF_SPINOR(z, j, a, texHalf, length) \
float4 a##k tex1Dfetch(texHalf, i + j*length); \
a##k *= a##c;
#define RECONSTRUCT_HALF_SPINOR(a, texHalf, texNorm, length) \
BOOST_PP_REPEAT(6, BOOST_RECONSTRUCT_HALF_SPINOR, a, texHalf, length) \
*/
#define READ_HALF_SPINOR_TEX(a, tex, texNorm, length) \
float a##c = tex1Dfetch(texNorm, i); \
float4 a##0 = tex1Dfetch(tex, i + 0*length); \
float4 a##1 = tex1Dfetch(tex, i + 1*length); \
float4 a##2 = tex1Dfetch(tex, i + 2*length); \
float4 a##3 = tex1Dfetch(tex, i + 3*length); \
float4 a##4 = tex1Dfetch(tex, i + 4*length); \
float4 a##5 = tex1Dfetch(tex, i + 5*length);
#define READ_HALF_SPINOR(a, tex, length) \
float4 a##0 = tex1Dfetch(tex, i + 0*length); \
float4 a##1 = tex1Dfetch(tex, i + 1*length); \
float4 a##2 = tex1Dfetch(tex, i + 2*length); \
float4 a##3 = tex1Dfetch(tex, i + 3*length); \
float4 a##4 = tex1Dfetch(tex, i + 4*length); \
float4 a##5 = tex1Dfetch(tex, i + 5*length); \
float a##c = a##N[i];
#define READ_HALF_SPINOR_ST(a, tex, length) \
float2 a##0 = tex1Dfetch(tex, i + 0*length); \
float2 a##1 = tex1Dfetch(tex, i + 1*length); \
float2 a##2 = tex1Dfetch(tex, i + 2*length); \
float a##c = a##N[i];
#define FAST_ABS_MAX(a, b) fmaxf(fabsf(a), fabsf(b));
#define FAST_MAX(a, b) fmaxf(a, b);
__device__ float fast_abs_max(float4 a) {
float c0 = FAST_ABS_MAX(a.x, a.y);
float c1 = FAST_ABS_MAX(a.z, a.w);
return FAST_MAX(c0, c1);
}
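// The CONSTRUCT_* macros below are the inverse of the RECONSTRUCT_* macros above:
// they take the per-site maximum absolute value as the norm n[i] and quantize each
// component to short with the scale C = MAX_SHORT / norm, so that a normalized-float
// texture read multiplied by the stored norm recovers the original value up to
// half-precision rounding (assuming MAX_SHORT matches the texture normalization).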
#define CONSTRUCT_HALF_SPINOR_FROM_SINGLE(h, n, a, length) { \
float c0 = fast_abs_max(a##0); \
float c1 = fast_abs_max(a##1); \
c0 = FAST_MAX(c0, c1); \
float c2 = fast_abs_max(a##2); \
float c3 = fast_abs_max(a##3); \
c1 = FAST_MAX(c2, c3); \
c0 = FAST_MAX(c0, c1); \
c2 = fast_abs_max(a##4); \
c3 = fast_abs_max(a##5); \
c1 = FAST_MAX(c2, c3); \
c0 = FAST_MAX(c0, c1); \
n[i] = c0; \
float C = __fdividef(MAX_SHORT, c0); \
h[i+0*length] = make_short4((short)(C*(float)(a##0).x), (short)(C*(float)(a##0).y), \
(short)(C*(float)(a##0).z), (short)(C*(float)(a##0).w)); \
h[i+1*length] = make_short4((short)(C*(float)(a##1).x), (short)(C*(float)(a##1).y), \
(short)(C*(float)(a##1).z), (short)(C*(float)(a##1).w)); \
h[i+2*length] = make_short4((short)(C*(float)(a##2).x), (short)(C*(float)(a##2).y), \
(short)(C*(float)(a##2).z), (short)(C*(float)(a##2).w)); \
h[i+3*length] = make_short4((short)(C*(float)(a##3).x), (short)(C*(float)(a##3).y), \
(short)(C*(float)(a##3).z), (short)(C*(float)(a##3).w)); \
h[i+4*length] = make_short4((short)(C*(float)(a##4).x), (short)(C*(float)(a##4).y), \
(short)(C*(float)(a##4).z), (short)(C*(float)(a##4).w)); \
h[i+5*length] = make_short4((short)(C*(float)(a##5).x), (short)(C*(float)(a##5).y), \
(short)(C*(float)(a##5).z), (short)(C*(float)(a##5).w));}
#define CONSTRUCT_HALF_SPINOR_FROM_DOUBLE(h, n, a, length) \
{float c0 = fmaxf(fabsf((a##0).x), fabsf((a##0).y)); \
float c1 = fmaxf(fabsf((a##1).x), fabsf((a##1).y)); \
float c2 = fmaxf(fabsf((a##2).x), fabsf((a##2).y)); \
float c3 = fmaxf(fabsf((a##3).x), fabsf((a##3).y)); \
float c4 = fmaxf(fabsf((a##4).x), fabsf((a##4).y)); \
float c5 = fmaxf(fabsf((a##5).x), fabsf((a##5).y)); \
float c6 = fmaxf(fabsf((a##6).x), fabsf((a##6).y)); \
float c7 = fmaxf(fabsf((a##7).x), fabsf((a##7).y)); \
float c8 = fmaxf(fabsf((a##8).x), fabsf((a##8).y)); \
float c9 = fmaxf(fabsf((a##9).x), fabsf((a##9).y)); \
float c10 = fmaxf(fabsf((a##10).x), fabsf((a##10).y)); \
float c11 = fmaxf(fabsf((a##11).x), fabsf((a##11).y)); \
c0 = fmaxf(c0, c1); c1 = fmaxf(c2, c3); c2 = fmaxf(c4, c5); c3 = fmaxf(c6, c7); \
c4 = fmaxf(c8, c9); c5 = fmaxf(c10, c11); c0 = fmaxf(c0, c1); c1 = fmaxf(c2, c3); \
c2 = fmaxf(c4, c5); c0 = fmaxf(c0, c1); c0 = fmaxf(c0, c2); \
n[i] = c0; \
float C = __fdividef(MAX_SHORT, c0); \
h[i+0*length] = make_short4((short)(C*(float)(a##0).x), (short)(C*(float)(a##0).y), \
(short)(C*(float)(a##1).x), (short)(C*(float)(a##1).y)); \
h[i+1*length] = make_short4((short)(C*(float)(a##2).x), (short)(C*(float)(a##2).y), \
(short)(C*(float)(a##3).x), (short)(C*(float)(a##3).y)); \
h[i+2*length] = make_short4((short)(C*(float)(a##4).x), (short)(C*(float)(a##4).y), \
(short)(C*(float)(a##5).x), (short)(C*(float)(a##5).y)); \
h[i+3*length] = make_short4((short)(C*(float)(a##6).x), (short)(C*(float)(a##6).y), \
(short)(C*(float)(a##7).x), (short)(C*(float)(a##7).y)); \
h[i+4*length] = make_short4((short)(C*(float)(a##8).x), (short)(C*(float)(a##8).y), \
(short)(C*(float)(a##9).x), (short)(C*(float)(a##9).y)); \
h[i+5*length] = make_short4((short)(C*(float)(a##10).x), (short)(C*(float)(a##10).y), \
(short)(C*(float)(a##11).x), (short)(C*(float)(a##11).y));}
#define CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(h, n, a, length) \
{float c0 = fmaxf(fabsf((a##0).x), fabsf((a##0).y)); \
float c1 = fmaxf(fabsf((a##1).x), fabsf((a##1).y)); \
float c2 = fmaxf(fabsf((a##2).x), fabsf((a##2).y)); \
c0 = fmaxf(c0, c1); c0 = fmaxf(c0, c2); \
n[i] = c0; \
float C = __fdividef(MAX_SHORT, c0); \
h[i+0*length] = make_short2((short)(C*(float)(a##0).x), (short)(C*(float)(a##0).y)); \
h[i+1*length] = make_short2((short)(C*(float)(a##1).x), (short)(C*(float)(a##1).y)); \
h[i+2*length] = make_short2((short)(C*(float)(a##2).x), (short)(C*(float)(a##2).y));}
#define CONSTRUCT_HALF_SPINOR_FROM_DOUBLE_ST(h, n, a, length) \
{float c0 = fmaxf(fabsf((a##0).x), fabsf((a##0).y)); \
float c1 = fmaxf(fabsf((a##1).x), fabsf((a##1).y)); \
float c2 = fmaxf(fabsf((a##2).x), fabsf((a##2).y)); \
c0 = fmaxf(c0, c1); c0 = fmaxf(c0, c2); \
n[i] = c0; \
float C = __fdividef(MAX_SHORT, c0); \
h[i+0*length] = make_short2((short)(C*(float)(a##0).x), (short)(C*(float)(a##0).y)); \
h[i+1*length] = make_short2((short)(C*(float)(a##1).x), (short)(C*(float)(a##1).y)); \
h[i+2*length] = make_short2((short)(C*(float)(a##2).x), (short)(C*(float)(a##2).y));}
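// The *_FLOAT4 / *_FLOAT2 macro pairs below implement the same elementwise primitives
// (sums, real/imaginary dot products, axpy-style updates) for the two register layouts
// used in this file: float4 for Wilson (nSpin=4) spinors and float2 for staggered
// (nSpin=1) spinors. On sm_20 and newer several of them are written with fmaf() so the
// compiler emits fused multiply-adds.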
#define SUM_FLOAT4(sum, a) \
float sum = fabs(a.x) + fabs(a.y) + fabs(a.z) + fabs(a.w);
#define SUM_FLOAT2(sum, a) \
float sum = fabs(a.x) + fabs(a.y);
#if (__CUDA_ARCH__ < 200)
#define REAL_DOT_FLOAT4(dot, a, b) \
float dot = a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w;
#else
#define REAL_DOT_FLOAT4(dot, a, b) \
float dot = fmaf(a.x, b.x, 0.0f); \
dot = fmaf(a.y, b.y, dot); \
dot = fmaf(a.z, b.z, dot); \
dot = fmaf(a.w, b.w, dot)
#endif
#define REAL_DOT_FLOAT2(dot, a, b) \
float dot = a.x*b.x + a.y*b.y;
#if (__CUDA_ARCH__ < 200)
#define IMAG_DOT_FLOAT4(dot, a, b) \
float dot = a.x*b.y - a.y*b.x + a.z*b.w - a.w*b.z;
#else
#define IMAG_DOT_FLOAT4(dot, a, b) \
float dot = fmaf(a.x, b.y, 0.0f); \
dot = fmaf(-a.y, b.x, dot); \
dot = fmaf(a.z, b.w, dot); \
dot = fmaf(-a.w, b.z, dot)
#endif
#define IMAG_DOT_FLOAT2(dot, a, b) \
float dot = a.x*b.y - a.y*b.x;
#define AX_FLOAT4(a, X) \
X.x *= a; X.y *= a; X.z *= a; X.w *= a;
#define AX_FLOAT2(a, X) \
X.x *= a; X.y *= a;
#define XPY_FLOAT4(X, Y) \
Y.x += X.x; Y.y += X.y; Y.z += X.z; Y.w += X.w;
#define XPY_FLOAT2(X, Y) \
Y.x += X.x; Y.y += X.y;
#define XMY_FLOAT4(X, Y) \
Y.x = X.x - Y.x; Y.y = X.y - Y.y; Y.z = X.z - Y.z; Y.w = X.w - Y.w;
#define XMY_FLOAT2(X, Y) \
Y.x = X.x - Y.x; Y.y = X.y - Y.y;
#define MXPY_FLOAT4(X, Y) \
Y.x -= X.x; Y.y -= X.y; Y.z -= X.z; Y.w -= X.w;
#define MXPY_FLOAT2(X, Y) \
Y.x -= X.x; Y.y -= X.y;
#if (__CUDA_ARCH__ < 200)
#define AXPY_FLOAT4(a, X, Y) \
Y.x += a*X.x; Y.y += a*X.y; \
Y.z += a*X.z; Y.w += a*X.w;
#else
#define AXPY_FLOAT4(a, X, Y) \
Y.x = fmaf(a, X.x, Y.x); Y.y = fmaf(a, X.y, Y.y); \
Y.z = fmaf(a, X.z, Y.z); Y.w = fmaf(a, X.w, Y.w);
#endif
#define AXPY_FLOAT2(a, X, Y) \
Y.x += a*X.x; Y.y += a*X.y;
#define AXPBY_FLOAT4(a, X, b, Y) \
Y.x = b*Y.x; Y.x += a*X.x; Y.y = b*Y.y; Y.y += a*X.y; \
Y.z = b*Y.z; Y.z += a*X.z; Y.w = b*Y.w; Y.w += a*X.w;
#define AXPBY_FLOAT2(a, X, b, Y) \
Y.x = b*Y.x; Y.x += a*X.x; Y.y = b*Y.y; Y.y += a*X.y;
#if (__CUDA_ARCH__ < 200)
#define XPAY_FLOAT4(X, a, Y) \
Y.x = X.x + a*Y.x; Y.y = X.y + a*Y.y; \
Y.z = X.z + a*Y.z; Y.w = X.w + a*Y.w;
#else
#define XPAY_FLOAT4(X, a, Y) \
Y.x = fmaf(a, Y.x, X.x); Y.y = fmaf(a, Y.y, X.y); \
Y.z = fmaf(a, Y.z, X.z); Y.w = fmaf(a, Y.w, X.w);
#endif
#define XPAY_FLOAT2(X, a, Y) \
Y.x = X.x + a*Y.x; Y.y = X.y + a*Y.y;
#if (__CUDA_ARCH__ < 200)
#define CAXPY_FLOAT4(a, X, Y) \
Y.x += a.x*X.x; Y.x -= a.y*X.y; \
Y.y += a.y*X.x; Y.y += a.x*X.y; \
Y.z += a.x*X.z; Y.z -= a.y*X.w; \
Y.w += a.y*X.z; Y.w += a.x*X.w;
#else
#define CAXPY_FLOAT4(a, X, Y) \
Y.x = fmaf(a.x, X.x, Y.x); Y.x = fmaf(-a.y, X.y, Y.x); \
Y.y = fmaf(a.y, X.x, Y.y); Y.y = fmaf( a.x, X.y, Y.y); \
Y.z = fmaf(a.x, X.z, Y.z); Y.z = fmaf(-a.y, X.w, Y.z); \
Y.w = fmaf(a.y, X.z, Y.w); Y.w = fmaf( a.x, X.w, Y.w);
#endif // (__CUDA_ARCH__ < 200)
#if (__CUDA_ARCH__ < 200)
#define CAXPY_FLOAT2(a, X, Y) \
Y.x += a.x*X.x; Y.x -= a.y*X.y; \
Y.y += a.y*X.x; Y.y += a.x*X.y;
#else
#define CAXPY_FLOAT2(a, X, Y) \
Y.x = fmaf(a.x, X.x, Y.x); Y.x = fmaf(-a.y, X.y, Y.x); \
Y.y = fmaf(a.y, X.x, Y.y); Y.y = fmaf( a.x, X.y, Y.y);
#endif // (__CUDA_ARCH__ < 200)
#define CAXPY_DOUBLE2(a, X, Y) \
Y.x += a.x*X.x; Y.x -= a.y*X.y; \
Y.y += a.y*X.x; Y.y += a.x*X.y;
#define CMAXPY_FLOAT4(a, X, Y) \
Y.x -= a.x*X.x; Y.x += a.y*X.y; \
Y.y -= a.y*X.x; Y.y -= a.x*X.y; \
Y.z -= a.x*X.z; Y.z += a.y*X.w; \
Y.w -= a.y*X.z; Y.w -= a.x*X.w;
#define CMAXPY_FLOAT2(a, X, Y) \
Y.x -= a.x*X.x; Y.x += a.y*X.y; \
Y.y -= a.y*X.x; Y.y -= a.x*X.y;
#define CAXPBY_FLOAT4(a, X, b, Y) \
{ float2 y; \
y.x = a.x*X.x; y.x -= a.y*X.y; y.x += b.x*Y.x; y.x -= b.y*Y.y; \
y.y = a.y*X.x; y.y += a.x*X.y; y.y += b.y*Y.x; y.y += b.x*Y.y; \
Y.x = y.x; Y.y = y.y; \
y.x = a.x*X.z; y.x -= a.y*X.w; y.x += b.x*Y.z; y.x -= b.y*Y.w; \
y.y = a.y*X.z; y.y += a.x*X.w; y.y += b.y*Y.z; y.y += b.x*Y.w; \
Y.z = y.x; Y.w = y.y;}
#define CAXPBY_FLOAT2(a, X, b, Y) \
{ float2 y; \
y.x = a.x*X.x; y.x -= a.y*X.y; y.x += b.x*Y.x; y.x -= b.y*Y.y; \
y.y = a.y*X.x; y.y += a.x*X.y; y.y += b.y*Y.x; y.y += b.x*Y.y; \
Y.x = y.x; Y.y = y.y;}
#define CXPAYPBZ_FLOAT4(X, a, Y, b, Z) \
{float2 z; \
z.x = X.x + a.x*Y.x; z.x -= a.y*Y.y; z.x += b.x*Z.x; z.x -= b.y*Z.y; \
z.y = X.y + a.y*Y.x; z.y += a.x*Y.y; z.y += b.y*Z.x; z.y += b.x*Z.y; \
Z.x = z.x; Z.y = z.y; \
z.x = X.z + a.x*Y.z; z.x -= a.y*Y.w; z.x += b.x*Z.z; z.x -= b.y*Z.w; \
z.y = X.w + a.y*Y.z; z.y += a.x*Y.w; z.y += b.y*Z.z; z.y += b.x*Z.w; \
Z.z = z.x; Z.w = z.y;}
#define CXPAYPBZ_FLOAT2(X, a, Y, b, Z) \
{float2 z; \
z.x = X.x + a.x*Y.x; z.x -= a.y*Y.y; z.x += b.x*Z.x; z.x -= b.y*Z.y; \
z.y = X.y + a.y*Y.x; z.y += a.x*Y.y; z.y += b.y*Z.x; z.y += b.x*Z.y; \
Z.x = z.x; Z.y = z.y;}
#if (__CUDA_ARCH__ < 200)
#define CAXPBYPZ_FLOAT4(a, X, b, Y, Z) \
Z.x += a.x*X.x - a.y*X.y + b.x*Y.x - b.y*Y.y; \
Z.y += a.y*X.x + a.x*X.y + b.y*Y.x + b.x*Y.y; \
Z.z += a.x*X.z - a.y*X.w + b.x*Y.z - b.y*Y.w; \
Z.w += a.y*X.z + a.x*X.w + b.y*Y.z + b.x*Y.w;
#else
#define CAXPBYPZ_FLOAT4(a, X, b, Y, Z) \
Z.x = fmaf(a.x, X.x, Z.x); Z.x = fmaf(-a.y, X.y, Z.x); Z.x = fmaf(b.x, Y.x, Z.x); Z.x = fmaf(-b.y, Y.y, Z.x); \
Z.y = fmaf(a.y, X.x, Z.y); Z.y = fmaf( a.x, X.y, Z.y); Z.y = fmaf(b.y, Y.x, Z.y); Z.y = fmaf( b.x, Y.y, Z.y); \
Z.z = fmaf(a.x, X.z, Z.z); Z.z = fmaf(-a.y, X.w, Z.z); Z.z = fmaf(b.x, Y.z, Z.z); Z.z = fmaf(-b.y, Y.w, Z.z); \
Z.w = fmaf(a.y, X.z, Z.w); Z.w = fmaf( a.x, X.w, Z.w); Z.w = fmaf(b.y, Y.z, Z.w); Z.w = fmaf( b.x, Y.w, Z.w);
#endif // (__CUDA_ARCH__ < 200)
#if (__CUDA_ARCH__ < 200)
#define CAXPBYPZ_FLOAT2(a, X, b, Y, Z) \
Z.x += a.x*X.x - a.y*X.y + b.x*Y.x - b.y*Y.y; \
Z.y += a.y*X.x + a.x*X.y + b.y*Y.x + b.x*Y.y;
#else
#define CAXPBYPZ_FLOAT2(a, X, b, Y, Z) \
Z.x = fmaf(a.x, X.x, Z.x); Z.x = fmaf(-a.y, X.y, Z.x); Z.x = fmaf(b.x, Y.x, Z.x); Z.x = fmaf(-b.y, Y.y, Z.x); \
Z.y = fmaf(a.y, X.x, Z.y); Z.y = fmaf( a.x, X.y, Z.y); Z.y = fmaf(b.y, Y.x, Z.y); Z.y = fmaf( b.x, Y.y, Z.y);
#endif // (__CUDA_ARCH__ < 200)
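// Input spinor fields are read through the file-scope texture references declared
// below: int4 textures for double precision (see fetch_double2 above), float2/float4
// textures for single precision, and short2/short4 textures plus a float norm texture
// for half precision. Each blas routine binds the textures it needs to its operands
// immediately before launching its kernel.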
// Double precision input spinor field
texture<int4, 1> xTexDouble2;
texture<int4, 1> yTexDouble2;
texture<int4, 1> zTexDouble2;
texture<int4, 1> wTexDouble2;
texture<int4, 1> uTexDouble2;
// Single precision input spinor field
texture<float2, 1> xTexSingle2;
texture<float2, 1> yTexSingle2;
texture<float4, 1> xTexSingle4;
// Half precision input spinor field
texture<short4, 1, hipReadModeNormalizedFloat> texHalf1;
texture<short2, 1, hipReadModeNormalizedFloat> texHalfSt1;
texture<float, 1, hipReadModeElementType> texNorm1;
// Half precision input spinor field
texture<short4, 1, hipReadModeNormalizedFloat> texHalf2;
texture<short2, 1, hipReadModeNormalizedFloat> texHalfSt2;
texture<float, 1, hipReadModeElementType> texNorm2;
// Half precision input spinor field
texture<short4, 1, hipReadModeNormalizedFloat> texHalf3;
texture<short2, 1, hipReadModeNormalizedFloat> texHalfSt3;
texture<float, 1, hipReadModeElementType> texNorm3;
// Half precision input spinor field
texture<short4, 1, hipReadModeNormalizedFloat> texHalf4;
texture<short2, 1, hipReadModeNormalizedFloat> texHalfSt4;
texture<float, 1, hipReadModeElementType> texNorm4;
// Half precision input spinor field
texture<short4, 1, hipReadModeNormalizedFloat> texHalf5;
texture<short2, 1, hipReadModeNormalizedFloat> texHalfSt5;
texture<float, 1, hipReadModeElementType> texNorm5;
#define checkSpinor(a, b) \
{ \
if (a.Precision() != b.Precision()) \
errorQuda("precisions do not match: %d %d", a.Precision(), b.Precision()); \
if (a.Length() != b.Length()) \
errorQuda("lengths do not match: %d %d", a.Length(), b.Length()); \
if (a.Stride() != b.Stride()) \
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \
}
// For kernels with precision conversion built in
#define checkSpinorLength(a, b) \
{ \
if (a.Length() != b.Length()) \
errorQuda("lengths do not match: %d %d", a.Length(), b.Length()); \
}
__global__ void convertDSKernel(double2 *dst, float4 *src, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
for (int k=0; k<6; k++) {
dst[2*k*length+i].x = src[k*length+i].x;
dst[2*k*length+i].y = src[k*length+i].y;
dst[(2*k+1)*length+i].x = src[k*length+i].z;
dst[(2*k+1)*length+i].y = src[k*length+i].w;
}
i += gridSize;
}
}
__global__ void convertDSKernel(double2 *dst, float2 *src, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
for (int k=0; k<3; k++) {
dst[k*length+i].x = src[k*length+i].x;
dst[k*length+i].y = src[k*length+i].y;
}
i += gridSize;
}
}
__global__ void convertSDKernel(float4 *dst, double2 *src, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
for (int k=0; k<6; k++) {
dst[k*length+i].x = src[2*k*length+i].x;
dst[k*length+i].y = src[2*k*length+i].y;
dst[k*length+i].z = src[(2*k+1)*length+i].x;
dst[k*length+i].w = src[(2*k+1)*length+i].y;
}
i += gridSize;
}
}
__global__ void convertSDKernel(float2 *dst, double2 *src, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
for (int k=0; k<3; k++) {
dst[k*length+i].x = src[k*length+i].x;
dst[k*length+i].y = src[k*length+i].y;
}
i += gridSize;
}
}
__global__ void convertHSKernel(short4 *h, float *norm, int length, int real_length) {
int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while(i < real_length) {
float4 F0 = tex1Dfetch(xTexSingle4, i + 0*length);
float4 F1 = tex1Dfetch(xTexSingle4, i + 1*length);
float4 F2 = tex1Dfetch(xTexSingle4, i + 2*length);
float4 F3 = tex1Dfetch(xTexSingle4, i + 3*length);
float4 F4 = tex1Dfetch(xTexSingle4, i + 4*length);
float4 F5 = tex1Dfetch(xTexSingle4, i + 5*length);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(h, norm, F, length);
i += gridSize;
}
}
__global__ void convertHSKernel(short2 *h, float *norm, int length, int real_length) {
int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while(i < real_length) {
float2 F0 = tex1Dfetch(xTexSingle2, i + 0*length);
float2 F1 = tex1Dfetch(xTexSingle2, i + 1*length);
float2 F2 = tex1Dfetch(xTexSingle2, i + 2*length);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(h, norm, F, length);
i += gridSize;
}
}
__global__ void convertSHKernel(float4 *res, int length, int real_length) {
int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i<real_length) {
RECONSTRUCT_HALF_SPINOR(I, texHalf1, texNorm1, length);
res[0*length+i] = I0;
res[1*length+i] = I1;
res[2*length+i] = I2;
res[3*length+i] = I3;
res[4*length+i] = I4;
res[5*length+i] = I5;
i += gridSize;
}
}
__global__ void convertSHKernel(float2 *res, int length, int real_length) {
int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i<real_length) {
RECONSTRUCT_HALF_SPINOR_ST(I, texHalfSt1, texNorm1, length);
res[0*length+i] = I0;
res[1*length+i] = I1;
res[2*length+i] = I2;
i += gridSize;
}
}
__global__ void convertHDKernel(short4 *h, float *norm, int length, int real_length) {
int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while(i < real_length) {
double2 F0 = fetch_double2(xTexDouble2, i+0*length);
double2 F1 = fetch_double2(xTexDouble2, i+1*length);
double2 F2 = fetch_double2(xTexDouble2, i+2*length);
double2 F3 = fetch_double2(xTexDouble2, i+3*length);
double2 F4 = fetch_double2(xTexDouble2, i+4*length);
double2 F5 = fetch_double2(xTexDouble2, i+5*length);
double2 F6 = fetch_double2(xTexDouble2, i+6*length);
double2 F7 = fetch_double2(xTexDouble2, i+7*length);
double2 F8 = fetch_double2(xTexDouble2, i+8*length);
double2 F9 = fetch_double2(xTexDouble2, i+9*length);
double2 F10 = fetch_double2(xTexDouble2, i+10*length);
double2 F11 = fetch_double2(xTexDouble2, i+11*length);
CONSTRUCT_HALF_SPINOR_FROM_DOUBLE(h, norm, F, length);
i += gridSize;
}
}
__global__ void convertHDKernel(short2 *h, float *norm, int length, int real_length) {
int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while(i < real_length) {
double2 F0 = fetch_double2(xTexDouble2, i+0*length);
double2 F1 = fetch_double2(xTexDouble2, i+1*length);
double2 F2 = fetch_double2(xTexDouble2, i+2*length);
CONSTRUCT_HALF_SPINOR_FROM_DOUBLE_ST(h, norm, F, length);
i += gridSize;
}
}
__global__ void convertDHKernel(double2 *res, int length, int real_length) {
int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while(i < real_length) {
RECONSTRUCT_HALF_SPINOR(I, texHalf1, texNorm1, length);
res[0*length+i] = make_double2(I0.x, I0.y);
res[1*length+i] = make_double2(I0.z, I0.w);
res[2*length+i] = make_double2(I1.x, I1.y);
res[3*length+i] = make_double2(I1.z, I1.w);
res[4*length+i] = make_double2(I2.x, I2.y);
res[5*length+i] = make_double2(I2.z, I2.w);
res[6*length+i] = make_double2(I3.x, I3.y);
res[7*length+i] = make_double2(I3.z, I3.w);
res[8*length+i] = make_double2(I4.x, I4.y);
res[9*length+i] = make_double2(I4.z, I4.w);
res[10*length+i] = make_double2(I5.x, I5.y);
res[11*length+i] = make_double2(I5.z, I5.w);
i += gridSize;
}
}
__global__ void convertDHKernelSt(double2 *res, int length, int real_length) {
int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while(i < real_length) {
RECONSTRUCT_HALF_SPINOR_ST(I, texHalfSt1, texNorm1, length);
res[0*length+i] = make_double2(I0.x, I0.y);
res[1*length+i] = make_double2(I1.x, I1.y);
res[2*length+i] = make_double2(I2.x, I2.y);
i += gridSize;
}
}
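// copyCuda() copies src into dst, converting precision on the fly via the convert*
// kernels above when the two fields differ; same-precision copies fall through to a
// plain device-to-device hipMemcpy at the end of the routine. Usage sketch (assuming
// dst and src are compatible cudaColorSpinorField objects, as enforced below):
//   copyCuda(dst, src);   // dst <- src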
void copyCuda(cudaColorSpinorField &dst, const cudaColorSpinorField &src) {
if (&src == &dst) return; // aliasing fields
if (src.Nspin() != 1 && src.Nspin() != 4){
errorQuda("nSpin(%d) not supported in function %s, line %d\n", src.Nspin(), __FUNCTION__, __LINE__);
}
if ((dst.Precision() == QUDA_HALF_PRECISION || src.Precision() == QUDA_HALF_PRECISION) &&
(dst.SiteSubset() == QUDA_FULL_SITE_SUBSET || src.SiteSubset() == QUDA_FULL_SITE_SUBSET)) {
copyCuda(dst.Even(), src.Even());
copyCuda(dst.Odd(), src.Odd());
return;
}
// For a given dst precision, there are two non-trivial possibilities for the
// src precision. The higher one corresponds to kernel index 0 (in the table
// of block and grid dimensions), while the lower one corresponds to index 1.
int id;
if (src.Precision() == QUDA_DOUBLE_PRECISION ||
(dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION)) {
id = 0;
} else {
id = 1;
}
setBlock(id, dst.Stride(), dst.Precision());
quda::blas_bytes += src.RealLength()*((int)src.Precision() + (int)dst.Precision());
if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
if (src.Nspin() == 4){
hipLaunchKernelGGL(( convertDSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (double2*)dst.V(), (float4*)src.V(), src.Stride());
}else{ //src.Nspin() == 1
hipLaunchKernelGGL(( convertDSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (double2*)dst.V(), (float2*)src.V(), src.Stride());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
if (src.Nspin() == 4){
hipLaunchKernelGGL(( convertSDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float4*)dst.V(), (double2*)src.V(), src.Stride());
}else{ //src.Nspin() ==1
hipLaunchKernelGGL(( convertSDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float2*)dst.V(), (double2*)src.V(), src.Stride());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
quda::blas_bytes += src.Volume()*sizeof(float);
int spinor_bytes = src.Length()*sizeof(short);
if (src.Nspin() == 4){
hipBindTexture(0, texHalf1, src.V(), spinor_bytes);
hipBindTexture(0, texNorm1, src.Norm(), spinor_bytes/12);
hipLaunchKernelGGL(( convertSHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float4*)dst.V(), src.Stride(), src.Volume());
hipUnbindTexture(texHalf1);
hipUnbindTexture(texNorm1);
}else{ //nSpin == 1
hipBindTexture(0, texHalfSt1, src.V(), spinor_bytes);
hipBindTexture(0, texNorm1, src.Norm(), spinor_bytes/3);
hipLaunchKernelGGL(( convertSHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float2*)dst.V(), src.Stride(), src.Volume());
hipUnbindTexture(texHalfSt1);
hipUnbindTexture(texNorm1);
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
quda::blas_bytes += dst.Volume()*sizeof(float);
int spinor_bytes = src.Length()*sizeof(float);
if (src.Nspin() == 4){
hipBindTexture(0, xTexSingle4, src.V(), spinor_bytes);
hipLaunchKernelGGL(( convertHSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (short4*)dst.V(), (float*)dst.Norm(), src.Stride(), src.Volume());
hipUnbindTexture(xTexSingle4);
}else{ //nSpin == 1
hipBindTexture(0, xTexSingle2, src.V(), spinor_bytes);
hipLaunchKernelGGL(( convertHSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (short2*)dst.V(), (float*)dst.Norm(), src.Stride(), src.Volume());
hipUnbindTexture(xTexSingle2);
}
} else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
quda::blas_bytes += src.Volume()*sizeof(float);
int spinor_bytes = src.Length()*sizeof(short);
if (src.Nspin() == 4){
hipBindTexture(0, texHalf1, src.V(), spinor_bytes);
hipBindTexture(0, texNorm1, src.Norm(), spinor_bytes/12);
hipLaunchKernelGGL(( convertDHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (double2*)dst.V(), src.Stride(), src.Volume());
hipUnbindTexture(texHalf1);
hipUnbindTexture(texNorm1);
}else{//nSpin == 1
hipBindTexture(0, texHalfSt1, src.V(), spinor_bytes);
hipBindTexture(0, texNorm1, src.Norm(), spinor_bytes/3);
hipLaunchKernelGGL(( convertDHKernelSt), dim3(blasGrid), dim3(blasBlock), 0, 0, (double2*)dst.V(), src.Stride(), src.Volume());
hipUnbindTexture(texHalfSt1);
hipUnbindTexture(texNorm1);
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
quda::blas_bytes += dst.Volume()*sizeof(float);
int spinor_bytes = src.Length()*sizeof(double);
hipBindTexture(0, xTexDouble2, src.V(), spinor_bytes);
if (src.Nspin() == 4){
hipLaunchKernelGGL(( convertHDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (short4*)dst.V(), (float*)dst.Norm(), src.Stride(), src.Volume());
}else{ //nSpin == 1
hipLaunchKernelGGL(( convertHDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (short2*)dst.V(), (float*)dst.Norm(), src.Stride(), src.Volume());
}
hipUnbindTexture(xTexDouble2);
} else {
hipMemcpy(dst.V(), src.V(), dst.Bytes(), hipMemcpyDeviceToDevice);
if (dst.Precision() == QUDA_HALF_PRECISION) {
hipMemcpy(dst.Norm(), src.Norm(), dst.Bytes()/(dst.Ncolor()*dst.Nspin()), hipMemcpyDeviceToDevice);
quda::blas_bytes += 2*dst.RealLength()*sizeof(float);
}
}
hipDeviceSynchronize();
if (!blasTuning) checkCudaError();
}
template <typename Float, typename Float2>
__global__ void axpbyKernel(Float a, Float2 *x, Float b, Float2 *y, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
y[i] = a*x[i] + b*y[i];
i += gridSize;
}
}
__global__ void axpbyHKernel(float a, float b, short4 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
AXPBY_FLOAT4(a, x0, b, y0);
AXPBY_FLOAT4(a, x1, b, y1);
AXPBY_FLOAT4(a, x2, b, y2);
AXPBY_FLOAT4(a, x3, b, y3);
AXPBY_FLOAT4(a, x4, b, y4);
AXPBY_FLOAT4(a, x5, b, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
__global__ void axpbyHKernel(float a, float b, short2 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
AXPBY_FLOAT2(a, x0, b, y0);
AXPBY_FLOAT2(a, x1, b, y1);
AXPBY_FLOAT2(a, x2, b, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation y[i] = a*x[i] + b*y[i]
void axpbyCuda(const double &a, cudaColorSpinorField &x, const double &b, cudaColorSpinorField &y) {
setBlock(2, x.Length(), x.Precision());
checkSpinor(x, y);
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
hipLaunchKernelGGL(( axpbyKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a, (double*)x.V(), b, (double*)y.V(), x.Length());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
hipLaunchKernelGGL(( axpbyKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float2*)x.V(), (float)b, (float2*)y.V(), x.Length()/2);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
axpbyCuda(a, x.Even(), b, y.Even());
axpbyCuda(a, x.Odd(), b, y.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
hipLaunchKernelGGL(( axpbyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float)b, (short4*)y.V(),
(float*)y.Norm(), y.Stride(), y.Volume());
}else if (x.Nspin() == 1) {//staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
hipLaunchKernelGGL(( axpbyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float)b, (short2*)y.V(),
(float*)y.Norm(), y.Stride(), y.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 3*x.Volume()*sizeof(float);
}
quda::blas_bytes += 3*x.RealLength()*x.Precision();
quda::blas_flops += 3*x.RealLength();
if (!blasTuning) checkCudaError();
}
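// Usage sketch for axpbyCuda above (and the similar real-coefficient routines that
// follow), assuming x and y are cudaColorSpinorField objects with matching precision,
// length and stride:
//   axpbyCuda(2.0, x, -1.0, y);   // y <- 2*x - y
//   axpyCuda(0.5, x, y);          // y <- y + 0.5*x  (defined further below)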
template <typename Float>
__global__ void xpyKernel(Float *x, Float *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
y[i] += x[i];
i += gridSize;
}
}
__global__ void xpyHKernel(short4 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
XPY_FLOAT4(x0, y0);
XPY_FLOAT4(x1, y1);
XPY_FLOAT4(x2, y2);
XPY_FLOAT4(x3, y3);
XPY_FLOAT4(x4, y4);
XPY_FLOAT4(x5, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
__global__ void xpyHKernel(short2 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
XPY_FLOAT2(x0, y0);
XPY_FLOAT2(x1, y1);
XPY_FLOAT2(x2, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation y[i] = x[i] + y[i]
void xpyCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) {
checkSpinor(x,y);
setBlock(3, x.Length(), x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
hipLaunchKernelGGL(( xpyKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (double*)x.V(), (double*)y.V(), x.Length());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
hipLaunchKernelGGL(( xpyKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float2*)x.V(), (float2*)y.V(), x.Length()/2);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
xpyCuda(x.Even(), y.Even());
xpyCuda(x.Odd(), y.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
hipLaunchKernelGGL(( xpyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
hipLaunchKernelGGL(( xpyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 3*x.Volume()*sizeof(float);
}
quda::blas_bytes += 3*x.RealLength()*x.Precision();
quda::blas_flops += x.RealLength();
if (!blasTuning) checkCudaError();
}
template <typename Float, typename Float2>
__global__ void axpyKernel(Float a, Float2 *x, Float2 *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
y[i] += a*x[i];
i += gridSize;
}
}
__global__ void axpyHKernel(float a, short4 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
AXPY_FLOAT4(a, x0, y0);
AXPY_FLOAT4(a, x1, y1);
AXPY_FLOAT4(a, x2, y2);
AXPY_FLOAT4(a, x3, y3);
AXPY_FLOAT4(a, x4, y4);
AXPY_FLOAT4(a, x5, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
__global__ void axpyHKernel(float a, short2 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
AXPY_FLOAT2(a, x0, y0);
AXPY_FLOAT2(a, x1, y1);
AXPY_FLOAT2(a, x2, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation y[i] = a*x[i] + y[i]
void axpyCuda(const double &a, cudaColorSpinorField &x, cudaColorSpinorField &y) {
checkSpinor(x,y);
setBlock(4, x.Length(), x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
hipLaunchKernelGGL(( axpyKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a, (double*)x.V(), (double*)y.V(), x.Length());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
hipLaunchKernelGGL(( axpyKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float2*)x.V(), (float2*)y.V(), x.Length()/2);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
axpyCuda(a, x.Even(), y.Even());
axpyCuda(a, x.Odd(), y.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
hipLaunchKernelGGL(( axpyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
hipLaunchKernelGGL(( axpyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 3*x.Volume()*sizeof(float);
}
quda::blas_bytes += 3*x.RealLength()*x.Precision();
quda::blas_flops += 2*x.RealLength();
if (!blasTuning) checkCudaError();
}
template <typename Float, typename Float2>
__global__ void xpayKernel(const Float2 *x, Float a, Float2 *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
y[i] = x[i] + a*y[i];
i += gridSize;
}
}
__global__ void xpayHKernel(float a, short4 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
XPAY_FLOAT4(x0, a, y0);
XPAY_FLOAT4(x1, a, y1);
XPAY_FLOAT4(x2, a, y2);
XPAY_FLOAT4(x3, a, y3);
XPAY_FLOAT4(x4, a, y4);
XPAY_FLOAT4(x5, a, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
__global__ void xpayHKernel(float a, short2 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
XPAY_FLOAT2(x0, a, y0);
XPAY_FLOAT2(x1, a, y1);
XPAY_FLOAT2(x2, a, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation y[i] = x[i] + a*y[i]
void xpayCuda(const cudaColorSpinorField &x, const double &a, cudaColorSpinorField &y) {
checkSpinor(x,y);
setBlock(5, x.Length(), x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
hipLaunchKernelGGL(( xpayKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (double*)x.V(), a, (double*)y.V(), x.Length());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
hipLaunchKernelGGL(( xpayKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float2*)x.V(), (float)a, (float2*)y.V(), x.Length()/2);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
xpayCuda(x.Even(), a, y.Even());
xpayCuda(x.Odd(), a, y.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
hipLaunchKernelGGL(( xpayHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else if (x.Nspin() ==1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
hipLaunchKernelGGL(( xpayHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 3*x.Volume()*sizeof(float);
}
quda::blas_bytes += 3*x.RealLength()*x.Precision();
quda::blas_flops += 2*x.RealLength();
if (!blasTuning) checkCudaError();
}
template <typename Float>
__global__ void mxpyKernel(Float *x, Float *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
y[i] -= x[i];
i += gridSize;
}
}
__global__ void mxpyHKernel(short4 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
MXPY_FLOAT4(x0, y0);
MXPY_FLOAT4(x1, y1);
MXPY_FLOAT4(x2, y2);
MXPY_FLOAT4(x3, y3);
MXPY_FLOAT4(x4, y4);
MXPY_FLOAT4(x5, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
__global__ void mxpyHKernel(short2 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
MXPY_FLOAT2(x0, y0);
MXPY_FLOAT2(x1, y1);
MXPY_FLOAT2(x2, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation y[i] -= x[i] (minus x plus y)
void mxpyCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) {
checkSpinor(x,y);
setBlock(6, x.Length(), x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
hipLaunchKernelGGL(( mxpyKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (double*)x.V(), (double*)y.V(), x.Length());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
hipLaunchKernelGGL(( mxpyKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float2*)x.V(), (float2*)y.V(), x.Length()/2);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
mxpyCuda(x.Even(), y.Even());
mxpyCuda(x.Odd(), y.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
hipLaunchKernelGGL(( mxpyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else if (x.Nspin() == 1) { //staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
hipLaunchKernelGGL(( mxpyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 3*x.Volume()*sizeof(float);
}
quda::blas_bytes += 3*x.RealLength()*x.Precision();
quda::blas_flops += x.RealLength();
if (!blasTuning) checkCudaError();
}
template <typename Float, typename Float2>
__global__ void axKernel(Float a, Float2 *x, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
x[i] *= a;
i += gridSize;
}
}
__global__ void axHKernel(float a, short4 *xH, float *xN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
AX_FLOAT4(a, x0); AX_FLOAT4(a, x1); AX_FLOAT4(a, x2);
AX_FLOAT4(a, x3); AX_FLOAT4(a, x4); AX_FLOAT4(a, x5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride);
i += gridSize;
}
}
__global__ void axHKernel(float a, short2 *xH, float *xN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
AX_FLOAT2(a, x0); AX_FLOAT2(a, x1); AX_FLOAT2(a, x2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride);
i += gridSize;
}
}
// performs the operation x[i] = a*x[i]
void axCuda(const double &a, cudaColorSpinorField &x) {
setBlock(7, x.Length(), x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
hipLaunchKernelGGL(( axKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a, (double*)x.V(), x.Length());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
hipLaunchKernelGGL(( axKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float2*)x.V(), x.Length()/2);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
axCuda(a, x.Even());
axCuda(a, x.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipLaunchKernelGGL(( axHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (short4*)x.V(), (float*)x.Norm(), x.Stride(), x.Volume());
}else if (x.Nspin() ==1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipLaunchKernelGGL(( axHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (short2*)x.V(), (float*)x.Norm(), x.Stride(), x.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 2*x.Volume()*sizeof(float);
}
quda::blas_bytes += 2*x.RealLength()*x.Precision();
quda::blas_flops += x.RealLength();
if (!blasTuning) checkCudaError();
}
template <typename Float2>
__global__ void caxpyDKernel(Float2 a, Float2 *x, Float2 *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 Z = READ_DOUBLE2_TEXTURE(x, i);
y[i].x += a.x*Z.x - a.y*Z.y;
y[i].y += a.y*Z.x + a.x*Z.y;
i += gridSize;
}
}
template <typename Float2>
__global__ void caxpySKernel(Float2 a, Float2 *x, Float2 *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 Z = read_Float2(x, i);
y[i].x += a.x*Z.x - a.y*Z.y;
y[i].y += a.y*Z.x + a.x*Z.y;
i += gridSize;
}
}
__global__ void caxpyHKernel(float2 a, short4 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
CAXPY_FLOAT4(a, x0, y0);
CAXPY_FLOAT4(a, x1, y1);
CAXPY_FLOAT4(a, x2, y2);
CAXPY_FLOAT4(a, x3, y3);
CAXPY_FLOAT4(a, x4, y4);
CAXPY_FLOAT4(a, x5, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
__global__ void caxpyHKernel(float2 a, short2 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
CAXPY_FLOAT2(a, x0, y0);
CAXPY_FLOAT2(a, x1, y1);
CAXPY_FLOAT2(a, x2, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation y[i] += a*x[i]
void caxpyCuda(const quda::Complex &a, cudaColorSpinorField &x, cudaColorSpinorField &y) {
checkSpinor(x,y);
int length = x.Length()/2;
setBlock(8, length, x.Precision());
quda::blas_bytes += 3*x.RealLength()*x.Precision();
quda::blas_flops += 4*x.RealLength();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
hipBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
hipBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
double2 a2 = make_double2(real(a), imag(a));
hipLaunchKernelGGL(( caxpyDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (double2*)x.V(), (double2*)y.V(), length);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
hipLaunchKernelGGL(( caxpySKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (float2*)x.V(), (float2*)y.V(), length);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
caxpyCuda(a, x.Even(), y.Even());
caxpyCuda(a, x.Odd(), y.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
float2 a2 = make_float2(real(a), imag(a));
hipLaunchKernelGGL(( caxpyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
} else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
float2 a2 = make_float2(real(a), imag(a));
hipLaunchKernelGGL(( caxpyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 3*x.Volume()*sizeof(float);
}
if (!blasTuning) checkCudaError();
}
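// The complex-coefficient routines such as caxpyCuda above accept quda::Complex
// arguments and split them into (real, imag) pairs (double2 or float2) before passing
// them to the kernels. Sketch (assuming quda::Complex can be built from a
// real/imaginary pair, e.g. as a std::complex typedef):
//   caxpyCuda(quda::Complex(0.0, 1.0), x, y);   // y <- y + i*x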
template <typename Float2>
__global__ void caxpbyDKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 Z1 = READ_DOUBLE2_TEXTURE(x, i);
Float2 Z2 = READ_DOUBLE2_TEXTURE(y, i);
y[i].x = a.x*Z1.x + b.x*Z2.x - a.y*Z1.y - b.y*Z2.y;
y[i].y = a.y*Z1.x + b.y*Z2.x + a.x*Z1.y + b.x*Z2.y;
i += gridSize;
}
}
template <typename Float2>
__global__ void caxpbySKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 Z1 = read_Float2(x, i);
Float2 Z2 = read_Float2(y, i);
y[i].x = a.x*Z1.x + b.x*Z2.x - a.y*Z1.y - b.y*Z2.y;
y[i].y = a.y*Z1.x + b.y*Z2.x + a.x*Z1.y + b.x*Z2.y;
i += gridSize;
}
}
__global__ void caxpbyHKernel(float2 a, float2 b, short4 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
CAXPBY_FLOAT4(a, x0, b, y0);
CAXPBY_FLOAT4(a, x1, b, y1);
CAXPBY_FLOAT4(a, x2, b, y2);
CAXPBY_FLOAT4(a, x3, b, y3);
CAXPBY_FLOAT4(a, x4, b, y4);
CAXPBY_FLOAT4(a, x5, b, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
__global__ void caxpbyHKernel(float2 a, float2 b, short2 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
CAXPBY_FLOAT2(a, x0, b, y0);
CAXPBY_FLOAT2(a, x1, b, y1);
CAXPBY_FLOAT2(a, x2, b, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation y[i] = a*x[i] + b*y[i]
void caxpbyCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y) {
checkSpinor(x,y);
int length = x.Length()/2;
setBlock(9, length, x.Precision());
quda::blas_bytes += 3*x.RealLength()*x.Precision();
quda::blas_flops += 7*x.RealLength();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
double2 a2 = make_double2(real(a), imag(a));
double2 b2 = make_double2(real(b), imag(b));
int spinor_bytes = x.Length()*sizeof(double);
hipBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
hipBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
hipLaunchKernelGGL(( caxpbyDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (double2*)x.V(), b2, (double2*)y.V(), length);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
hipLaunchKernelGGL(( caxpbySKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (float2*)x.V(), b2, (float2*)y.V(), length);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
caxpbyCuda(a, x.Even(), b, y.Even());
caxpbyCuda(a, x.Odd(), b, y.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
hipLaunchKernelGGL(( caxpbyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
hipLaunchKernelGGL(( caxpbyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 3*x.Volume()*sizeof(float);
}
if (!blasTuning) checkCudaError();
}
template <typename Float2>
__global__ void cxpaypbzDKernel(Float2 *x, Float2 a, Float2 *y, Float2 b, Float2 *z, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 T1 = READ_DOUBLE2_TEXTURE(x, i);
Float2 T2 = READ_DOUBLE2_TEXTURE(y, i);
Float2 T3 = read_Float2(z, i);
T1.x += a.x*T2.x - a.y*T2.y;
T1.y += a.y*T2.x + a.x*T2.y;
T1.x += b.x*T3.x - b.y*T3.y;
T1.y += b.y*T3.x + b.x*T3.y;
z[i] = make_Float2(T1);
i += gridSize;
}
}
template <typename Float2>
__global__ void cxpaypbzSKernel(Float2 *x, Float2 a, Float2 *y, Float2 b, Float2 *z, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 T1 = read_Float2(x, i);
Float2 T2 = read_Float2(y, i);
Float2 T3 = read_Float2(z, i);
T1.x += a.x*T2.x - a.y*T2.y;
T1.y += a.y*T2.x + a.x*T2.y;
T1.x += b.x*T3.x - b.y*T3.y;
T1.y += b.y*T3.x + b.x*T3.y;
z[i] = make_Float2(T1);
i += gridSize;
}
}
__global__ void cxpaypbzHKernel(float2 a, float2 b, short4 *zH, float *zN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride);
CXPAYPBZ_FLOAT4(x0, a, y0, b, z0);
CXPAYPBZ_FLOAT4(x1, a, y1, b, z1);
CXPAYPBZ_FLOAT4(x2, a, y2, b, z2);
CXPAYPBZ_FLOAT4(x3, a, y3, b, z3);
CXPAYPBZ_FLOAT4(x4, a, y4, b, z4);
CXPAYPBZ_FLOAT4(x5, a, y5, b, z5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(zH, zN, z, stride);
i += gridSize;
}
}
__global__ void cxpaypbzHKernel(float2 a, float2 b, short2 *zH, float *zN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride);
CXPAYPBZ_FLOAT2(x0, a, y0, b, z0);
CXPAYPBZ_FLOAT2(x1, a, y1, b, z1);
CXPAYPBZ_FLOAT2(x2, a, y2, b, z2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(zH, zN, z, stride);
i += gridSize;
}
}
// performs the operation z[i] = x[i] + a*y[i] + b*z[i]
void cxpaypbzCuda(cudaColorSpinorField &x, const quda::Complex &a, cudaColorSpinorField &y,
const quda::Complex &b, cudaColorSpinorField &z) {
checkSpinor(x,y);
checkSpinor(x,z);
int length = x.Length()/2;
setBlock(10, length, x.Precision());
quda::blas_bytes += 4*x.RealLength()*x.Precision();
quda::blas_flops += 8*x.RealLength();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
double2 a2 = make_double2(real(a), imag(a));
double2 b2 = make_double2(real(b), imag(b));
int spinor_bytes = x.Length()*sizeof(double);
hipBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
hipBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
hipLaunchKernelGGL(( cxpaypbzDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (double2*)x.V(), a2, (double2*)y.V(), b2, (double2*)z.V(), length);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
hipLaunchKernelGGL(( cxpaypbzSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float2*)x.V(), a2, (float2*)y.V(), b2, (float2*)z.V(), length);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
cxpaypbzCuda(x.Even(), a, y.Even(), b, z.Even());
cxpaypbzCuda(x.Odd(), a, y.Odd(), b, z.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 4*x.Volume()*sizeof(float);
if (x.Nspin() ==4 ){//wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf3, z.V(), spinor_bytes);
hipBindTexture(0, texNorm3, z.Norm(), spinor_bytes/12);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
hipLaunchKernelGGL(( cxpaypbzHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, (short4*)z.V(), (float*)z.Norm(), z.Stride(), z.Volume());
} else if (x.Nspin() ==1 ){//staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt3, z.V(), spinor_bytes);
hipBindTexture(0, texNorm3, z.Norm(), spinor_bytes/3);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
hipLaunchKernelGGL(( cxpaypbzHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, (short2*)z.V(), (float*)z.Norm(), z.Stride(), z.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
}
if (!blasTuning) checkCudaError();
}
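// The remaining routines fuse several updates into a single kernel so each field is
// read and written only once per call; axpyBzpcx below, for example, updates both
// y (y += a*x) and x (x = b*z + c*x) in one pass over the data.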
template <typename Float, typename Float2>
__global__ void axpyBzpcxDKernel(Float a, Float2 *x, Float2 *y, Float b, Float2 *z, Float c, int len)
{
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 x_i = READ_DOUBLE2_TEXTURE(x, i);
Float2 z_i = READ_DOUBLE2_TEXTURE(z, i);
y[i].x += a*x_i.x;
y[i].y += a*x_i.y;
x[i].x = b*z_i.x + c*x_i.x;
x[i].y = b*z_i.y + c*x_i.y;
i += gridSize;
}
}
template <typename Float, typename Float2>
__global__ void axpyBzpcxSKernel(Float a, Float2 *x, Float2 *y, Float b, Float2 *z, Float c, int len)
{
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 x_i = read_Float2(x, i);
Float2 z_i = read_Float2(z, i);
y[i].x += a*x_i.x;
y[i].y += a*x_i.y;
x[i].x = b*z_i.x + c*x_i.x;
x[i].y = b*z_i.y + c*x_i.y;
i += gridSize;
}
}
__global__ void axpyBzpcxHKernel(float a, float b, float c, short4 *xH, float *xN, short4 *yH, float *yN, int stride, int length)
{
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride);
AXPY_FLOAT4(a, x0, y0);
AXPBY_FLOAT4(b, z0, c, x0);
AXPY_FLOAT4(a, x1, y1);
AXPBY_FLOAT4(b, z1, c, x1);
AXPY_FLOAT4(a, x2, y2);
AXPBY_FLOAT4(b, z2, c, x2);
AXPY_FLOAT4(a, x3, y3);
AXPBY_FLOAT4(b, z3, c, x3);
AXPY_FLOAT4(a, x4, y4);
AXPBY_FLOAT4(b, z4, c, x4);
AXPY_FLOAT4(a, x5, y5);
AXPBY_FLOAT4(b, z5, c, x5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride);
i += gridSize;
}
}
__global__ void axpyBzpcxHKernel(float a, float b, float c, short2 *xH, float *xN, short2 *yH, float *yN, int stride, int length)
{
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride);
AXPY_FLOAT2(a, x0, y0);
AXPBY_FLOAT2(b, z0, c, x0);
AXPY_FLOAT2(a, x1, y1);
AXPBY_FLOAT2(b, z1, c, x1);
AXPY_FLOAT2(a, x2, y2);
AXPBY_FLOAT2(b, z2, c, x2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride);
i += gridSize;
}
}
// performs the operations: {y[i] = a*x[i] + y[i]; x[i] = b*z[i] + c*x[i]}
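// Illustrative scalar reference for the fused update (sketch, not compiled;
// a, b, c are real):
//   for (int i = 0; i < N; i++) {
//     y[i] += a*x[i];
//     x[i] = b*z[i] + c*x[i];
//   }
// Fusing both updates reads x once instead of twice.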
void axpyBzpcxCuda(const double &a, cudaColorSpinorField& x, cudaColorSpinorField& y, const double &b,
cudaColorSpinorField& z, const double &c)
{
checkSpinor(x,y);
checkSpinor(x,z);
setBlock(11, x.Length(), x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
hipBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
hipBindTexture(0, zTexDouble2, z.V(), spinor_bytes);
hipLaunchKernelGGL(( axpyBzpcxDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a, (double2*)x.V(), (double2*)y.V(), b, (double2*)z.V(), c, x.Length()/2);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
hipLaunchKernelGGL(( axpyBzpcxSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float2*)x.V(), (float2*)y.V(), (float)b, (float2*)z.V(), (float)c, x.Length()/2);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET){
axpyBzpcxCuda(a, x.Even(), y.Even(), b, z.Even(), c);
axpyBzpcxCuda(a, x.Odd(), y.Odd(), b, z.Odd(), c);
return ;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf3, z.V(), spinor_bytes);
hipBindTexture(0, texNorm3, z.Norm(), spinor_bytes/12);
hipLaunchKernelGGL(( axpyBzpcxHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float)b, (float)c, (short4*)x.V(), (float*)x.Norm(),
(short4*)y.V(), (float*)y.Norm(), z.Stride(), z.Volume());
}else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt3, z.V(), spinor_bytes);
hipBindTexture(0, texNorm3, z.Norm(), spinor_bytes/3);
hipLaunchKernelGGL(( axpyBzpcxHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float)b, (float)c, (short2*)x.V(), (float*)x.Norm(),
(short2*)y.V(), (float*)y.Norm(), z.Stride(), z.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 5*x.Volume()*sizeof(float);
}
quda::blas_bytes += 5*x.RealLength()*x.Precision();
quda::blas_flops += 10*x.RealLength();
if (!blasTuning) checkCudaError();
}
template <typename Float, typename Float2>
__global__ void axpyZpbxDKernel(Float a, Float2 *x, Float2 *y, Float2 *z, Float b, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 x_i = READ_DOUBLE2_TEXTURE(x, i);
Float2 z_i = READ_DOUBLE2_TEXTURE(z, i);
y[i].x += a*x_i.x;
y[i].y += a*x_i.y;
x[i].x = z_i.x + b*x_i.x;
x[i].y = z_i.y + b*x_i.y;
i += gridSize;
}
}
template <typename Float, typename Float2>
__global__ void axpyZpbxSKernel(Float a, Float2 *x, Float2 *y, Float2 *z, Float b, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 x_i = read_Float2(x, i);
Float2 z_i = read_Float2(z, i);
y[i].x += a*x_i.x;
y[i].y += a*x_i.y;
x[i].x = z_i.x + b*x_i.x;
x[i].y = z_i.y + b*x_i.y;
i += gridSize;
}
}
__global__ void axpyZpbxHKernel(float a, float b, short4 *xH, float *xN, short4 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
AXPY_FLOAT4(a, x0, y0);
AXPY_FLOAT4(a, x1, y1);
AXPY_FLOAT4(a, x2, y2);
AXPY_FLOAT4(a, x3, y3);
AXPY_FLOAT4(a, x4, y4);
AXPY_FLOAT4(a, x5, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride);
XPAY_FLOAT4(z0, b, x0);
XPAY_FLOAT4(z1, b, x1);
XPAY_FLOAT4(z2, b, x2);
XPAY_FLOAT4(z3, b, x3);
XPAY_FLOAT4(z4, b, x4);
XPAY_FLOAT4(z5, b, x5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride);
i += gridSize;
}
}
__global__ void axpyZpbxHKernel(float a, float b, short2 *xH, float *xN, short2 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride);
AXPY_FLOAT2(a, x0, y0);
XPAY_FLOAT2(z0, b, x0);
AXPY_FLOAT2(a, x1, y1);
XPAY_FLOAT2(z1, b, x1);
AXPY_FLOAT2(a, x2, y2);
XPAY_FLOAT2(z2, b, x2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride);
i += gridSize;
}
}
// performs the operations: {y[i] = a*x[i] + y[i]; x[i] = z[i] + b*x[i]}
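// Illustrative scalar reference (sketch, not compiled):
//   for (int i = 0; i < N; i++) {
//     y[i] += a*x[i];
//     x[i] = z[i] + b*x[i];
//   }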
void axpyZpbxCuda(const double &a, cudaColorSpinorField &x, cudaColorSpinorField &y,
cudaColorSpinorField &z, const double &b) {
checkSpinor(x,y);
checkSpinor(x,z);
setBlock(12, x.Length(), x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
hipBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
hipBindTexture(0, zTexDouble2, z.V(), spinor_bytes);
hipLaunchKernelGGL(( axpyZpbxDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0,
a, (double2*)x.V(), (double2*)y.V(), (double2*)z.V(), b, x.Length()/2);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
hipLaunchKernelGGL(( axpyZpbxSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0,
(float)a, (float2*)x.V(), (float2*)y.V(), (float2*)z.V(), (float)b, x.Length()/2);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
axpyZpbxCuda(a, x.Even(), y.Even(), z.Even(), b);
axpyZpbxCuda(a, x.Odd(), y.Odd(), z.Odd(), b);
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() ==4){ //wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf3, z.V(), spinor_bytes);
hipBindTexture(0, texNorm3, z.Norm(), spinor_bytes/12);
hipLaunchKernelGGL(( axpyZpbxHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float)b, (short4*)x.V(), (float*)x.Norm(),
(short4*)y.V(), (float*)y.Norm(), z.Stride(), z.Volume());
}else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt3, z.V(), spinor_bytes);
hipBindTexture(0, texNorm3, z.Norm(), spinor_bytes/3);
hipLaunchKernelGGL(( axpyZpbxHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float)b, (short2*)x.V(), (float*)x.Norm(),
(short2*)y.V(), (float*)y.Norm(), z.Stride(), z.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 5*x.Volume()*sizeof(float);
}
quda::blas_bytes += 5*x.RealLength()*x.Precision();
quda::blas_flops += 8*x.RealLength();
if (!blasTuning) checkCudaError();
}
template <typename Float2>
__global__ void caxpbypzYmbwDKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, Float2 *w, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 X = READ_DOUBLE2_TEXTURE(x, i);
Float2 Z = read_Float2(z, i);
Z.x += a.x*X.x - a.y*X.y;
Z.y += a.y*X.x + a.x*X.y;
Float2 Y = READ_DOUBLE2_TEXTURE(y, i);
Z.x += b.x*Y.x - b.y*Y.y;
Z.y += b.y*Y.x + b.x*Y.y;
z[i] = make_Float2(Z);
Float2 W = read_Float2(w, i);
Y.x -= b.x*W.x - b.y*W.y;
Y.y -= b.y*W.x + b.x*W.y;
y[i] = make_Float2(Y);
i += gridSize;
}
}
template <typename Float2>
__global__ void caxpbypzYmbwSKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, Float2 *w, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 X = read_Float2(x, i);
Float2 Z = read_Float2(z, i);
Z.x += a.x*X.x - a.y*X.y;
Z.y += a.y*X.x + a.x*X.y;
Float2 Y = read_Float2(y, i);
Z.x += b.x*Y.x - b.y*Y.y;
Z.y += b.y*Y.x + b.x*Y.y;
z[i] = make_Float2(Z);
Float2 W = read_Float2(w, i);
Y.x -= b.x*W.x - b.y*W.y;
Y.y -= b.y*W.x + b.x*W.y;
y[i] = make_Float2(Y);
i += gridSize;
}
}
__global__ void caxpbypzYmbwHKernel(float2 a, float2 b, float *xN, short4 *yH, float *yN,
short4 *zH, float *zN, float *wN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride);
CAXPBYPZ_FLOAT4(a, x0, b, y0, z0);
CAXPBYPZ_FLOAT4(a, x1, b, y1, z1);
CAXPBYPZ_FLOAT4(a, x2, b, y2, z2);
CAXPBYPZ_FLOAT4(a, x3, b, y3, z3);
CAXPBYPZ_FLOAT4(a, x4, b, y4, z4);
CAXPBYPZ_FLOAT4(a, x5, b, y5, z5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(zH, zN, z, stride);
READ_HALF_SPINOR(w, texHalf4, stride);
float2 b2 = -wc*b;
CAXPY_FLOAT4(b2, w0, y0);
CAXPY_FLOAT4(b2, w1, y1);
CAXPY_FLOAT4(b2, w2, y2);
CAXPY_FLOAT4(b2, w3, y3);
CAXPY_FLOAT4(b2, w4, y4);
CAXPY_FLOAT4(b2, w5, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
__global__ void caxpbypzYmbwHKernel(float2 a, float2 b, float *xN, short2 *yH, float *yN,
short2 *zH, float *zN, float *wN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride);
CAXPBYPZ_FLOAT2(a, x0, b, y0, z0);
CAXPBYPZ_FLOAT2(a, x1, b, y1, z1);
CAXPBYPZ_FLOAT2(a, x2, b, y2, z2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(zH, zN, z, stride);
READ_HALF_SPINOR_ST(w, texHalfSt4, stride);
float2 b2 = -wc*b;
CAXPY_FLOAT2(b2, w0, y0);
CAXPY_FLOAT2(b2, w1, y1);
CAXPY_FLOAT2(b2, w2, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation z[i] = a*x[i] + b*y[i] + z[i] and y[i] -= b*w[i]
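// Per-element sketch for complex a, b (not compiled):
//   z[i] += a*x[i] + b*y[i];
//   y[i] -= b*w[i];
// In the half-precision kernels above, the -b*w[i] term is applied via the
// CAXPY_* macros with the sign and the norm factor folded into b2 = -wc*b.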
void caxpbypzYmbwCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y,
cudaColorSpinorField &z, cudaColorSpinorField &w) {
checkSpinor(x,y);
checkSpinor(x,z);
checkSpinor(x,w);
int length = x.Length()/2;
setBlock(13, length, x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
hipBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
hipBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
hipBindTexture(0, zTexDouble2, z.V(), spinor_bytes);
double2 a2 = make_double2(real(a), imag(a));
double2 b2 = make_double2(real(b), imag(b));
hipLaunchKernelGGL(( caxpbypzYmbwDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (double2*)x.V(), b2, (double2*)y.V(),
(double2*)z.V(), (double2*)w.V(), length);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
hipLaunchKernelGGL(( caxpbypzYmbwSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (float2*)x.V(), b2, (float2*)y.V(),
(float2*)z.V(), (float2*)w.V(), length);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
caxpbypzYmbwCuda(a, x.Even(), b, y.Even(), z.Even(), w.Even());
caxpbypzYmbwCuda(a, x.Odd(), b, y.Odd(), z.Odd(), w.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 6*x.Volume()*sizeof(float);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf3, z.V(), spinor_bytes);
hipBindTexture(0, texNorm3, z.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf4, w.V(), spinor_bytes);
hipBindTexture(0, texNorm4, w.Norm(), spinor_bytes/12);
hipLaunchKernelGGL(( caxpbypzYmbwHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(),
(short4*)z.V(), (float*)z.Norm(), (float*)w.Norm(),
z.Stride(), z.Volume());
} else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt3, z.V(), spinor_bytes);
hipBindTexture(0, texNorm3, z.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt4, w.V(), spinor_bytes);
hipBindTexture(0, texNorm4, w.Norm(), spinor_bytes/3);
hipLaunchKernelGGL(( caxpbypzYmbwHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(),
(short2*)z.V(), (float*)z.Norm(), (float*)w.Norm(),
z.Stride(), z.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
}
quda::blas_bytes += 6*x.RealLength()*x.Precision();
quda::blas_flops += 12*x.RealLength();
if (!blasTuning) checkCudaError();
}
#if (__CUDA_ARCH__ < 130)
// Computes c = a + b in "double single" precision.
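// A "double single" value is a (head, tail) pair of floats whose sum
// approximates a double. Knuth's two-sum recovers the rounding error of the
// head addition so it can be accumulated into the tail; this path is only
// needed on pre-sm_13 devices, which lack native double precision.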
__device__ void dsadd(volatile QudaSumFloat &c0, volatile QudaSumFloat &c1, const volatile QudaSumFloat &a0,
const volatile QudaSumFloat &a1, const float b0, const float b1) {
// Compute dsa + dsb using Knuth's trick.
QudaSumFloat t1 = a0 + b0;
QudaSumFloat e = t1 - a0;
QudaSumFloat t2 = ((b0 - e) + (a0 - (t1 - e))) + a1 + b1;
// The result is t1 + t2, after normalization.
c0 = e = t1 + t2;
c1 = t2 - (e - t1);
}
// Computes c = a + b in "double single" precision (complex version)
__device__ void zcadd(volatile QudaSumComplex &c0, volatile QudaSumComplex &c1, const volatile QudaSumComplex &a0,
const volatile QudaSumComplex &a1, const volatile QudaSumComplex &b0, const volatile QudaSumComplex &b1) {
// Compute dsa + dsb using Knuth's trick.
QudaSumFloat t1 = a0.x + b0.x;
QudaSumFloat e = t1 - a0.x;
QudaSumFloat t2 = ((b0.x - e) + (a0.x - (t1 - e))) + a1.x + b1.x;
// The result is t1 + t2, after normalization.
c0.x = e = t1 + t2;
c1.x = t2 - (e - t1);
// Compute dsa + dsb using Knuth's trick.
t1 = a0.y + b0.y;
e = t1 - a0.y;
t2 = ((b0.y - e) + (a0.y - (t1 - e))) + a1.y + b1.y;
// The result is t1 + t2, after normalization.
c0.y = e = t1 + t2;
c1.y = t2 - (e - t1);
}
// Computes c = a + b in "double single" precision (float3 version)
__device__ void dsadd3(volatile QudaSumFloat3 &c0, volatile QudaSumFloat3 &c1, const volatile QudaSumFloat3 &a0,
const volatile QudaSumFloat3 &a1, const volatile QudaSumFloat3 &b0, const volatile QudaSumFloat3 &b1) {
// Compute dsa + dsb using Knuth's trick.
QudaSumFloat t1 = a0.x + b0.x;
QudaSumFloat e = t1 - a0.x;
QudaSumFloat t2 = ((b0.x - e) + (a0.x - (t1 - e))) + a1.x + b1.x;
// The result is t1 + t2, after normalization.
c0.x = e = t1 + t2;
c1.x = t2 - (e - t1);
// Compute dsa + dsb using Knuth's trick.
t1 = a0.y + b0.y;
e = t1 - a0.y;
t2 = ((b0.y - e) + (a0.y - (t1 - e))) + a1.y + b1.y;
// The result is t1 + t2, after normalization.
c0.y = e = t1 + t2;
c1.y = t2 - (e - t1);
// Compute dsa + dsb using Knuth's trick.
t1 = a0.z + b0.z;
e = t1 - a0.z;
t2 = ((b0.z - e) + (a0.z - (t1 - e))) + a1.z + b1.z;
// The result is t1 + t2, after normalization.
c0.z = e = t1 + t2;
c1.z = t2 - (e - t1);
}
#endif
//
// double normCuda(float *a, int n) {}
//
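// The reductions below are generated by parameterizing reduce_core.h (and the
// complex/triple variants) with macros: REDUCE_TYPES/REDUCE_PARAMS declare the
// kernel arguments, REDUCE_AUXILIARY(i) does per-element work, and
// REDUCE_OPERATION(i) is the quantity summed. Roughly (sketch only, not the
// literal generated code):
//   while (i < n) { sum += REDUCE_OPERATION(i); i += gridSize; }
// followed by a shared-memory reduction and a final accumulation on the host.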
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) normD##suffix
#define REDUCE_TYPES Float *a
#define REDUCE_PARAMS a
#define REDUCE_AUXILIARY(i)
#define REDUCE_OPERATION(i) (a[i]*a[i])
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) normS##suffix
#define REDUCE_TYPES Float *a
#define REDUCE_PARAMS a
#define REDUCE_AUXILIARY(i)
#define REDUCE_OPERATION(i) (a[i].x*a[i].x + a[i].y*a[i].y)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
//
// double normHCuda(char *, int n) {}
//
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) normH##suffix
#define REDUCE_TYPES Float *aN, int stride // dummy type
#define REDUCE_PARAMS aN, stride
#define REDUCE_AUXILIARY(i) \
READ_HALF_SPINOR(a, texHalf1, stride); \
REAL_DOT_FLOAT4(norm0, a0, a0); \
REAL_DOT_FLOAT4(norm1, a1, a1); \
REAL_DOT_FLOAT4(norm2, a2, a2); \
REAL_DOT_FLOAT4(norm3, a3, a3); \
REAL_DOT_FLOAT4(norm4, a4, a4); \
REAL_DOT_FLOAT4(norm5, a5, a5); \
norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4;
#define REDUCE_OPERATION(i) (ac*ac*norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) normHSt##suffix
#define REDUCE_TYPES Float *aN, int stride // dummy type
#define REDUCE_PARAMS aN, stride
#define REDUCE_AUXILIARY(i) \
READ_HALF_SPINOR_ST(a, texHalfSt1, stride); \
REAL_DOT_FLOAT2(norm0, a0, a0); \
REAL_DOT_FLOAT2(norm1, a1, a1); \
REAL_DOT_FLOAT2(norm2, a2, a2); \
norm0 += norm1; norm0 += norm2;
#define REDUCE_OPERATION(i) (ac*ac*norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
double normCuda(const cudaColorSpinorField &a) {
if (a.SiteSubset() == QUDA_FULL_SITE_SUBSET) return normCuda(a.Even()) + normCuda(a.Odd());
const int id = 14;
quda::blas_flops += 2*a.RealLength();
quda::blas_bytes += a.RealLength()*a.Precision();
if (a.Precision() == QUDA_DOUBLE_PRECISION) {
return normDCuda((double*)a.V(), a.Length(), id, a.Precision());
} else if (a.Precision() == QUDA_SINGLE_PRECISION) {
return normSCuda((float2*)a.V(), a.Length()/2, id, a.Precision());
} else {
int spinor_bytes = a.Length()*sizeof(short);
int half_norm_ratio = (a.Ncolor()*a.Nspin()*2*sizeof(short))/sizeof(float);
quda::blas_bytes += (a.RealLength()*a.Precision()) / (a.Ncolor() * a.Nspin());
hipBindTexture(0, texNorm1, a.Norm(), spinor_bytes/half_norm_ratio);
if (a.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, a.V(), spinor_bytes);
return normHCuda((float*)a.Norm(), a.Stride(), a.Volume(), id, a.Precision());
}else if (a.Nspin() == 1) { //staggered
hipBindTexture(0, texHalfSt1, a.V(), spinor_bytes);
return normHStCuda((float*)a.Norm(), a.Stride(), a.Volume(), id, a.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, a.Nspin());
return 0;
}
}
}
//
// double reDotProductFCuda(float *a, float *b, int n) {}
//
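// Real inner product: sum_i a[i]*b[i], with the float2-packed variant summing
// over both components of each element.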
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) reDotProductD##suffix
#define REDUCE_TYPES Float *a, Float *b
#define REDUCE_PARAMS a, b
#define REDUCE_AUXILIARY(i)
#define REDUCE_OPERATION(i) (a[i]*b[i])
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) reDotProductS##suffix
#define REDUCE_TYPES Float *a, Float *b
#define REDUCE_PARAMS a, b
#define REDUCE_AUXILIARY(i)
#define REDUCE_OPERATION(i) (a[i].x*b[i].x + a[i].y*b[i].y)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
//
// double reDotProductHCuda(float *a, float *b, int n) {}
//
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) reDotProductH##suffix
#define REDUCE_TYPES Float *aN, Float *bN, int stride
#define REDUCE_PARAMS aN, bN, stride
#define REDUCE_AUXILIARY(i) \
READ_HALF_SPINOR(a, texHalf1, stride); \
READ_HALF_SPINOR(b, texHalf2, stride); \
REAL_DOT_FLOAT4(rdot0, a0, b0); \
REAL_DOT_FLOAT4(rdot1, a1, b1); \
REAL_DOT_FLOAT4(rdot2, a2, b2); \
REAL_DOT_FLOAT4(rdot3, a3, b3); \
REAL_DOT_FLOAT4(rdot4, a4, b4); \
REAL_DOT_FLOAT4(rdot5, a5, b5); \
rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4;
#define REDUCE_OPERATION(i) (ac*bc*rdot0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) reDotProductHSt##suffix
#define REDUCE_TYPES Float *aN, Float *bN, int stride
#define REDUCE_PARAMS aN, bN, stride
#define REDUCE_AUXILIARY(i) \
READ_HALF_SPINOR_ST(a, texHalfSt1, stride); \
READ_HALF_SPINOR_ST(b, texHalfSt2, stride); \
REAL_DOT_FLOAT2(rdot0, a0, b0); \
REAL_DOT_FLOAT2(rdot1, a1, b1); \
REAL_DOT_FLOAT2(rdot2, a2, b2); \
rdot0 += rdot1; rdot0 += rdot2;
#define REDUCE_OPERATION(i) (ac*bc*rdot0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
double reDotProductCuda(cudaColorSpinorField &a, cudaColorSpinorField &b) {
if (a.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
return reDotProductCuda(a.Even(), b.Even()) + reDotProductCuda(a.Odd(), b.Odd());
}
const int id = 15;
quda::blas_flops += 2*a.RealLength();
checkSpinor(a, b);
quda::blas_bytes += 2*a.RealLength()*a.Precision();
if (a.Precision() == QUDA_DOUBLE_PRECISION) {
return reDotProductDCuda((double*)a.V(), (double*)b.V(), a.Length(), id, a.Precision());
} else if (a.Precision() == QUDA_SINGLE_PRECISION) {
return reDotProductSCuda((float2*)a.V(), (float2*)b.V(), a.Length()/2, id, a.Precision());
} else {
quda::blas_bytes += 2*a.Volume()*sizeof(float);
int spinor_bytes = a.Length()*sizeof(short);
if (a.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, a.V(), spinor_bytes);
hipBindTexture(0, texNorm1, a.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, b.V(), spinor_bytes);
hipBindTexture(0, texNorm2, b.Norm(), spinor_bytes/12);
return reDotProductHCuda((float*)a.Norm(), (float*)b.Norm(), a.Stride(), a.Volume(), id, a.Precision());
}else if (a.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, a.V(), spinor_bytes);
hipBindTexture(0, texNorm1, a.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, b.V(), spinor_bytes);
hipBindTexture(0, texNorm2, b.Norm(), spinor_bytes/3);
return reDotProductHStCuda((float*)a.Norm(), (float*)b.Norm(), a.Stride(), a.Volume(), id, a.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, a.Nspin());
return 0;
}
}
}
//
// double axpyNormCuda(float a, float *x, float *y, n){}
//
// First performs the operation y[i] = a*x[i] + y[i]
// Second returns the norm of y
//
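// Illustrative reference (sketch, not compiled):
//   double norm = 0.0;
//   for (int i = 0; i < N; i++) { y[i] = a*x[i] + y[i]; norm += y[i]*y[i]; }
//   return norm;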
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) axpyNormF##suffix
#define REDUCE_TYPES Float a, Float *x, Float *y
#define REDUCE_PARAMS a, x, y
#define REDUCE_AUXILIARY(i) y[i] = a*x[i] + y[i]
#define REDUCE_OPERATION(i) (y[i]*y[i])
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) axpyNormH##suffix
#define REDUCE_TYPES Float a, short4 *yH, float *yN, int stride
#define REDUCE_PARAMS a, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \
AXPY_FLOAT4(a, x0, y0); \
REAL_DOT_FLOAT4(norm0, y0, y0); \
AXPY_FLOAT4(a, x1, y1); \
REAL_DOT_FLOAT4(norm1, y1, y1); \
AXPY_FLOAT4(a, x2, y2); \
REAL_DOT_FLOAT4(norm2, y2, y2); \
AXPY_FLOAT4(a, x3, y3); \
REAL_DOT_FLOAT4(norm3, y3, y3); \
AXPY_FLOAT4(a, x4, y4); \
REAL_DOT_FLOAT4(norm4, y4, y4); \
AXPY_FLOAT4(a, x5, y5); \
REAL_DOT_FLOAT4(norm5, y5, y5); \
norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) axpyNormH##suffix
#define REDUCE_TYPES Float a, short2 *yH, float *yN, int stride
#define REDUCE_PARAMS a, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \
AXPY_FLOAT2(a, x0, y0); \
REAL_DOT_FLOAT2(norm0, y0, y0); \
AXPY_FLOAT2(a, x1, y1); \
REAL_DOT_FLOAT2(norm1, y1, y1); \
AXPY_FLOAT2(a, x2, y2); \
REAL_DOT_FLOAT2(norm2, y2, y2); \
norm0 += norm1; norm0 += norm2; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
double axpyNormCuda(const double &a, cudaColorSpinorField &x, cudaColorSpinorField &y) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return axpyNormCuda(a, x.Even(), y.Even()) + axpyNormCuda(a, x.Odd(), y.Odd());
const int id = 16;
quda::blas_flops += 4*x.RealLength();
checkSpinor(x,y);
quda::blas_bytes += 3*x.RealLength()*x.Precision();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
return axpyNormFCuda(a, (double*)x.V(), (double*)y.V(), x.Length(), id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
return axpyNormFCuda((float)a, (float*)x.V(), (float*)y.V(), x.Length(), id, x.Precision());
} else {
hipBindTexture(0, texNorm1, x.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
hipBindTexture(0, texNorm2, y.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
quda::blas_bytes += 3*x.Volume()*sizeof(float);
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), x.Bytes());
hipBindTexture(0, texHalf2, y.V(), x.Bytes());
return axpyNormHCuda((float)a, (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), x.Bytes());
hipBindTexture(0, texHalfSt2, y.V(), x.Bytes());
return axpyNormHCuda((float)a, (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
return 0;
}
}
}
//
// double xmyNormCuda(float a, float *x, float *y, n){}
//
// First performs the operation y[i] = x[i] - y[i]
// Second returns the norm of y
//
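// Illustrative reference (sketch, not compiled):
//   double norm = 0.0;
//   for (int i = 0; i < N; i++) { y[i] = x[i] - y[i]; norm += y[i]*y[i]; }
//   return norm;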
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) xmyNormF##suffix
#define REDUCE_TYPES Float *x, Float *y
#define REDUCE_PARAMS x, y
#define REDUCE_AUXILIARY(i) y[i] = x[i] - y[i]
#define REDUCE_OPERATION(i) (y[i]*y[i])
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) xmyNormH##suffix
#define REDUCE_TYPES Float *d1, Float *d2, short4 *yH, float *yN, int stride
#define REDUCE_PARAMS d1, d2, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \
XMY_FLOAT4(x0, y0); \
REAL_DOT_FLOAT4(norm0, y0, y0); \
XMY_FLOAT4(x1, y1); \
REAL_DOT_FLOAT4(norm1, y1, y1); \
XMY_FLOAT4(x2, y2); \
REAL_DOT_FLOAT4(norm2, y2, y2); \
XMY_FLOAT4(x3, y3); \
REAL_DOT_FLOAT4(norm3, y3, y3); \
XMY_FLOAT4(x4, y4); \
REAL_DOT_FLOAT4(norm4, y4, y4); \
XMY_FLOAT4(x5, y5); \
REAL_DOT_FLOAT4(norm5, y5, y5); \
norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) xmyNormH##suffix
#define REDUCE_TYPES Float *d1, Float *d2, short2 *yH, float *yN, int stride
#define REDUCE_PARAMS d1, d2, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \
XMY_FLOAT2(x0, y0); \
REAL_DOT_FLOAT2(norm0, y0, y0); \
XMY_FLOAT2(x1, y1); \
REAL_DOT_FLOAT2(norm1, y1, y1); \
XMY_FLOAT2(x2, y2); \
REAL_DOT_FLOAT2(norm2, y2, y2); \
norm0 += norm1; norm0 += norm2; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
double xmyNormCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return xmyNormCuda(x.Even(), y.Even()) + xmyNormCuda(x.Odd(), y.Odd());
const int id = 17;
quda::blas_flops += 3*x.RealLength();
checkSpinor(x,y);
quda::blas_bytes += 3*x.RealLength()*x.Precision();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
return xmyNormFCuda((double*)x.V(), (double*)y.V(), x.Length(), id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
return xmyNormFCuda((float*)x.V(), (float*)y.V(), x.Length(), id, x.Precision());
} else {
hipBindTexture(0, texNorm1, x.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
hipBindTexture(0, texNorm2, y.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
quda::blas_bytes += 3*x.Volume()*sizeof(float);
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), x.Bytes());
hipBindTexture(0, texHalf2, y.V(), x.Bytes());
return xmyNormHCuda((char*)0, (char*)0, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume(), id, x.Precision());
}else if (x.Nspin() == 1){
hipBindTexture(0, texHalfSt1, x.V(), x.Bytes());
hipBindTexture(0, texHalfSt2, y.V(), x.Bytes());
return xmyNormHCuda((char*)0, (char*)0, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
}
}
exit(-1);
}
//
// double2 cDotProductCuda(float2 *x, float2 *y, int n) {}
//
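// Computes sum_i conj(x[i])*y[i]: per element the real part is
// x.x*y.x + x.y*y.y and the imaginary part is x.x*y.y - x.y*y.x, matching the
// REDUCE_{REAL,IMAG}_OPERATION macros below.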
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductD##suffix
#define REDUCE_TYPES Float2 *x, Float2 *y, Float c
#define REDUCE_PARAMS x, y, c
#define REDUCE_REAL_AUXILIARY(i) Float2 a = READ_DOUBLE2_TEXTURE(x, i);
#define REDUCE_IMAG_AUXILIARY(i) Float2 b = READ_DOUBLE2_TEXTURE(y, i);
#define REDUCE_REAL_OPERATION(i) (a.x*b.x + a.y*b.y)
#define REDUCE_IMAG_OPERATION(i) (a.x*b.y - a.y*b.x)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductS##suffix
#define REDUCE_TYPES Float2 *x, Float2 *y, Float c
#define REDUCE_PARAMS x, y, c
#define REDUCE_REAL_AUXILIARY(i) Float2 a = read_Float2(x, i);
#define REDUCE_IMAG_AUXILIARY(i) Float2 b = read_Float2(y, i);
#define REDUCE_REAL_OPERATION(i) (a.x*b.x + a.y*b.y)
#define REDUCE_IMAG_OPERATION(i) (a.x*b.y - a.y*b.x)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductH##suffix
#define REDUCE_TYPES Float *aN, Float2 *bN, int stride
#define REDUCE_PARAMS aN, bN, stride
#define REDUCE_REAL_AUXILIARY(i) \
READ_HALF_SPINOR(a, texHalf1, stride); \
READ_HALF_SPINOR(b, texHalf2, stride); \
REAL_DOT_FLOAT4(rdot0, a0, b0); \
REAL_DOT_FLOAT4(rdot1, a1, b1); \
REAL_DOT_FLOAT4(rdot2, a2, b2); \
REAL_DOT_FLOAT4(rdot3, a3, b3); \
REAL_DOT_FLOAT4(rdot4, a4, b4); \
REAL_DOT_FLOAT4(rdot5, a5, b5); \
rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4;
#define REDUCE_IMAG_AUXILIARY(i) \
IMAG_DOT_FLOAT4(idot0, a0, b0); \
IMAG_DOT_FLOAT4(idot1, a1, b1); \
IMAG_DOT_FLOAT4(idot2, a2, b2); \
IMAG_DOT_FLOAT4(idot3, a3, b3); \
IMAG_DOT_FLOAT4(idot4, a4, b4); \
IMAG_DOT_FLOAT4(idot5, a5, b5); \
idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4;
#define REDUCE_REAL_OPERATION(i) (ac*bc*rdot0)
#define REDUCE_IMAG_OPERATION(i) (ac*bc*idot0)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductHSt##suffix
#define REDUCE_TYPES Float *aN, Float2 *bN, int stride
#define REDUCE_PARAMS aN, bN, stride
#define REDUCE_REAL_AUXILIARY(i) \
READ_HALF_SPINOR_ST(a, texHalfSt1, stride); \
READ_HALF_SPINOR_ST(b, texHalfSt2, stride); \
REAL_DOT_FLOAT2(rdot0, a0, b0); \
REAL_DOT_FLOAT2(rdot1, a1, b1); \
REAL_DOT_FLOAT2(rdot2, a2, b2); \
rdot0 += rdot1; rdot0 += rdot2;
#define REDUCE_IMAG_AUXILIARY(i) \
IMAG_DOT_FLOAT2(idot0, a0, b0); \
IMAG_DOT_FLOAT2(idot1, a1, b1); \
IMAG_DOT_FLOAT2(idot2, a2, b2); \
idot0 += idot1; idot0 += idot2;
#define REDUCE_REAL_OPERATION(i) (ac*bc*rdot0)
#define REDUCE_IMAG_OPERATION(i) (ac*bc*idot0)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
quda::Complex cDotProductCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return cDotProductCuda(x.Even(), y.Even()) + cDotProductCuda(x.Odd(), y.Odd());
const int id = 18;
quda::blas_flops += 4*x.RealLength();
checkSpinor(x,y);
int length = x.Length()/2;
quda::blas_bytes += 2*x.RealLength()*x.Precision();
double2 dot;
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
char c = 0;
int spinor_bytes = x.Length()*sizeof(double);
hipBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
hipBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
dot = cDotProductDCuda((double2*)x.V(), (double2*)y.V(), c, length, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
char c = 0;
int spinor_bytes = x.Length()*sizeof(float);
hipBindTexture(0, xTexSingle2, x.V(), spinor_bytes);
hipBindTexture(0, yTexSingle2, y.V(), spinor_bytes);
dot = cDotProductSCuda((float2*)x.V(), (float2*)y.V(), c, length, id, x.Precision());
} else {
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 2*x.Volume()*sizeof(float);
if (x.Nspin() == 4){
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
dot = cDotProductHCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
} else if (x.Nspin() == 1){
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
dot = cDotProductHStCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
}
}
return quda::Complex(dot.x, dot.y);
}
//
// double2 xpaycDotzyCuda(float2 *x, float a, float2 *y, float2 *z, int n) {}
//
// First performs the operation y = x + a*y
// Second returns complex dot product (z,y)
//
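// Illustrative reference (sketch, not compiled; a is real, dot is complex):
//   for (int i = 0; i < N; i++) {
//     y[i] = x[i] + a*y[i];
//     dot += conj(z[i])*y[i];
//   }
//   return dot;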
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) xpaycDotzyD##suffix
#define REDUCE_TYPES Float2 *x, Float a, Float2 *y, Float2 *z
#define REDUCE_PARAMS x, a, y, z
#define REDUCE_REAL_AUXILIARY(i) \
Float2 X = READ_DOUBLE2_TEXTURE(x, i); \
Float2 Y = READ_DOUBLE2_TEXTURE(y, i); \
Float2 Z = READ_DOUBLE2_TEXTURE(z, i);
#define REDUCE_IMAG_AUXILIARY(i) y[i].x = X.x + a*Y.x; y[i].y = X.y + a*Y.y
#define REDUCE_REAL_OPERATION(i) (Z.x*y[i].x + Z.y*y[i].y)
#define REDUCE_IMAG_OPERATION(i) (Z.x*y[i].y - Z.y*y[i].x)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) xpaycDotzyS##suffix
#define REDUCE_TYPES Float2 *x, Float a, Float2 *y, Float2 *z
#define REDUCE_PARAMS x, a, y, z
#define REDUCE_REAL_AUXILIARY(i) y[i].x = x[i].x + a*y[i].x
#define REDUCE_IMAG_AUXILIARY(i) y[i].y = x[i].y + a*y[i].y
#define REDUCE_REAL_OPERATION(i) (z[i].x*y[i].x + z[i].y*y[i].y)
#define REDUCE_IMAG_OPERATION(i) (z[i].x*y[i].y - z[i].y*y[i].x)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) xpaycDotzyH##suffix
#define REDUCE_TYPES Float a, short4 *yH, Float2 *yN, int stride
#define REDUCE_PARAMS a, yH, yN, stride
#define REDUCE_REAL_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); \
XPAY_FLOAT4(x0, a, y0); \
XPAY_FLOAT4(x1, a, y1); \
XPAY_FLOAT4(x2, a, y2); \
XPAY_FLOAT4(x3, a, y3); \
XPAY_FLOAT4(x4, a, y4); \
XPAY_FLOAT4(x5, a, y5); \
REAL_DOT_FLOAT4(rdot0, z0, y0); \
REAL_DOT_FLOAT4(rdot1, z1, y1); \
REAL_DOT_FLOAT4(rdot2, z2, y2); \
REAL_DOT_FLOAT4(rdot3, z3, y3); \
REAL_DOT_FLOAT4(rdot4, z4, y4); \
REAL_DOT_FLOAT4(rdot5, z5, y5); \
rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4;
#define REDUCE_IMAG_AUXILIARY(i) \
IMAG_DOT_FLOAT4(idot0, z0, y0); \
IMAG_DOT_FLOAT4(idot1, z1, y1); \
IMAG_DOT_FLOAT4(idot2, z2, y2); \
IMAG_DOT_FLOAT4(idot3, z3, y3); \
IMAG_DOT_FLOAT4(idot4, z4, y4); \
IMAG_DOT_FLOAT4(idot5, z5, y5); \
idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
#define REDUCE_REAL_OPERATION(i) (rdot0)
#define REDUCE_IMAG_OPERATION(i) (idot0)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) xpaycDotzyH##suffix
#define REDUCE_TYPES Float a, short2 *yH, Float2 *yN, int stride
#define REDUCE_PARAMS a, yH, yN, stride
#define REDUCE_REAL_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); \
XPAY_FLOAT2(x0, a, y0); \
XPAY_FLOAT2(x1, a, y1); \
XPAY_FLOAT2(x2, a, y2); \
REAL_DOT_FLOAT2(rdot0, z0, y0); \
REAL_DOT_FLOAT2(rdot1, z1, y1); \
REAL_DOT_FLOAT2(rdot2, z2, y2); \
rdot0 += rdot1; rdot0 += rdot2;
#define REDUCE_IMAG_AUXILIARY(i) \
IMAG_DOT_FLOAT2(idot0, z0, y0); \
IMAG_DOT_FLOAT2(idot1, z1, y1); \
IMAG_DOT_FLOAT2(idot2, z2, y2); \
idot0 += idot1; idot0 += idot2; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
#define REDUCE_REAL_OPERATION(i) (rdot0)
#define REDUCE_IMAG_OPERATION(i) (idot0)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
quda::Complex xpaycDotzyCuda(cudaColorSpinorField &x, const double &a, cudaColorSpinorField &y, cudaColorSpinorField &z) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return xpaycDotzyCuda(x.Even(), a, y.Even(), z.Even()) + xpaycDotzyCuda(x.Odd(), a, y.Odd(), z.Odd());
const int id = 19;
quda::blas_flops += 6*x.RealLength();
checkSpinor(x,y);
checkSpinor(x,z);
int length = x.Length()/2;
quda::blas_bytes += 4*x.RealLength()*x.Precision();
double2 dot;
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
hipBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
hipBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
hipBindTexture(0, zTexDouble2, z.V(), spinor_bytes);
dot = xpaycDotzyDCuda((double2*)x.V(), a, (double2*)y.V(), (double2*)z.V(), length, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
dot = xpaycDotzySCuda((float2*)x.V(), (float)a, (float2*)y.V(), (float2*)z.V(), length, id, x.Precision());
} else {
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 4*x.Volume()*sizeof(float);
if (x.Nspin() ==4 ){//wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf3, z.V(), spinor_bytes);
hipBindTexture(0, texNorm3, z.Norm(), spinor_bytes/12);
dot = xpaycDotzyHCuda((float)a, (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
} else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt3, z.V(), spinor_bytes);
hipBindTexture(0, texNorm3, z.Norm(), spinor_bytes/3);
dot = xpaycDotzyHCuda((float)a, (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
}
}
return quda::Complex(dot.x, dot.y);
}
//
// double3 cDotProductNormACuda(float2 *a, float2 *b, int n) {}
//
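// Returns (Re<x,y>, Im<x,y>, ||x||^2) in the .x/.y/.z fields of the double3;
// the "A" variant carries the norm of the first argument.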
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductNormAD##suffix
#define REDUCE_TYPES Float2 *x, Float2 *y
#define REDUCE_PARAMS x, y
#define REDUCE_X_AUXILIARY(i) Float2 a = READ_DOUBLE2_TEXTURE(x, i);
#define REDUCE_Y_AUXILIARY(i) Float2 b = READ_DOUBLE2_TEXTURE(y, i);
#define REDUCE_Z_AUXILIARY(i)
#define REDUCE_X_OPERATION(i) (a.x*b.x + a.y*b.y)
#define REDUCE_Y_OPERATION(i) (a.x*b.y - a.y*b.x)
#define REDUCE_Z_OPERATION(i) (a.x*a.x + a.y*a.y)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductNormAS##suffix
#define REDUCE_TYPES Float2 *a, Float2 *b
#define REDUCE_PARAMS a, b
#define REDUCE_X_AUXILIARY(i)
#define REDUCE_Y_AUXILIARY(i)
#define REDUCE_Z_AUXILIARY(i)
#define REDUCE_X_OPERATION(i) (a[i].x*b[i].x + a[i].y*b[i].y)
#define REDUCE_Y_OPERATION(i) (a[i].x*b[i].y - a[i].y*b[i].x)
#define REDUCE_Z_OPERATION(i) (a[i].x*a[i].x + a[i].y*a[i].y)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductNormAH##suffix
#define REDUCE_TYPES Float2 *xN, Float2 *yN, int stride
#define REDUCE_PARAMS xN, yN, stride
#define REDUCE_X_AUXILIARY(i) \
READ_HALF_SPINOR(x, texHalf1, stride); \
READ_HALF_SPINOR(y, texHalf2, stride); \
REAL_DOT_FLOAT4(norm0, x0, x0); \
REAL_DOT_FLOAT4(norm1, x1, x1); \
REAL_DOT_FLOAT4(norm2, x2, x2); \
REAL_DOT_FLOAT4(norm3, x3, x3); \
REAL_DOT_FLOAT4(norm4, x4, x4); \
REAL_DOT_FLOAT4(norm5, x5, x5); \
norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4;
#define REDUCE_Y_AUXILIARY(i) \
REAL_DOT_FLOAT4(rdot0, x0, y0); \
REAL_DOT_FLOAT4(rdot1, x1, y1); \
REAL_DOT_FLOAT4(rdot2, x2, y2); \
REAL_DOT_FLOAT4(rdot3, x3, y3); \
REAL_DOT_FLOAT4(rdot4, x4, y4); \
REAL_DOT_FLOAT4(rdot5, x5, y5); \
rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4;
#define REDUCE_Z_AUXILIARY(i) \
IMAG_DOT_FLOAT4(idot0, x0, y0); \
IMAG_DOT_FLOAT4(idot1, x1, y1); \
IMAG_DOT_FLOAT4(idot2, x2, y2); \
IMAG_DOT_FLOAT4(idot3, x3, y3); \
IMAG_DOT_FLOAT4(idot4, x4, y4); \
IMAG_DOT_FLOAT4(idot5, x5, y5); \
idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4;
#define REDUCE_X_OPERATION(i) (xc*yc*rdot0)
#define REDUCE_Y_OPERATION(i) (xc*yc*idot0)
#define REDUCE_Z_OPERATION(i) (xc*xc*norm0)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductNormAHSt##suffix
#define REDUCE_TYPES Float2 *xN, Float2 *yN, int stride
#define REDUCE_PARAMS xN, yN, stride
#define REDUCE_X_AUXILIARY(i) \
READ_HALF_SPINOR_ST(x, texHalfSt1, stride); \
READ_HALF_SPINOR_ST(y, texHalfSt2, stride); \
REAL_DOT_FLOAT2(norm0, x0, x0); \
REAL_DOT_FLOAT2(norm1, x1, x1); \
REAL_DOT_FLOAT2(norm2, x2, x2); \
norm0 += norm1; norm0 += norm2;
#define REDUCE_Y_AUXILIARY(i) \
REAL_DOT_FLOAT2(rdot0, x0, y0); \
REAL_DOT_FLOAT2(rdot1, x1, y1); \
REAL_DOT_FLOAT2(rdot2, x2, y2); \
rdot0 += rdot1; rdot0 += rdot2;
#define REDUCE_Z_AUXILIARY(i) \
IMAG_DOT_FLOAT2(idot0, x0, y0); \
IMAG_DOT_FLOAT2(idot1, x1, y1); \
IMAG_DOT_FLOAT2(idot2, x2, y2); \
idot0 += idot1; idot0 += idot2;
#define REDUCE_X_OPERATION(i) (xc*yc*rdot0)
#define REDUCE_Y_OPERATION(i) (xc*yc*idot0)
#define REDUCE_Z_OPERATION(i) (xc*xc*norm0)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
double3 cDotProductNormACuda(cudaColorSpinorField &x, cudaColorSpinorField &y) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return cDotProductNormACuda(x.Even(), y.Even()) + cDotProductNormACuda(x.Odd(), y.Odd());
const int id = 20;
quda::blas_flops += 6*x.RealLength();
checkSpinor(x,y);
int length = x.Length()/2;
quda::blas_bytes += 2*x.RealLength()*x.Precision();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
hipBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
hipBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
return cDotProductNormADCuda((double2*)x.V(), (double2*)y.V(), length, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
return cDotProductNormASCuda((float2*)x.V(), (float2*)y.V(), length, id, x.Precision());
} else {
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 2*x.Volume()*sizeof(float);
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
return cDotProductNormAHCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
} else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
return cDotProductNormAHStCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
}
}
exit(-1);
}
//
// double3 cDotProductNormBCuda(float2 *a, float2 *b, int n) {}
//
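// Same as the "A" variant except that the third component is ||y||^2, the
// norm of the second argument.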
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductNormBD##suffix
#define REDUCE_TYPES Float2 *x, Float2 *y
#define REDUCE_PARAMS x, y
#define REDUCE_X_AUXILIARY(i) Float2 a = READ_DOUBLE2_TEXTURE(x, i);
#define REDUCE_Y_AUXILIARY(i) Float2 b = READ_DOUBLE2_TEXTURE(y, i);
#define REDUCE_Z_AUXILIARY(i)
#define REDUCE_X_OPERATION(i) (a.x*b.x + a.y*b.y)
#define REDUCE_Y_OPERATION(i) (a.x*b.y - a.y*b.x)
#define REDUCE_Z_OPERATION(i) (b.x*b.x + b.y*b.y)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductNormBS##suffix
#define REDUCE_TYPES Float2 *a, Float2 *b
#define REDUCE_PARAMS a, b
#define REDUCE_X_AUXILIARY(i)
#define REDUCE_Y_AUXILIARY(i)
#define REDUCE_Z_AUXILIARY(i)
#define REDUCE_X_OPERATION(i) (a[i].x*b[i].x + a[i].y*b[i].y)
#define REDUCE_Y_OPERATION(i) (a[i].x*b[i].y - a[i].y*b[i].x)
#define REDUCE_Z_OPERATION(i) (b[i].x*b[i].x + b[i].y*b[i].y)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductNormBH##suffix
#define REDUCE_TYPES Float2 *xN, Float2 *yN, int stride
#define REDUCE_PARAMS xN, yN, stride
#define REDUCE_X_AUXILIARY(i) \
READ_HALF_SPINOR(x, texHalf1, stride); \
READ_HALF_SPINOR(y, texHalf2, stride); \
REAL_DOT_FLOAT4(norm0, y0, y0); \
REAL_DOT_FLOAT4(norm1, y1, y1); \
REAL_DOT_FLOAT4(norm2, y2, y2); \
REAL_DOT_FLOAT4(norm3, y3, y3); \
REAL_DOT_FLOAT4(norm4, y4, y4); \
REAL_DOT_FLOAT4(norm5, y5, y5); \
norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4;
#define REDUCE_Y_AUXILIARY(i) \
REAL_DOT_FLOAT4(rdot0, x0, y0); \
REAL_DOT_FLOAT4(rdot1, x1, y1); \
REAL_DOT_FLOAT4(rdot2, x2, y2); \
REAL_DOT_FLOAT4(rdot3, x3, y3); \
REAL_DOT_FLOAT4(rdot4, x4, y4); \
REAL_DOT_FLOAT4(rdot5, x5, y5); \
rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4;
#define REDUCE_Z_AUXILIARY(i) \
IMAG_DOT_FLOAT4(idot0, x0, y0); \
IMAG_DOT_FLOAT4(idot1, x1, y1); \
IMAG_DOT_FLOAT4(idot2, x2, y2); \
IMAG_DOT_FLOAT4(idot3, x3, y3); \
IMAG_DOT_FLOAT4(idot4, x4, y4); \
IMAG_DOT_FLOAT4(idot5, x5, y5); \
idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4;
#define REDUCE_X_OPERATION(i) (xc*yc*rdot0)
#define REDUCE_Y_OPERATION(i) (xc*yc*idot0)
#define REDUCE_Z_OPERATION(i) (yc*yc*norm0)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductNormBHSt##suffix
#define REDUCE_TYPES Float2 *xN, Float2 *yN, int stride
#define REDUCE_PARAMS xN, yN, stride
#define REDUCE_X_AUXILIARY(i) \
READ_HALF_SPINOR_ST(x, texHalfSt1, stride); \
READ_HALF_SPINOR_ST(y, texHalfSt2, stride); \
REAL_DOT_FLOAT2(norm0, y0, y0); \
REAL_DOT_FLOAT2(norm1, y1, y1); \
REAL_DOT_FLOAT2(norm2, y2, y2); \
norm0 += norm1; norm0 += norm2;
#define REDUCE_Y_AUXILIARY(i) \
REAL_DOT_FLOAT2(rdot0, x0, y0); \
REAL_DOT_FLOAT2(rdot1, x1, y1); \
REAL_DOT_FLOAT2(rdot2, x2, y2); \
rdot0 += rdot1; rdot0 += rdot2;
#define REDUCE_Z_AUXILIARY(i) \
IMAG_DOT_FLOAT2(idot0, x0, y0); \
IMAG_DOT_FLOAT2(idot1, x1, y1); \
IMAG_DOT_FLOAT2(idot2, x2, y2); \
idot0 += idot1; idot0 += idot2;
#define REDUCE_X_OPERATION(i) (xc*yc*rdot0)
#define REDUCE_Y_OPERATION(i) (xc*yc*idot0)
#define REDUCE_Z_OPERATION(i) (yc*yc*norm0)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
double3 cDotProductNormBCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return cDotProductNormBCuda(x.Even(), y.Even()) + cDotProductNormBCuda(x.Odd(), y.Odd());
const int id = 21;
quda::blas_flops += 6*x.RealLength();
checkSpinor(x,y);
int length = x.Length()/2;
quda::blas_bytes += 2*x.RealLength()*x.Precision();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
hipBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
hipBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
return cDotProductNormBDCuda((double2*)x.V(), (double2*)y.V(), length, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
return cDotProductNormBSCuda((float2*)x.V(), (float2*)y.V(), length, id, x.Precision());
} else {
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 2*x.Volume()*sizeof(float);
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
return cDotProductNormBHCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
} else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
return cDotProductNormBHStCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
}
}
exit(-1);
}
//
// double3 caxpbypzYmbwcDotProductUYNormYCuda(float2 a, float2 *x, float2 b, float2 *y,
// float2 *z, float2 *w, float2 *u, int len)
//
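// Fuses the caxpbypzYmbw update with a complex dot product and a norm.
// Illustrative reference (sketch, not compiled):
//   for (int i = 0; i < N; i++) {
//     z[i] += a*x[i] + b*y[i];
//     y[i] -= b*w[i];
//     dot  += conj(u[i])*y[i];
//     norm += norm2(y[i]);      // |y[i]|^2
//   }
//   return (dot.re, dot.im, norm);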
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) caxpbypzYmbwcDotProductUYNormYD##suffix
#define REDUCE_TYPES Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, Float2 *w, Float2 *u
#define REDUCE_PARAMS a, x, b, y, z, w, u
#define REDUCE_X_AUXILIARY(i) \
Float2 X = READ_DOUBLE2_TEXTURE(x, i); \
Float2 Y = READ_DOUBLE2_TEXTURE(y, i); \
Float2 W = READ_DOUBLE2_TEXTURE(w, i);
#define REDUCE_Y_AUXILIARY(i) \
Float2 Z = read_Float2(z, i); \
Z.x += a.x*X.x - a.y*X.y; \
Z.y += a.y*X.x + a.x*X.y; \
Z.x += b.x*Y.x - b.y*Y.y; \
Z.y += b.y*Y.x + b.x*Y.y; \
Y.x -= b.x*W.x - b.y*W.y; \
Y.y -= b.y*W.x + b.x*W.y;
#define REDUCE_Z_AUXILIARY(i) \
z[i] = make_Float2(Z); \
y[i] = make_Float2(Y);
#define REDUCE_X_OPERATION(i) (u[i].x*y[i].x + u[i].y*y[i].y)
#define REDUCE_Y_OPERATION(i) (u[i].x*y[i].y - u[i].y*y[i].x)
#define REDUCE_Z_OPERATION(i) (y[i].x*y[i].x + y[i].y*y[i].y)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) caxpbypzYmbwcDotProductUYNormYS##suffix
#define REDUCE_TYPES Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, Float2 *w, Float2 *u
#define REDUCE_PARAMS a, x, b, y, z, w, u
#define REDUCE_X_AUXILIARY(i) \
Float2 X = read_Float2(x, i); \
Float2 Y = read_Float2(y, i); \
Float2 W = read_Float2(w, i);
#define REDUCE_Y_AUXILIARY(i) \
Float2 Z = read_Float2(z, i); \
Z.x += a.x*X.x - a.y*X.y; \
Z.y += a.y*X.x + a.x*X.y; \
Z.x += b.x*Y.x - b.y*Y.y; \
Z.y += b.y*Y.x + b.x*Y.y; \
Y.x -= b.x*W.x - b.y*W.y; \
Y.y -= b.y*W.x + b.x*W.y;
#define REDUCE_Z_AUXILIARY(i) \
z[i] = make_Float2(Z); \
y[i] = make_Float2(Y);
#define REDUCE_X_OPERATION(i) (u[i].x*y[i].x + u[i].y*y[i].y)
#define REDUCE_Y_OPERATION(i) (u[i].x*y[i].y - u[i].y*y[i].x)
#define REDUCE_Z_OPERATION(i) (y[i].x*y[i].x + y[i].y*y[i].y)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
//
// double3 caxpbypzYmbwcDotProductUYNormYCuda(float2 a, float2 *x, float2 b, float2 *y,
// float2 *z, float2 *w, float2 *u, int len)
//
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) caxpbypzYmbwcDotProductUYNormYH##suffix
#define REDUCE_TYPES Float2 a, Float2 b, short4 *yH, float *yN, short4 *zH, float *zN, float *wN, float *uN, int stride
#define REDUCE_PARAMS a, b, yH, yN, zH, zN, wN, uN, stride
#define REDUCE_X_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); \
CAXPBYPZ_FLOAT4(a, x0, b, y0, z0); \
CAXPBYPZ_FLOAT4(a, x1, b, y1, z1); \
CAXPBYPZ_FLOAT4(a, x2, b, y2, z2); \
CAXPBYPZ_FLOAT4(a, x3, b, y3, z3); \
CAXPBYPZ_FLOAT4(a, x4, b, y4, z4); \
CAXPBYPZ_FLOAT4(a, x5, b, y5, z5); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(zH, zN, z, stride); \
READ_HALF_SPINOR(w, texHalf4, stride); \
float2 bwc = -wc*b; \
CAXPY_FLOAT4(bwc, w0, y0); \
CAXPY_FLOAT4(bwc, w1, y1); \
CAXPY_FLOAT4(bwc, w2, y2); \
CAXPY_FLOAT4(bwc, w3, y3); \
CAXPY_FLOAT4(bwc, w4, y4); \
CAXPY_FLOAT4(bwc, w5, y5); \
REAL_DOT_FLOAT4(norm0, y0, y0); \
REAL_DOT_FLOAT4(norm1, y1, y1); \
REAL_DOT_FLOAT4(norm2, y2, y2); \
REAL_DOT_FLOAT4(norm3, y3, y3); \
REAL_DOT_FLOAT4(norm4, y4, y4); \
REAL_DOT_FLOAT4(norm5, y5, y5); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
#define REDUCE_Y_AUXILIARY(i) \
READ_HALF_SPINOR(u, texHalf5, stride); \
REAL_DOT_FLOAT4(rdot0, u0, y0); \
REAL_DOT_FLOAT4(rdot1, u1, y1); \
REAL_DOT_FLOAT4(rdot2, u2, y2); \
REAL_DOT_FLOAT4(rdot3, u3, y3); \
REAL_DOT_FLOAT4(rdot4, u4, y4); \
REAL_DOT_FLOAT4(rdot5, u5, y5); \
IMAG_DOT_FLOAT4(idot0, u0, y0); \
IMAG_DOT_FLOAT4(idot1, u1, y1); \
IMAG_DOT_FLOAT4(idot2, u2, y2); \
IMAG_DOT_FLOAT4(idot3, u3, y3); \
IMAG_DOT_FLOAT4(idot4, u4, y4); \
IMAG_DOT_FLOAT4(idot5, u5, y5);
#define REDUCE_Z_AUXILIARY(i) \
norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \
rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; \
idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4;
#define REDUCE_X_OPERATION(i) (uc*rdot0)
#define REDUCE_Y_OPERATION(i) (uc*idot0)
#define REDUCE_Z_OPERATION(i) (norm0)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) caxpbypzYmbwcDotProductUYNormYH##suffix
#define REDUCE_TYPES Float2 a, Float2 b, short2 *yH, float *yN, short2 *zH, float *zN, float *wN, float *uN, int stride
#define REDUCE_PARAMS a, b, yH, yN, zH, zN, wN, uN, stride
#define REDUCE_X_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); \
CAXPBYPZ_FLOAT2(a, x0, b, y0, z0); \
CAXPBYPZ_FLOAT2(a, x1, b, y1, z1); \
CAXPBYPZ_FLOAT2(a, x2, b, y2, z2); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(zH, zN, z, stride); \
READ_HALF_SPINOR_ST(w, texHalfSt4, stride); \
float2 bwc = -wc*b; \
CAXPY_FLOAT2(bwc, w0, y0); \
CAXPY_FLOAT2(bwc, w1, y1); \
CAXPY_FLOAT2(bwc, w2, y2); \
REAL_DOT_FLOAT2(norm0, y0, y0); \
REAL_DOT_FLOAT2(norm1, y1, y1); \
REAL_DOT_FLOAT2(norm2, y2, y2); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
#define REDUCE_Y_AUXILIARY(i) \
READ_HALF_SPINOR_ST(u, texHalfSt5, stride); \
REAL_DOT_FLOAT2(rdot0, u0, y0); \
REAL_DOT_FLOAT2(rdot1, u1, y1); \
REAL_DOT_FLOAT2(rdot2, u2, y2); \
IMAG_DOT_FLOAT2(idot0, u0, y0); \
IMAG_DOT_FLOAT2(idot1, u1, y1); \
IMAG_DOT_FLOAT2(idot2, u2, y2);
#define REDUCE_Z_AUXILIARY(i) \
norm0 += norm1; norm0 += norm2; \
rdot0 += rdot1; rdot0 += rdot2; \
idot0 += idot1; idot0 += idot2;
#define REDUCE_X_OPERATION(i) (uc*rdot0)
#define REDUCE_Y_OPERATION(i) (uc*idot0)
#define REDUCE_Z_OPERATION(i) (norm0)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
// This convoluted kernel does the following: z += a*x + b*y, y -= b*w, norm = (y,y), dot = (u, y)
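// Usage sketch (illustrative only; the scalar values and field names are placeholders, not taken from this file):
//   quda::Complex alpha(0.1, 0.2), beta(0.3, -0.4);
//   double3 r = caxpbypzYmbwcDotProductUYNormYCuda(alpha, x, beta, y, z, w, u);
//   // r.x + i*r.y = (u, y) after the update, r.z = ||y||^2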
double3 caxpbypzYmbwcDotProductUYNormYCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y,
cudaColorSpinorField &z, cudaColorSpinorField &w, cudaColorSpinorField &u) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return caxpbypzYmbwcDotProductUYNormYCuda(a, x.Even(), b, y.Even(), z.Even(), w.Even(), u.Even()) +
caxpbypzYmbwcDotProductUYNormYCuda(a, x.Odd(), b, y.Odd(), z.Odd(), w.Odd(), u.Odd());
const int id = 22;
quda::blas_flops += 18*x.RealLength();
checkSpinor(x,y);
checkSpinor(x,z);
checkSpinor(x,w);
checkSpinor(x,u);
int length = x.Length()/2;
quda::blas_bytes += 7*x.RealLength()*x.Precision();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
hipBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
hipBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
hipBindTexture(0, zTexDouble2, z.V(), spinor_bytes);
hipBindTexture(0, wTexDouble2, w.V(), spinor_bytes);
hipBindTexture(0, uTexDouble2, u.V(), spinor_bytes);
double2 a2 = make_double2(real(a), imag(a));
double2 b2 = make_double2(real(b), imag(b));
return caxpbypzYmbwcDotProductUYNormYDCuda(a2, (double2*)x.V(), b2, (double2*)y.V(), (double2*)z.V(),
(double2*)w.V(), (double2*)u.V(), length, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
return caxpbypzYmbwcDotProductUYNormYSCuda(a2, (float2*)x.V(), b2, (float2*)y.V(), (float2*)z.V(),
(float2*)w.V(), (float2*)u.V(), length, id, x.Precision());
} else {
// fused nSpin=4 kernel is slow on Fermi
// N.B. this introduces an extra half truncation so will affect convergence (for the better?)
if (!blasTuning && (__CUDA_ARCH__ >= 200) && x.Nspin() == 4) {
caxpbypzYmbwCuda(a, x, b, y, z, w);
return cDotProductNormBCuda(u, y);
}
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 7*x.Volume()*sizeof(float);
if (x.Nspin() == 4) { // wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf3, z.V(), spinor_bytes);
hipBindTexture(0, texNorm3, z.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf4, w.V(), spinor_bytes);
hipBindTexture(0, texNorm4, w.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf5, u.V(), spinor_bytes);
hipBindTexture(0, texNorm5, u.Norm(), spinor_bytes/12);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
return caxpbypzYmbwcDotProductUYNormYHCuda(a2, b2, (short4*)y.V(), (float*)y.Norm(),
(short4*)z.V(), (float*)z.Norm(), (float*)w.Norm(), (float*)u.Norm(),
y.Stride(), y.Volume(), id, x.Precision());
} else if (x.Nspin() == 1){ // staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt3, z.V(), spinor_bytes);
hipBindTexture(0, texNorm3, z.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt4, w.V(), spinor_bytes);
hipBindTexture(0, texNorm4, w.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt5, u.V(), spinor_bytes);
hipBindTexture(0, texNorm5, u.Norm(), spinor_bytes/3);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
return caxpbypzYmbwcDotProductUYNormYHCuda(a2, b2, (short2*)y.V(), (float*)y.Norm(),
(short2*)z.V(), (float*)z.Norm(), (float*)w.Norm(), (float*)u.Norm(),
y.Stride(), y.Volume(), id, x.Precision());
} else {
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
}
}
exit(-1);
}
template <typename Float, typename Float2>
__global__ void cabxpyAxKernel(Float a, Float2 b, Float2 *x, Float2 *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
x[i].x *= a;
x[i].y *= a;
y[i].x += b.x*x[i].x - b.y*x[i].y;
y[i].y += b.y*x[i].x + b.x*x[i].y;
i += gridSize;
}
}
__global__ void cabxpyAxHKernel(float a, float2 b, short4 *xH, float *xN, short4 *yH, float *yN,
int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
AX_FLOAT4(a, x0);
AX_FLOAT4(a, x1);
AX_FLOAT4(a, x2);
AX_FLOAT4(a, x3);
AX_FLOAT4(a, x4);
AX_FLOAT4(a, x5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride);
CAXPY_FLOAT4(b, x0, y0);
CAXPY_FLOAT4(b, x1, y1);
CAXPY_FLOAT4(b, x2, y2);
CAXPY_FLOAT4(b, x3, y3);
CAXPY_FLOAT4(b, x4, y4);
CAXPY_FLOAT4(b, x5, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
__global__ void cabxpyAxHKernel(float a, float2 b, short2 *xH, float *xN, short2 *yH, float *yN,
int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
AX_FLOAT2(a, x0);
AX_FLOAT2(a, x1);
AX_FLOAT2(a, x2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride);
CAXPY_FLOAT2(b, x0, y0);
CAXPY_FLOAT2(b, x1, y1);
CAXPY_FLOAT2(b, x2, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation y[i] += a*b*x[i], x[i] *= a
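// Usage sketch (illustrative only; the scalar values are placeholders, not taken from this file):
//   double a = 0.5;
//   quda::Complex b(1.0, -2.0);
//   cabxpyAxCuda(a, b, x, y);   // x <- a*x, then y <- y + b*x (net effect: y += a*b*x_old)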
void cabxpyAxCuda(const double &a, const quda::Complex &b, cudaColorSpinorField &x, cudaColorSpinorField &y) {
checkSpinor(x,y);
int length = x.Length()/2;
setBlock(23, length, x.Precision());
quda::blas_bytes += 4*x.RealLength()*x.Precision();
quda::blas_flops += 5*x.RealLength();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
double2 b2 = make_double2(real(b), imag(b));
hipLaunchKernelGGL(( cabxpyAxKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (double)a, b2, (double2*)x.V(), (double2*)y.V(), length);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 b2 = make_float2(real(b), imag(b));
hipLaunchKernelGGL(( cabxpyAxKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, b2, (float2*)x.V(), (float2*)y.V(), length);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
cabxpyAxCuda(a, b, x.Even(), y.Even());
cabxpyAxCuda(a, b, x.Odd(), y.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
float2 b2 = make_float2(real(b), imag(b));
hipLaunchKernelGGL(( cabxpyAxHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, b2, (short4*)x.V(), (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
} else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
float2 b2 = make_float2(real(b), imag(b));
hipLaunchKernelGGL(( cabxpyAxHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, b2, (short2*)x.V(), (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 4*x.Volume()*sizeof(float);
}
if (!blasTuning) checkCudaError();
}
//
// double caxpyNormCuda(float2 a, float *x, float *y, n){}
//
// First performs the operation y[i] = a*x[i] + y[i]
// Second returns the norm of y
//
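// Usage sketch (illustrative only; the scalar value is a placeholder):
//   quda::Complex a(0.5, 0.25);
//   double norm_y = caxpyNormCuda(a, x, y);   // y <- a*x + y, norm_y = ||y||^2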
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) caxpyNormF##suffix
#define REDUCE_TYPES Float a, Float *x, Float *y
#define REDUCE_PARAMS a, x, y
#define REDUCE_AUXILIARY(i) \
y[i].x += a.x*x[i].x - a.y*x[i].y; \
y[i].y += a.y*x[i].x + a.x*x[i].y
#define REDUCE_OPERATION(i) (y[i].x*y[i].x + y[i].y*y[i].y)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) caxpyNormH##suffix
#define REDUCE_TYPES Float a, short4 *yH, float *yN, int stride
#define REDUCE_PARAMS a, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \
CAXPY_FLOAT4(a, x0, y0); \
REAL_DOT_FLOAT4(norm0, y0, y0); \
CAXPY_FLOAT4(a, x1, y1); \
REAL_DOT_FLOAT4(norm1, y1, y1); \
CAXPY_FLOAT4(a, x2, y2); \
REAL_DOT_FLOAT4(norm2, y2, y2); \
CAXPY_FLOAT4(a, x3, y3); \
REAL_DOT_FLOAT4(norm3, y3, y3); \
CAXPY_FLOAT4(a, x4, y4); \
REAL_DOT_FLOAT4(norm4, y4, y4); \
CAXPY_FLOAT4(a, x5, y5); \
REAL_DOT_FLOAT4(norm5, y5, y5); \
norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) caxpyNormH##suffix
#define REDUCE_TYPES Float a, short2 *yH, float *yN, int stride
#define REDUCE_PARAMS a, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \
CAXPY_FLOAT2(a, x0, y0); \
REAL_DOT_FLOAT2(norm0, y0, y0); \
CAXPY_FLOAT2(a, x1, y1); \
REAL_DOT_FLOAT2(norm1, y1, y1); \
CAXPY_FLOAT2(a, x2, y2); \
REAL_DOT_FLOAT2(norm2, y2, y2); \
norm0 += norm1; norm0 += norm2; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
double caxpyNormCuda(const quda::Complex &a, cudaColorSpinorField &x, cudaColorSpinorField &y) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return caxpyNormCuda(a, x.Even(), y.Even()) + caxpyNormCuda(a, x.Odd(), y.Odd());
const int id = 24;
quda::blas_flops += 6*x.RealLength();
checkSpinor(x,y);
quda::blas_bytes += 3*x.RealLength()*x.Precision();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
double2 a2 = make_double2(real(a), imag(a));
return caxpyNormFCuda(a2, (double2*)x.V(), (double2*)y.V(), x.Length()/2, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
return caxpyNormFCuda(a2, (float2*)x.V(), (float2*)y.V(), x.Length()/2, id, x.Precision());
} else {
hipBindTexture(0, texNorm1, x.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
hipBindTexture(0, texNorm2, y.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
quda::blas_bytes += 3*x.Volume()*sizeof(float);
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), x.Bytes());
hipBindTexture(0, texHalf2, y.V(), x.Bytes());
float2 a2 = make_float2(real(a), imag(a));
return caxpyNormHCuda(a2, (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), x.Bytes());
hipBindTexture(0, texHalfSt2, y.V(), x.Bytes());
float2 a2 = make_float2(real(a), imag(a));
return caxpyNormHCuda(a2, (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
return 0;
}
}
}
//
// double caxpyXmazNormXCuda(float2 a, float *x, float *y, float *z, n){}
//
// First performs the operation y[i] = a*x[i] + y[i]
// Second performs the operation x[i] -= a*z[i]
// Third returns the norm of x
//
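// Usage sketch (illustrative only; the scalar value is a placeholder):
//   quda::Complex a(0.3, -0.1);
//   double norm_x = caxpyXmazNormXCuda(a, x, y, z);
//   // y <- y + a*x, x <- x - a*z, norm_x = ||x||^2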
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) caxpyXmazNormXF##suffix
#define REDUCE_TYPES Float a, Float *x, Float *y, Float *z
#define REDUCE_PARAMS a, x, y, z
#define REDUCE_AUXILIARY(i) \
y[i].x += a.x*x[i].x - a.y*x[i].y; \
y[i].y += a.y*x[i].x + a.x*x[i].y; \
x[i].x += a.y*z[i].y - a.x*z[i].x; \
x[i].y -= (a.x*z[i].y + a.y*z[i].x);
#define REDUCE_OPERATION(i) (x[i].x*x[i].x + x[i].y*x[i].y)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) caxpyXmazNormXH##suffix
#define REDUCE_TYPES Float a, short4 *xH, float *xN, short4 *yH, float *yN, int stride
#define REDUCE_PARAMS a, xH, xN, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); \
CAXPY_FLOAT4(a, x0, y0); \
CMAXPY_FLOAT4(a, z0, x0); \
REAL_DOT_FLOAT4(norm0, x0, x0); \
CAXPY_FLOAT4(a, x1, y1); \
CMAXPY_FLOAT4(a, z1, x1); \
REAL_DOT_FLOAT4(norm1, x1, x1); \
CAXPY_FLOAT4(a, x2, y2); \
CMAXPY_FLOAT4(a, z2, x2); \
REAL_DOT_FLOAT4(norm2, x2, x2); \
CAXPY_FLOAT4(a, x3, y3); \
CMAXPY_FLOAT4(a, z3, x3); \
REAL_DOT_FLOAT4(norm3, x3, x3); \
CAXPY_FLOAT4(a, x4, y4); \
CMAXPY_FLOAT4(a, z4, x4); \
REAL_DOT_FLOAT4(norm4, x4, x4); \
CAXPY_FLOAT4(a, x5, y5); \
CMAXPY_FLOAT4(a, z5, x5); \
REAL_DOT_FLOAT4(norm5, x5, x5); \
norm0 += norm1; norm2 += norm3; \
norm4 += norm5; norm0 += norm2; norm0 += norm4; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) caxpyXmazNormXH##suffix
#define REDUCE_TYPES Float a, short2 *xH, float *xN, short2 *yH, float *yN, int stride
#define REDUCE_PARAMS a, xH, xN, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); \
CAXPY_FLOAT2(a, x0, y0); \
CMAXPY_FLOAT2(a, z0, x0); \
REAL_DOT_FLOAT2(norm0, x0, x0); \
CAXPY_FLOAT2(a, x1, y1); \
CMAXPY_FLOAT2(a, z1, x1); \
REAL_DOT_FLOAT2(norm1, x1, x1); \
CAXPY_FLOAT2(a, x2, y2); \
CMAXPY_FLOAT2(a, z2, x2); \
REAL_DOT_FLOAT2(norm2, x2, x2); \
norm0 += norm1; norm0 += norm2; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
double caxpyXmazNormXCuda(const quda::Complex &a, cudaColorSpinorField &x,
cudaColorSpinorField &y, cudaColorSpinorField &z) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return caxpyXmazNormXCuda(a, x.Even(), y.Even(), z.Even()) +
caxpyXmazNormXCuda(a, x.Odd(), y.Odd(), z.Odd());
const int id = 25;
quda::blas_flops += 10*x.RealLength();
checkSpinor(x,y);
quda::blas_bytes += 5*x.RealLength()*x.Precision();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
double2 a2 = make_double2(real(a), imag(a));
return caxpyXmazNormXFCuda(a2, (double2*)x.V(), (double2*)y.V(), (double2*)z.V(), x.Length()/2, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
return caxpyXmazNormXFCuda(a2, (float2*)x.V(), (float2*)y.V(), (float2*)z.V(), x.Length()/2, id, x.Precision());
} else {
hipBindTexture(0, texNorm1, x.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
hipBindTexture(0, texNorm2, y.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
hipBindTexture(0, texNorm3, z.Norm(), z.Bytes()/(z.Ncolor()*z.Nspin()));
quda::blas_bytes += 3*x.Volume()*sizeof(float);
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), x.Bytes());
hipBindTexture(0, texHalf2, y.V(), x.Bytes());
hipBindTexture(0, texHalf3, z.V(), z.Bytes());
float2 a2 = make_float2(real(a), imag(a));
return caxpyXmazNormXHCuda(a2, (short4*)x.V(), (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), x.Bytes());
hipBindTexture(0, texHalfSt2, y.V(), x.Bytes());
hipBindTexture(0, texHalfSt3, z.V(), z.Bytes());
float2 a2 = make_float2(real(a), imag(a));
return caxpyXmazNormXHCuda(a2, (short2*)x.V(), (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
return 0;
}
}
}
//
// double cabxpyAxNormCuda(float a, float2 b, float *x, float *y, n){}
//
// First performs the operation x[i] *= a, then y[i] += b*x[i]
// Second returns the norm of y
//
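// Usage sketch (illustrative only; the scalar values are placeholders):
//   double a = 2.0;
//   quda::Complex b(0.0, 1.0);
//   double norm_y = cabxpyAxNormCuda(a, b, x, y);   // x <- a*x, y <- y + b*x, returns ||y||^2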
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) cabxpyAxNormF##suffix
#define REDUCE_TYPES Float a, Float b, Float *x, Float *y
#define REDUCE_PARAMS a, b, x, y
#define REDUCE_AUXILIARY(i) \
x[i].x *= a.x; \
x[i].y *= a.x; \
y[i].x += b.x*x[i].x - b.y*x[i].y; \
y[i].y += b.y*x[i].x + b.x*x[i].y;
#define REDUCE_OPERATION(i) (y[i].x*y[i].x + y[i].y*y[i].y)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) cabxpyAxNormH##suffix
#define REDUCE_TYPES Float a, Float b, short4 *xH, float *xN, short4 *yH, float *yN, int stride
#define REDUCE_PARAMS a, b, xH, xN, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \
AX_FLOAT4(a.x, x0); \
AX_FLOAT4(a.x, x1); \
AX_FLOAT4(a.x, x2); \
AX_FLOAT4(a.x, x3); \
AX_FLOAT4(a.x, x4); \
AX_FLOAT4(a.x, x5); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride); \
CAXPY_FLOAT4(b, x0, y0); \
REAL_DOT_FLOAT4(norm0, y0, y0); \
CAXPY_FLOAT4(b, x1, y1); \
REAL_DOT_FLOAT4(norm1, y1, y1); \
CAXPY_FLOAT4(b, x2, y2); \
REAL_DOT_FLOAT4(norm2, y2, y2); \
CAXPY_FLOAT4(b, x3, y3); \
REAL_DOT_FLOAT4(norm3, y3, y3); \
CAXPY_FLOAT4(b, x4, y4); \
REAL_DOT_FLOAT4(norm4, y4, y4); \
CAXPY_FLOAT4(b, x5, y5); \
REAL_DOT_FLOAT4(norm5, y5, y5); \
norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) cabxpyAxNormH##suffix
#define REDUCE_TYPES Float a, Float b, short2 *xH, float *xN, short2 *yH, float *yN, int stride
#define REDUCE_PARAMS a, b, xH, xN, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \
AX_FLOAT2(a.x, x0); \
AX_FLOAT2(a.x, x1); \
AX_FLOAT2(a.x, x2); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride); \
CAXPY_FLOAT2(b, x0, y0); \
REAL_DOT_FLOAT2(norm0, y0, y0); \
CAXPY_FLOAT2(b, x1, y1); \
REAL_DOT_FLOAT2(norm1, y1, y1); \
CAXPY_FLOAT2(b, x2, y2); \
REAL_DOT_FLOAT2(norm2, y2, y2); \
norm0 += norm1; norm0 += norm2; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
double cabxpyAxNormCuda(const double &a, const quda::Complex &b, cudaColorSpinorField &x, cudaColorSpinorField &y) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return cabxpyAxNormCuda(a, b, x.Even(), y.Even()) + cabxpyAxNormCuda(a, b, x.Odd(), y.Odd());
const int id = 26;
quda::blas_flops += 7*x.RealLength();
checkSpinor(x,y);
quda::blas_bytes += 4*x.RealLength()*x.Precision();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
double2 a2 = make_double2(a, 0);
double2 b2 = make_double2(real(b), imag(b));
return cabxpyAxNormFCuda(a2, b2, (double2*)x.V(), (double2*)y.V(), x.Length()/2, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(a, 0);
float2 b2 = make_float2(real(b), imag(b));
return cabxpyAxNormFCuda(a2, b2, (float2*)x.V(), (float2*)y.V(), x.Length()/2, id, x.Precision());
} else {
hipBindTexture(0, texNorm1, x.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
hipBindTexture(0, texNorm2, y.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
quda::blas_bytes += 3*x.Volume()*sizeof(float);
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), x.Bytes());
hipBindTexture(0, texHalf2, y.V(), x.Bytes());
float2 a2 = make_float2(a, 0);
float2 b2 = make_float2(real(b), imag(b));
return cabxpyAxNormHCuda(a2, b2, (short4*)x.V(), (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), x.Bytes());
hipBindTexture(0, texHalfSt2, y.V(), x.Bytes());
float2 a2 = make_float2(a, 0);
float2 b2 = make_float2(real(b), imag(b));
return cabxpyAxNormHCuda(a2, b2, (short2*)x.V(), (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
return 0;
}
}
}
template <typename Float2>
__global__ void caxpbypzDKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 X = READ_DOUBLE2_TEXTURE(x, i);
Float2 Z = read_Float2(z, i);
Z.x += a.x*X.x - a.y*X.y;
Z.y += a.y*X.x + a.x*X.y;
Float2 Y = READ_DOUBLE2_TEXTURE(y, i);
Z.x += b.x*Y.x - b.y*Y.y;
Z.y += b.y*Y.x + b.x*Y.y;
z[i] = make_Float2(Z);
i += gridSize;
}
}
template <typename Float2>
__global__ void caxpbypzSKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 X = read_Float2(x, i);
Float2 Z = read_Float2(z, i);
Z.x += a.x*X.x - a.y*X.y;
Z.y += a.y*X.x + a.x*X.y;
Float2 Y = read_Float2(y, i);
Z.x += b.x*Y.x - b.y*Y.y;
Z.y += b.y*Y.x + b.x*Y.y;
z[i] = make_Float2(Z);
i += gridSize;
}
}
__global__ void caxpbypzHKernel(float2 a, float2 b, float *xN, short4 *yH, float *yN,
short4 *zH, float *zN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride);
CAXPBYPZ_FLOAT4(a, x0, b, y0, z0);
CAXPBYPZ_FLOAT4(a, x1, b, y1, z1);
CAXPBYPZ_FLOAT4(a, x2, b, y2, z2);
CAXPBYPZ_FLOAT4(a, x3, b, y3, z3);
CAXPBYPZ_FLOAT4(a, x4, b, y4, z4);
CAXPBYPZ_FLOAT4(a, x5, b, y5, z5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(zH, zN, z, stride);
i += gridSize;
}
}
__global__ void caxpbypzHKernel(float2 a, float2 b, float *xN, short2 *yH, float *yN,
short2 *zH, float *zN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride);
CAXPBYPZ_FLOAT2(a, x0, b, y0, z0);
CAXPBYPZ_FLOAT2(a, x1, b, y1, z1);
CAXPBYPZ_FLOAT2(a, x2, b, y2, z2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(zH, zN, z, stride);
i += gridSize;
}
}
// performs the operation z[i] = a*x[i] + b*y[i] + z[i]
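// Usage sketch (illustrative only; the scalar values are placeholders):
//   quda::Complex a(1.0, 0.0), b(0.0, -1.0);
//   caxpbypzCuda(a, x, b, y, z);   // z <- a*x + b*y + z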
void caxpbypzCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b,
cudaColorSpinorField &y, cudaColorSpinorField &z) {
checkSpinor(x,y);
checkSpinor(x,z);
int length = x.Length()/2;
setBlock(27, length, x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
double2 a2 = make_double2(real(a), imag(a));
double2 b2 = make_double2(real(b), imag(b));
int spinor_bytes = x.Length()*sizeof(double);
hipBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
hipBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
hipBindTexture(0, zTexDouble2, z.V(), spinor_bytes);
hipLaunchKernelGGL(( caxpbypzDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (double2*)x.V(), b2, (double2*)y.V(), (double2*)z.V(), length);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
hipLaunchKernelGGL(( caxpbypzSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (float2*)x.V(), b2, (float2*)y.V(), (float2*)z.V(), length);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
caxpbypzCuda(a, x.Even(), b, y.Even(), z.Even());
caxpbypzCuda(a, x.Odd(), b, y.Odd(), z.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 6*x.Volume()*sizeof(float);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf3, z.V(), spinor_bytes);
hipBindTexture(0, texNorm3, z.Norm(), spinor_bytes/12);
hipLaunchKernelGGL(( caxpbypzHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(),
(short4*)z.V(), (float*)z.Norm(), z.Stride(), z.Volume());
} else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt3, z.V(), spinor_bytes);
hipBindTexture(0, texNorm3, z.Norm(), spinor_bytes/3);
hipLaunchKernelGGL(( caxpbypzHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(),
(short2*)z.V(), (float*)z.Norm(), z.Stride(), z.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
}
quda::blas_bytes += 4*x.RealLength()*x.Precision();
quda::blas_flops += 8*x.RealLength();
if (!blasTuning) checkCudaError();
}
template <typename Float2>
__global__ void caxpbypczpwDKernel(Float2 a, Float2 *x, Float2 b, Float2 *y,
Float2 c, Float2 *z, Float2 *w, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 W = read_Float2(w, i);
Float2 X = READ_DOUBLE2_TEXTURE(x, i);
CAXPY_DOUBLE2(a, X, W);
Float2 Y = READ_DOUBLE2_TEXTURE(y, i);
CAXPY_DOUBLE2(b, Y, W);
Float2 Z = READ_DOUBLE2_TEXTURE(z, i);
CAXPY_DOUBLE2(c, Z, W);
w[i] = make_Float2(W);
i += gridSize;
}
}
template <typename Float2>
__global__ void caxpbypczpwSKernel(Float2 a, Float2 *x, Float2 b, Float2 *y,
Float2 c, Float2 *z, Float2 *w, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 W = read_Float2(w, i);
Float2 X = read_Float2(x, i);
CAXPY_FLOAT2(a, X, W);
Float2 Y = read_Float2(y, i);
CAXPY_FLOAT2(b, Y, W);
Float2 Z = read_Float2(z, i);
CAXPY_FLOAT2(c, Z, W);
w[i] = make_Float2(W);
i += gridSize;
}
}
__global__ void caxpbypczpwHKernel(float2 a, float2 b, float2 c, float *xN, short4 *yH, float *yN,
short4 *zH, float *zN, short4* wH, float *wN,
int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(w, texHalf4, texNorm4, stride);
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
CAXPY_FLOAT4(a, x0, w0);
CAXPY_FLOAT4(a, x1, w1);
CAXPY_FLOAT4(a, x2, w2);
CAXPY_FLOAT4(a, x3, w3);
CAXPY_FLOAT4(a, x4, w4);
CAXPY_FLOAT4(a, x5, w5);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
CAXPY_FLOAT4(b, y0, w0);
CAXPY_FLOAT4(b, y1, w1);
CAXPY_FLOAT4(b, y2, w2);
CAXPY_FLOAT4(b, y3, w3);
CAXPY_FLOAT4(b, y4, w4);
CAXPY_FLOAT4(b, y5, w5);
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride);
CAXPY_FLOAT4(c, z0, w0);
CAXPY_FLOAT4(c, z1, w1);
CAXPY_FLOAT4(c, z2, w2);
CAXPY_FLOAT4(c, z3, w3);
CAXPY_FLOAT4(c, z4, w4);
CAXPY_FLOAT4(c, z5, w5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(wH, wN, w, stride);
i += gridSize;
}
}
__global__ void caxpbypczpwHKernel(float2 a, float2 b, float2 c, float *xN, short2 *yH, float *yN,
short2 *zH, float *zN, short2 *wH, float *wN,
int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(w, texHalfSt4, texNorm4, stride);
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
CAXPY_FLOAT2(a, x0, w0);
CAXPY_FLOAT2(a, x1, w1);
CAXPY_FLOAT2(a, x2, w2);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
CAXPY_FLOAT2(b, y0, w0);
CAXPY_FLOAT2(b, y1, w1);
CAXPY_FLOAT2(b, y2, w2);
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride);
CAXPY_FLOAT2(c, z0, w0);
CAXPY_FLOAT2(c, z1, w1);
CAXPY_FLOAT2(c, z2, w2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(wH, wN, w, stride);
i += gridSize;
}
}
// performs the operation w[i] = a*x[i] + b*y[i] + c*z[i] + w[i]
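// Usage sketch (illustrative only; the scalar values are placeholders):
//   quda::Complex a(1.0, 0.0), b(0.5, 0.5), c(0.0, -1.0);
//   caxpbypczpwCuda(a, x, b, y, c, z, w);   // w <- a*x + b*y + c*z + w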
void caxpbypczpwCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y,
const quda::Complex &c, cudaColorSpinorField &z, cudaColorSpinorField &w) {
checkSpinor(x,y);
checkSpinor(x,z);
checkSpinor(x,w);
int length = x.Length()/2;
setBlock(28, length, x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
hipBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
hipBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
hipBindTexture(0, zTexDouble2, z.V(), spinor_bytes);
hipBindTexture(0, wTexDouble2, w.V(), spinor_bytes);
double2 a2 = make_double2(real(a), imag(a));
double2 b2 = make_double2(real(b), imag(b));
double2 c2 = make_double2(real(c), imag(c));
hipLaunchKernelGGL(( caxpbypczpwDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (double2*)x.V(), b2, (double2*)y.V(),
c2, (double2*)z.V(), (double2*)w.V(), length);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
float2 c2 = make_float2(real(c), imag(c));
hipLaunchKernelGGL(( caxpbypczpwSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (float2*)x.V(), b2, (float2*)y.V(),
c2, (float2*)z.V(), (float2*)w.V(), length);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
caxpbypczpwCuda(a, x.Even(), b, y.Even(), c, z.Even(), w.Even());
caxpbypczpwCuda(a, x.Odd(), b, y.Odd(), c, z.Odd(), w.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 6*x.Volume()*sizeof(float);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
float2 c2 = make_float2(real(c), imag(c));
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf3, z.V(), spinor_bytes);
hipBindTexture(0, texNorm3, z.Norm(), spinor_bytes/12);
hipBindTexture(0, texHalf4, w.V(), spinor_bytes);
hipBindTexture(0, texNorm4, w.Norm(), spinor_bytes/12);
hipLaunchKernelGGL(( caxpbypczpwHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, c2, (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(),
(short4*)z.V(), (float*)z.Norm(), (short4*)w.V(), (float*)w.Norm(),
z.Stride(), z.Volume());
} else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
hipBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
hipBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt3, z.V(), spinor_bytes);
hipBindTexture(0, texNorm3, z.Norm(), spinor_bytes/3);
hipBindTexture(0, texHalfSt4, w.V(), spinor_bytes);
hipBindTexture(0, texNorm4, w.Norm(), spinor_bytes/3);
hipLaunchKernelGGL(( caxpbypczpwHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, c2, (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(),
(short2*)z.V(), (float*)z.Norm(), (short2*)w.V(), (float*)w.Norm(),
z.Stride(), z.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
}
quda::blas_bytes += 5*x.RealLength()*x.Precision();
quda::blas_flops += 12*x.RealLength();
if (!blasTuning) checkCudaError();
}
//
// quda::Complex caxpyDotzyCuda(float2 a, float *x, float *y, float *z, n){}
//
// First performs the operation y[i] = a*x[i] + y[i]
// Second returns the dot product (z,y)
//
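// Usage sketch (illustrative only; the scalar value is a placeholder):
//   quda::Complex a(0.7, 0.1);
//   quda::Complex dot = caxpyDotzyCuda(a, x, y, z);   // y <- a*x + y, dot = (z, y)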
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) caxpyDotzyF##suffix
#define REDUCE_TYPES Float2 a, Float2 *x, Float2 *y, Float2 *z, Float c
#define REDUCE_PARAMS a, x, y, z, c
#define REDUCE_REAL_AUXILIARY(i) y[i].x += a.x*x[i].x - a.y*x[i].y;
#define REDUCE_IMAG_AUXILIARY(i) y[i].y += a.y*x[i].x + a.x*x[i].y;
#define REDUCE_REAL_OPERATION(i) (z[i].x*y[i].x + z[i].y*y[i].y)
#define REDUCE_IMAG_OPERATION(i) (z[i].x*y[i].y - z[i].y*y[i].x)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) caxpyDotzyH##suffix
#define REDUCE_TYPES Float2 a, short4 *yH, Float *yN, int stride
#define REDUCE_PARAMS a, yH, yN, stride
#define REDUCE_REAL_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); \
CAXPY_FLOAT4(a, x0, y0); \
CAXPY_FLOAT4(a, x1, y1); \
CAXPY_FLOAT4(a, x2, y2); \
CAXPY_FLOAT4(a, x3, y3); \
CAXPY_FLOAT4(a, x4, y4); \
CAXPY_FLOAT4(a, x5, y5); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
#define REDUCE_IMAG_AUXILIARY(i) \
REAL_DOT_FLOAT4(rdot0, z0, y0); \
REAL_DOT_FLOAT4(rdot1, z1, y1); \
REAL_DOT_FLOAT4(rdot2, z2, y2); \
REAL_DOT_FLOAT4(rdot3, z3, y3); \
REAL_DOT_FLOAT4(rdot4, z4, y4); \
REAL_DOT_FLOAT4(rdot5, z5, y5); \
IMAG_DOT_FLOAT4(idot0, z0, y0); \
IMAG_DOT_FLOAT4(idot1, z1, y1); \
IMAG_DOT_FLOAT4(idot2, z2, y2); \
IMAG_DOT_FLOAT4(idot3, z3, y3); \
IMAG_DOT_FLOAT4(idot4, z4, y4); \
IMAG_DOT_FLOAT4(idot5, z5, y5); \
rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; \
idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4;
#define REDUCE_REAL_OPERATION(i) (rdot0)
#define REDUCE_IMAG_OPERATION(i) (idot0)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) caxpyDotzyH##suffix
#define REDUCE_TYPES Float2 a, short2 *yH, Float *yN, int stride
#define REDUCE_PARAMS a, yH, yN, stride
#define REDUCE_REAL_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); \
CAXPY_FLOAT2(a, x0, y0); \
CAXPY_FLOAT2(a, x1, y1); \
CAXPY_FLOAT2(a, x2, y2); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
#define REDUCE_IMAG_AUXILIARY(i) \
REAL_DOT_FLOAT2(rdot0, z0, y0); \
REAL_DOT_FLOAT2(rdot1, z1, y1); \
REAL_DOT_FLOAT2(rdot2, z2, y2); \
IMAG_DOT_FLOAT2(idot0, z0, y0); \
IMAG_DOT_FLOAT2(idot1, z1, y1); \
IMAG_DOT_FLOAT2(idot2, z2, y2); \
rdot0 += rdot1; rdot0 += rdot2; \
idot0 += idot1; idot0 += idot2;
#define REDUCE_REAL_OPERATION(i) (rdot0)
#define REDUCE_IMAG_OPERATION(i) (idot0)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
quda::Complex caxpyDotzyCuda(const quda::Complex &a, cudaColorSpinorField &x, cudaColorSpinorField &y,
cudaColorSpinorField &z) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return caxpyDotzyCuda(a, x.Even(), y.Even(), z.Even()) +
caxpyDotzyCuda(a, x.Odd(), y.Odd(), z.Odd());
const int id = 29;
quda::blas_flops += 8*x.RealLength();
checkSpinor(x,y);
quda::blas_bytes += 4*x.RealLength()*x.Precision();
double2 dot;
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
char c = 0;
double2 a2 = make_double2(real(a), imag(a));
dot = caxpyDotzyFCuda(a2, (double2*)x.V(), (double2*)y.V(), (double2*)z.V(), c, x.Length()/2, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
char c = 0;
float2 a2 = make_float2(real(a), imag(a));
dot = caxpyDotzyFCuda(a2, (float2*)x.V(), (float2*)y.V(), (float2*)z.V(), c, x.Length()/2, id, x.Precision());
} else {
hipBindTexture(0, texNorm1, x.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
hipBindTexture(0, texNorm2, y.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
hipBindTexture(0, texNorm3, z.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
quda::blas_bytes += 3*x.Volume()*sizeof(float);
float2 a2 = make_float2(real(a), imag(a));
if (x.Nspin() == 4){ //wilson
hipBindTexture(0, texHalf1, x.V(), x.Bytes());
hipBindTexture(0, texHalf2, y.V(), x.Bytes());
hipBindTexture(0, texHalf3, z.V(), x.Bytes());
dot = caxpyDotzyHCuda(a2, (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else if (x.Nspin() == 1){ //staggered
hipBindTexture(0, texHalfSt1, x.V(), x.Bytes());
hipBindTexture(0, texHalfSt2, y.V(), x.Bytes());
hipBindTexture(0, texHalfSt3, z.V(), x.Bytes());
dot = caxpyDotzyHCuda(a2, (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
}
}
return quda::Complex(dot.x, dot.y);
}
| 16e9e24eabd8e9240896b890abeb2b707d0fe41d.cu | #include <stdlib.h>
#include <stdio.h>
#include <quda_internal.h>
#include <blas_quda.h>
#include <color_spinor_field.h>
#include <face_quda.h> // this is where the MPI / QMP depdendent code is
#include <cuComplex.h>
#define REDUCE_MAX_BLOCKS 65536
#define REDUCE_DOUBLE 64
#define REDUCE_KAHAN 32
#if (__CUDA_ARCH__ >= 130)
#define REDUCE_TYPE REDUCE_DOUBLE
#define QudaSumFloat double
#define QudaSumComplex cuDoubleComplex
#define QudaSumFloat3 double3
#else
#define REDUCE_TYPE REDUCE_KAHAN
#define QudaSumFloat float
#define QudaSumComplex cuComplex
#define QudaSumFloat3 float3
#endif
// These are used for reduction kernels
static QudaSumFloat *d_reduceFloat=0;
static QudaSumComplex *d_reduceComplex=0;
static QudaSumFloat3 *d_reduceFloat3=0;
static QudaSumFloat *h_reduceFloat=0;
static QudaSumComplex *h_reduceComplex=0;
static QudaSumFloat3 *h_reduceFloat3=0;
namespace quda {
unsigned long long blas_flops;
unsigned long long blas_bytes;
}
static dim3 blasBlock;
static dim3 blasGrid;
// generated by blas_test
#include <blas_param.h>
double2 operator+(const double2& x, const double2 &y) {
return make_double2(x.x + y.x, x.y + y.y);
}
double3 operator+(const double3& x, const double3 &y) {
double3 z;
z.x = x.x + y.x; z.y = x.y + y.y; z.z = x.z + y.z;
return z;
}
__device__ float2 operator*(const float a, const float2 x) {
float2 y;
y.x = a*x.x;
y.y = a*x.y;
return y;
}
template <typename Float2>
__device__ Float2 operator+(const Float2 x, const Float2 y) {
Float2 z;
z.x = x.x + y.x;
z.y = x.y + y.y;
return z;
}
template <typename Float2>
__device__ Float2 operator+=(Float2 &x, const Float2 y) {
x.x += y.x;
x.y += y.y;
return x;
}
template <typename Float2>
__device__ Float2 operator-=(Float2 &x, const Float2 y) {
x.x -= y.x;
x.y -= y.y;
return x;
}
template <typename Float, typename Float2>
__device__ Float2 operator*=(Float2 &x, const Float a) {
x.x *= a;
x.y *= a;
return x;
}
template <typename Float>
__device__ float4 operator*=(float4 &a, const Float &b) {
a.x *= b;
a.y *= b;
a.z *= b;
a.w *= b;
return a;
}
void zeroCuda(cudaColorSpinorField &a) { a.zero(); }
// blasTuning = 1 turns off error checking
static QudaTune blasTuning = QUDA_TUNE_NO;
namespace quda {
void initBlas(void)
{
if (!d_reduceFloat) {
if (cudaMalloc((void**) &d_reduceFloat, REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat)) == cudaErrorMemoryAllocation) {
errorQuda("Error allocating device reduction array");
}
}
if (!d_reduceComplex) {
if (cudaMalloc((void**) &d_reduceComplex, REDUCE_MAX_BLOCKS*sizeof(QudaSumComplex)) == cudaErrorMemoryAllocation) {
errorQuda("Error allocating device reduction array");
}
}
if (!d_reduceFloat3) {
if (cudaMalloc((void**) &d_reduceFloat3, REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat3)) == cudaErrorMemoryAllocation) {
errorQuda("Error allocating device reduction array");
}
}
if (!h_reduceFloat) {
if (cudaMallocHost((void**) &h_reduceFloat, REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat)) == cudaErrorMemoryAllocation) {
errorQuda("Error allocating host reduction array");
}
}
if (!h_reduceComplex) {
if (cudaMallocHost((void**) &h_reduceComplex, REDUCE_MAX_BLOCKS*sizeof(QudaSumComplex)) == cudaErrorMemoryAllocation) {
errorQuda("Error allocating host reduction array");
}
}
if (!h_reduceFloat3) {
if (cudaMallocHost((void**) &h_reduceFloat3, REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat3)) == cudaErrorMemoryAllocation) {
errorQuda("Error allocating host reduction array");
}
}
}
void endBlas(void)
{
if (d_reduceFloat) {
cudaFree(d_reduceFloat);
d_reduceFloat = 0;
}
if (d_reduceComplex) {
cudaFree(d_reduceComplex);
d_reduceComplex = 0;
}
if (d_reduceFloat3) {
cudaFree(d_reduceFloat3);
d_reduceFloat3 = 0;
}
if (h_reduceFloat) {
cudaFreeHost(h_reduceFloat);
h_reduceFloat = 0;
}
if (h_reduceComplex) {
cudaFreeHost(h_reduceComplex);
h_reduceComplex = 0;
}
if (h_reduceFloat3) {
cudaFreeHost(h_reduceFloat3);
h_reduceFloat3 = 0;
}
}
void setBlasTuning(QudaTune tune)
{
blasTuning = tune;
}
void setBlasParam(int kernel, int prec, int threads, int blocks)
{
blas_threads[kernel][prec] = threads;
blas_blocks[kernel][prec] = blocks;
}
}
void setBlock(int kernel, int length, QudaPrecision precision)
{
int prec;
switch(precision) {
case QUDA_HALF_PRECISION:
prec = 0;
break;
case QUDA_SINGLE_PRECISION:
prec = 1;
break;
case QUDA_DOUBLE_PRECISION:
prec = 2;
break;
}
int blocks = min(blas_blocks[kernel][prec], max(length/blas_threads[kernel][prec], 1));
blasBlock.x = blas_threads[kernel][prec];
blasBlock.y = 1;
blasBlock.z = 1;
blasGrid.x = blocks;
blasGrid.y = 1;
blasGrid.z = 1;
}
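// Example (illustrative sketch, not part of the interface): selecting the launch
// geometry for kernel id 27 (caxpbypz) at single precision (prec index 1) reads
// blas_threads[27][1] and blas_blocks[27][1] from blas_param.h, capping the block
// count by the problem length:
//   setBlock(27, x.Length()/2, QUDA_SINGLE_PRECISION);
//   // ...the kernel is then launched with <<<blasGrid, blasBlock>>>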
#if (__CUDA_ARCH__ >= 130)
static __inline__ __device__ double2 fetch_double2(texture<int4, 1> t, int i)
{
int4 v = tex1Dfetch(t,i);
return make_double2(__hiloint2double(v.y, v.x), __hiloint2double(v.w, v.z));
}
#else
static __inline__ __device__ double2 fetch_double2(texture<int4, 1> t, int i)
{
// do nothing
return make_double2(0.0, 0.0);
}
#endif
float2 __device__ read_Float2(float2 *x, int i) {
return make_float2(x[i].x, x[i].y);
}
double2 __device__ read_Float2(double2 *x, int i) {
return make_double2(x[i].x, x[i].y);
}
#if (__CUDA_ARCH__ >= 200)
#define READ_DOUBLE2_TEXTURE(x, i) \
read_Float2(x, i)
#else
#define READ_DOUBLE2_TEXTURE(x, i) \
fetch_double2(x##TexDouble2, i)
#endif
#define READ_FLOAT2_TEXTURE(x, i) \
tex1Dfetch(x##TexSingle2, i)
float2 __device__ make_Float2(float2 x) {
return make_float2(x.x, x.y);
}
double2 __device__ make_Float2(double2 x) {
return make_double2(x.x, x.y);
}
#define RECONSTRUCT_HALF_SPINOR(a, texHalf, texNorm, length) \
float a##c = tex1Dfetch(texNorm, i); \
float4 a##0 = tex1Dfetch(texHalf, i + 0*length); \
float4 a##1 = tex1Dfetch(texHalf, i + 1*length); \
float4 a##2 = tex1Dfetch(texHalf, i + 2*length); \
float4 a##3 = tex1Dfetch(texHalf, i + 3*length); \
float4 a##4 = tex1Dfetch(texHalf, i + 4*length); \
float4 a##5 = tex1Dfetch(texHalf, i + 5*length); \
a##0 *= a##c; \
a##1 *= a##c; \
a##2 *= a##c; \
a##3 *= a##c; \
a##4 *= a##c; \
a##5 *= a##c;
#define RECONSTRUCT_HALF_SPINOR_ST(a, texHalf, texNorm, length) \
float a##c = tex1Dfetch(texNorm, i); \
float2 a##0 = tex1Dfetch(texHalf, i + 0*length); \
float2 a##1 = tex1Dfetch(texHalf, i + 1*length); \
float2 a##2 = tex1Dfetch(texHalf, i + 2*length); \
(a##0) *= a##c; \
(a##1) *= a##c; \
(a##2) *= a##c;
// Some musings on how to clean up the blas code using Boost
/*#define BOOST_RECONSTRUCT_HALF_SPINOR(z, j, a, texHalf, length) \
float4 a##k tex1Dfetch(texHalf, i + j*length); \
a##k *= a##c;
#define RECONSTRUCT_HALF_SPINOR(a, texHalf, texNorm, length) \
BOOST_PP_REPEAT(6, BOOST_RECONSTRUCT_HALF_SPINOR, a, texHalf, length) \
*/
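// For reference, an expansion sketch of RECONSTRUCT_HALF_SPINOR defined above
// (names follow the macro parameters; illustrative only, not generated code):
// RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride) reads the per-site norm
// and six float4 components, then rescales each component by the norm:
//   float xc = tex1Dfetch(texNorm1, i);
//   float4 x0 = tex1Dfetch(texHalf1, i + 0*stride); /* ... x1 through x4 ... */
//   float4 x5 = tex1Dfetch(texHalf1, i + 5*stride);
//   x0 *= xc; /* ... */ x5 *= xc;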
#define READ_HALF_SPINOR_TEX(a, tex, texNorm, length) \
float a##c = tex1Dfetch(texNorm, i); \
float4 a##0 = tex1Dfetch(tex, i + 0*length); \
float4 a##1 = tex1Dfetch(tex, i + 1*length); \
float4 a##2 = tex1Dfetch(tex, i + 2*length); \
float4 a##3 = tex1Dfetch(tex, i + 3*length); \
float4 a##4 = tex1Dfetch(tex, i + 4*length); \
float4 a##5 = tex1Dfetch(tex, i + 5*length);
#define READ_HALF_SPINOR(a, tex, length) \
float4 a##0 = tex1Dfetch(tex, i + 0*length); \
float4 a##1 = tex1Dfetch(tex, i + 1*length); \
float4 a##2 = tex1Dfetch(tex, i + 2*length); \
float4 a##3 = tex1Dfetch(tex, i + 3*length); \
float4 a##4 = tex1Dfetch(tex, i + 4*length); \
float4 a##5 = tex1Dfetch(tex, i + 5*length); \
float a##c = a##N[i];
#define READ_HALF_SPINOR_ST(a, tex, length) \
float2 a##0 = tex1Dfetch(tex, i + 0*length); \
float2 a##1 = tex1Dfetch(tex, i + 1*length); \
float2 a##2 = tex1Dfetch(tex, i + 2*length); \
float a##c = a##N[i];
#define FAST_ABS_MAX(a, b) fmaxf(fabsf(a), fabsf(b));
#define FAST_MAX(a, b) fmaxf(a, b);
__device__ float fast_abs_max(float4 a) {
float c0 = FAST_ABS_MAX(a.x, a.y);
float c1 = FAST_ABS_MAX(a.z, a.w);
return FAST_MAX(c0, c1);
}
#define CONSTRUCT_HALF_SPINOR_FROM_SINGLE(h, n, a, length) { \
float c0 = fast_abs_max(a##0); \
float c1 = fast_abs_max(a##1); \
c0 = FAST_MAX(c0, c1); \
float c2 = fast_abs_max(a##2); \
float c3 = fast_abs_max(a##3); \
c1 = FAST_MAX(c2, c3); \
c0 = FAST_MAX(c0, c1); \
c2 = fast_abs_max(a##4); \
c3 = fast_abs_max(a##5); \
c1 = FAST_MAX(c2, c3); \
c0 = FAST_MAX(c0, c1); \
n[i] = c0; \
float C = __fdividef(MAX_SHORT, c0); \
h[i+0*length] = make_short4((short)(C*(float)(a##0).x), (short)(C*(float)(a##0).y), \
(short)(C*(float)(a##0).z), (short)(C*(float)(a##0).w)); \
h[i+1*length] = make_short4((short)(C*(float)(a##1).x), (short)(C*(float)(a##1).y), \
(short)(C*(float)(a##1).z), (short)(C*(float)(a##1).w)); \
h[i+2*length] = make_short4((short)(C*(float)(a##2).x), (short)(C*(float)(a##2).y), \
(short)(C*(float)(a##2).z), (short)(C*(float)(a##2).w)); \
h[i+3*length] = make_short4((short)(C*(float)(a##3).x), (short)(C*(float)(a##3).y), \
(short)(C*(float)(a##3).z), (short)(C*(float)(a##3).w)); \
h[i+4*length] = make_short4((short)(C*(float)(a##4).x), (short)(C*(float)(a##4).y), \
(short)(C*(float)(a##4).z), (short)(C*(float)(a##4).w)); \
h[i+5*length] = make_short4((short)(C*(float)(a##5).x), (short)(C*(float)(a##5).y), \
(short)(C*(float)(a##5).z), (short)(C*(float)(a##5).w));}
#define CONSTRUCT_HALF_SPINOR_FROM_DOUBLE(h, n, a, length) \
{float c0 = fmaxf(fabsf((a##0).x), fabsf((a##0).y)); \
float c1 = fmaxf(fabsf((a##1).x), fabsf((a##1).y)); \
float c2 = fmaxf(fabsf((a##2).x), fabsf((a##2).y)); \
float c3 = fmaxf(fabsf((a##3).x), fabsf((a##3).y)); \
float c4 = fmaxf(fabsf((a##4).x), fabsf((a##4).y)); \
float c5 = fmaxf(fabsf((a##5).x), fabsf((a##5).y)); \
float c6 = fmaxf(fabsf((a##6).x), fabsf((a##6).y)); \
float c7 = fmaxf(fabsf((a##7).x), fabsf((a##7).y)); \
float c8 = fmaxf(fabsf((a##8).x), fabsf((a##8).y)); \
float c9 = fmaxf(fabsf((a##9).x), fabsf((a##9).y)); \
float c10 = fmaxf(fabsf((a##10).x), fabsf((a##10).y)); \
float c11 = fmaxf(fabsf((a##11).x), fabsf((a##11).y)); \
c0 = fmaxf(c0, c1); c1 = fmaxf(c2, c3); c2 = fmaxf(c4, c5); c3 = fmaxf(c6, c7); \
c4 = fmaxf(c8, c9); c5 = fmaxf(c10, c11); c0 = fmaxf(c0, c1); c1 = fmaxf(c2, c3); \
c2 = fmaxf(c4, c5); c0 = fmaxf(c0, c1); c0 = fmaxf(c0, c2); \
n[i] = c0; \
float C = __fdividef(MAX_SHORT, c0); \
h[i+0*length] = make_short4((short)(C*(float)(a##0).x), (short)(C*(float)(a##0).y), \
(short)(C*(float)(a##1).x), (short)(C*(float)(a##1).y)); \
h[i+1*length] = make_short4((short)(C*(float)(a##2).x), (short)(C*(float)(a##2).y), \
(short)(C*(float)(a##3).x), (short)(C*(float)(a##3).y)); \
h[i+2*length] = make_short4((short)(C*(float)(a##4).x), (short)(C*(float)(a##4).y), \
(short)(C*(float)(a##5).x), (short)(C*(float)(a##5).y)); \
h[i+3*length] = make_short4((short)(C*(float)(a##6).x), (short)(C*(float)(a##6).y), \
(short)(C*(float)(a##7).x), (short)(C*(float)(a##7).y)); \
h[i+4*length] = make_short4((short)(C*(float)(a##8).x), (short)(C*(float)(a##8).y), \
(short)(C*(float)(a##9).x), (short)(C*(float)(a##9).y)); \
h[i+5*length] = make_short4((short)(C*(float)(a##10).x), (short)(C*(float)(a##10).y), \
(short)(C*(float)(a##11).x), (short)(C*(float)(a##11).y));}
#define CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(h, n, a, length) \
{float c0 = fmaxf(fabsf((a##0).x), fabsf((a##0).y)); \
float c1 = fmaxf(fabsf((a##1).x), fabsf((a##1).y)); \
float c2 = fmaxf(fabsf((a##2).x), fabsf((a##2).y)); \
c0 = fmaxf(c0, c1); c0 = fmaxf(c0, c2); \
n[i] = c0; \
float C = __fdividef(MAX_SHORT, c0); \
h[i+0*length] = make_short2((short)(C*(float)(a##0).x), (short)(C*(float)(a##0).y)); \
h[i+1*length] = make_short2((short)(C*(float)(a##1).x), (short)(C*(float)(a##1).y)); \
h[i+2*length] = make_short2((short)(C*(float)(a##2).x), (short)(C*(float)(a##2).y));}
#define CONSTRUCT_HALF_SPINOR_FROM_DOUBLE_ST(h, n, a, length) \
{float c0 = fmaxf(fabsf((a##0).x), fabsf((a##0).y)); \
float c1 = fmaxf(fabsf((a##1).x), fabsf((a##1).y)); \
float c2 = fmaxf(fabsf((a##2).x), fabsf((a##2).y)); \
c0 = fmaxf(c0, c1); c0 = fmaxf(c0, c2); \
n[i] = c0; \
float C = __fdividef(MAX_SHORT, c0); \
h[i+0*length] = make_short2((short)(C*(float)(a##0).x), (short)(C*(float)(a##0).y)); \
h[i+1*length] = make_short2((short)(C*(float)(a##1).x), (short)(C*(float)(a##1).y)); \
h[i+2*length] = make_short2((short)(C*(float)(a##2).x), (short)(C*(float)(a##2).y));}
#define SUM_FLOAT4(sum, a) \
float sum = fabs(a.x) + fabs(a.y) + fabs(a.z) + fabs(a.w);
#define SUM_FLOAT2(sum, a) \
float sum = fabs(a.x) + fabs(a.y);
#if (__CUDA_ARCH__ < 200)
#define REAL_DOT_FLOAT4(dot, a, b) \
float dot = a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w;
#else
#define REAL_DOT_FLOAT4(dot, a, b) \
float dot = fmaf(a.x, b.x, 0.0f); \
dot = fmaf(a.y, b.y, dot); \
dot = fmaf(a.z, b.z, dot); \
dot = fmaf(a.w, b.w, dot)
#endif
#define REAL_DOT_FLOAT2(dot, a, b) \
float dot = a.x*b.x + a.y*b.y;
#if (__CUDA_ARCH__ < 200)
#define IMAG_DOT_FLOAT4(dot, a, b) \
float dot = a.x*b.y - a.y*b.x + a.z*b.w - a.w*b.z;
#else
#define IMAG_DOT_FLOAT4(dot, a, b) \
float dot = fmaf(a.x, b.y, 0.0f); \
dot = fmaf(-a.y, b.x, dot); \
dot = fmaf(a.z, b.w, dot); \
dot = fmaf(-a.w, b.z, dot)
#endif
#define IMAG_DOT_FLOAT2(dot, a, b) \
float dot = a.x*b.y - a.y*b.x;
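// REAL_DOT_* and IMAG_DOT_* accumulate the real and imaginary parts of the
// complex inner product conj(a).b, with each float4 packing two complex numbers
// as (re0, im0, re1, im1). A minimal host sketch for one float4 pair follows;
// the helper name is illustrative only.
static inline void cdotPairSketch(float4 a, float4 b, float *re, float *im)
{
*re = a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w; // Re(conj(a)*b), summed over both complex pairs
*im = a.x*b.y - a.y*b.x + a.z*b.w - a.w*b.z; // Im(conj(a)*b)
}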
#define AX_FLOAT4(a, X) \
X.x *= a; X.y *= a; X.z *= a; X.w *= a;
#define AX_FLOAT2(a, X) \
X.x *= a; X.y *= a;
#define XPY_FLOAT4(X, Y) \
Y.x += X.x; Y.y += X.y; Y.z += X.z; Y.w += X.w;
#define XPY_FLOAT2(X, Y) \
Y.x += X.x; Y.y += X.y;
#define XMY_FLOAT4(X, Y) \
Y.x = X.x - Y.x; Y.y = X.y - Y.y; Y.z = X.z - Y.z; Y.w = X.w - Y.w;
#define XMY_FLOAT2(X, Y) \
Y.x = X.x - Y.x; Y.y = X.y - Y.y;
#define MXPY_FLOAT4(X, Y) \
Y.x -= X.x; Y.y -= X.y; Y.z -= X.z; Y.w -= X.w;
#define MXPY_FLOAT2(X, Y) \
Y.x -= X.x; Y.y -= X.y;
#if (__CUDA_ARCH__ < 200)
#define AXPY_FLOAT4(a, X, Y) \
Y.x += a*X.x; Y.y += a*X.y; \
Y.z += a*X.z; Y.w += a*X.w;
#else
#define AXPY_FLOAT4(a, X, Y) \
Y.x = fmaf(a, X.x, Y.x); Y.y = fmaf(a, X.y, Y.y); \
Y.z = fmaf(a, X.z, Y.z); Y.w = fmaf(a, X.w, Y.w);
#endif
#define AXPY_FLOAT2(a, X, Y) \
Y.x += a*X.x; Y.y += a*X.y;
#define AXPBY_FLOAT4(a, X, b, Y) \
Y.x = b*Y.x; Y.x += a*X.x; Y.y = b*Y.y; Y.y += a*X.y; \
Y.z = b*Y.z; Y.z += a*X.z; Y.w = b*Y.w; Y.w += a*X.w;
#define AXPBY_FLOAT2(a, X, b, Y) \
Y.x = b*Y.x; Y.x += a*X.x; Y.y = b*Y.y; Y.y += a*X.y;
#if (__CUDA_ARCH__ < 200)
#define XPAY_FLOAT4(X, a, Y) \
Y.x = X.x + a*Y.x; Y.y = X.y + a*Y.y; \
Y.z = X.z + a*Y.z; Y.w = X.w + a*Y.w;
#else
#define XPAY_FLOAT4(X, a, Y) \
Y.x = fmaf(a, Y.x, X.x); Y.y = fmaf(a, Y.y, X.y); \
Y.z = fmaf(a, Y.z, X.z); Y.w = fmaf(a, Y.w, X.w);
#endif
#define XPAY_FLOAT2(X, a, Y) \
Y.x = X.x + a*Y.x; Y.y = X.y + a*Y.y;
#if (__CUDA_ARCH__ < 200)
#define CAXPY_FLOAT4(a, X, Y) \
Y.x += a.x*X.x; Y.x -= a.y*X.y; \
Y.y += a.y*X.x; Y.y += a.x*X.y; \
Y.z += a.x*X.z; Y.z -= a.y*X.w; \
Y.w += a.y*X.z; Y.w += a.x*X.w;
#else
#define CAXPY_FLOAT4(a, X, Y) \
Y.x = fmaf(a.x, X.x, Y.x); Y.x = fmaf(-a.y, X.y, Y.x); \
Y.y = fmaf(a.y, X.x, Y.y); Y.y = fmaf( a.x, X.y, Y.y); \
Y.z = fmaf(a.x, X.z, Y.z); Y.z = fmaf(-a.y, X.w, Y.z); \
Y.w = fmaf(a.y, X.z, Y.w); Y.w = fmaf( a.x, X.w, Y.w);
#endif // (__CUDA_ARCH__ < 200)
#if (__CUDA_ARCH__ < 200)
#define CAXPY_FLOAT2(a, X, Y) \
Y.x += a.x*X.x; Y.x -= a.y*X.y; \
Y.y += a.y*X.x; Y.y += a.x*X.y;
#else
#define CAXPY_FLOAT2(a, X, Y) \
Y.x = fmaf(a.x, X.x, Y.x); Y.x = fmaf(-a.y, X.y, Y.x); \
Y.y = fmaf(a.y, X.x, Y.y); Y.y = fmaf( a.x, X.y, Y.y);
#endif // (__CUDA_ARCH__ < 200)
#define CAXPY_DOUBLE2(a, X, Y) \
Y.x += a.x*X.x; Y.x -= a.y*X.y; \
Y.y += a.y*X.x; Y.y += a.x*X.y;
#define CMAXPY_FLOAT4(a, X, Y) \
Y.x -= a.x*X.x; Y.x += a.y*X.y; \
Y.y -= a.y*X.x; Y.y -= a.x*X.y; \
Y.z -= a.x*X.z; Y.z += a.y*X.w; \
Y.w -= a.y*X.z; Y.w -= a.x*X.w;
#define CMAXPY_FLOAT2(a, X, Y) \
Y.x -= a.x*X.x; Y.x += a.y*X.y; \
Y.y -= a.y*X.x; Y.y -= a.x*X.y;
#define CAXPBY_FLOAT4(a, X, b, Y) \
{ float2 y; \
y.x = a.x*X.x; y.x -= a.y*X.y; y.x += b.x*Y.x; y.x -= b.y*Y.y; \
y.y = a.y*X.x; y.y += a.x*X.y; y.y += b.y*Y.x; y.y += b.x*Y.y; \
Y.x = y.x; Y.y = y.y; \
y.x = a.x*X.z; y.x -= a.y*X.w; y.x += b.x*Y.z; y.x -= b.y*Y.w; \
y.y = a.y*X.z; y.y += a.x*X.w; y.y += b.y*Y.z; y.y += b.x*Y.w; \
Y.z = y.x; Y.w = y.y;}
#define CAXPBY_FLOAT2(a, X, b, Y) \
{ float2 y; \
y.x = a.x*X.x; y.x -= a.y*X.y; y.x += b.x*Y.x; y.x -= b.y*Y.y; \
y.y = a.y*X.x; y.y += a.x*X.y; y.y += b.y*Y.x; y.y += b.x*Y.y; \
Y.x = y.x; Y.y = y.y;}
#define CXPAYPBZ_FLOAT4(X, a, Y, b, Z) \
{float2 z; \
z.x = X.x + a.x*Y.x; z.x -= a.y*Y.y; z.x += b.x*Z.x; z.x -= b.y*Z.y; \
z.y = X.y + a.y*Y.x; z.y += a.x*Y.y; z.y += b.y*Z.x; z.y += b.x*Z.y; \
Z.x = z.x; Z.y = z.y; \
z.x = X.z + a.x*Y.z; z.x -= a.y*Y.w; z.x += b.x*Z.z; z.x -= b.y*Z.w; \
z.y = X.w + a.y*Y.z; z.y += a.x*Y.w; z.y += b.y*Z.z; z.y += b.x*Z.w; \
Z.z = z.x; Z.w = z.y;}
#define CXPAYPBZ_FLOAT2(X, a, Y, b, Z) \
{float2 z; \
z.x = X.x + a.x*Y.x; z.x -= a.y*Y.y; z.x += b.x*Z.x; z.x -= b.y*Z.y; \
z.y = X.y + a.y*Y.x; z.y += a.x*Y.y; z.y += b.y*Z.x; z.y += b.x*Z.y; \
Z.x = z.x; Z.y = z.y;}
#if (__CUDA_ARCH__ < 200)
#define CAXPBYPZ_FLOAT4(a, X, b, Y, Z) \
Z.x += a.x*X.x - a.y*X.y + b.x*Y.x - b.y*Y.y; \
Z.y += a.y*X.x + a.x*X.y + b.y*Y.x + b.x*Y.y; \
Z.z += a.x*X.z - a.y*X.w + b.x*Y.z - b.y*Y.w; \
Z.w += a.y*X.z + a.x*X.w + b.y*Y.z + b.x*Y.w;
#else
#define CAXPBYPZ_FLOAT4(a, X, b, Y, Z) \
Z.x = fmaf(a.x, X.x, Z.x); Z.x = fmaf(-a.y, X.y, Z.x); Z.x = fmaf(b.x, Y.x, Z.x); Z.x = fmaf(-b.y, Y.y, Z.x); \
Z.y = fmaf(a.y, X.x, Z.y); Z.y = fmaf( a.x, X.y, Z.y); Z.y = fmaf(b.y, Y.x, Z.y); Z.y = fmaf( b.x, Y.y, Z.y); \
Z.z = fmaf(a.x, X.z, Z.z); Z.z = fmaf(-a.y, X.w, Z.z); Z.z = fmaf(b.x, Y.z, Z.z); Z.z = fmaf(-b.y, Y.w, Z.z); \
Z.w = fmaf(a.y, X.z, Z.w); Z.w = fmaf( a.x, X.w, Z.w); Z.w = fmaf(b.y, Y.z, Z.w); Z.w = fmaf( b.x, Y.w, Z.w);
#endif // (__CUDA_ARCH__ < 200)
#if (__CUDA_ARCH__ < 200)
#define CAXPBYPZ_FLOAT2(a, X, b, Y, Z) \
Z.x += a.x*X.x - a.y*X.y + b.x*Y.x - b.y*Y.y; \
Z.y += a.y*X.x + a.x*X.y + b.y*Y.x + b.x*Y.y;
#else
#define CAXPBYPZ_FLOAT2(a, X, b, Y, Z) \
Z.x = fmaf(a.x, X.x, Z.x); Z.x = fmaf(-a.y, X.y, Z.x); Z.x = fmaf(b.x, Y.x, Z.x); Z.x = fmaf(-b.y, Y.y, Z.x); \
Z.y = fmaf(a.y, X.x, Z.y); Z.y = fmaf( a.x, X.y, Z.y); Z.y = fmaf(b.y, Y.x, Z.y); Z.y = fmaf( b.x, Y.y, Z.y);
#endif // (__CUDA_ARCH__ < 200)
// Double precision input spinor field
texture<int4, 1> xTexDouble2;
texture<int4, 1> yTexDouble2;
texture<int4, 1> zTexDouble2;
texture<int4, 1> wTexDouble2;
texture<int4, 1> uTexDouble2;
// Single precision input spinor field
texture<float2, 1> xTexSingle2;
texture<float2, 1> yTexSingle2;
texture<float4, 1> xTexSingle4;
// Half precision input spinor field
texture<short4, 1, cudaReadModeNormalizedFloat> texHalf1;
texture<short2, 1, cudaReadModeNormalizedFloat> texHalfSt1;
texture<float, 1, cudaReadModeElementType> texNorm1;
// Half precision input spinor field
texture<short4, 1, cudaReadModeNormalizedFloat> texHalf2;
texture<short2, 1, cudaReadModeNormalizedFloat> texHalfSt2;
texture<float, 1, cudaReadModeElementType> texNorm2;
// Half precision input spinor field
texture<short4, 1, cudaReadModeNormalizedFloat> texHalf3;
texture<short2, 1, cudaReadModeNormalizedFloat> texHalfSt3;
texture<float, 1, cudaReadModeElementType> texNorm3;
// Half precision input spinor field
texture<short4, 1, cudaReadModeNormalizedFloat> texHalf4;
texture<short2, 1, cudaReadModeNormalizedFloat> texHalfSt4;
texture<float, 1, cudaReadModeElementType> texNorm4;
// Half precision input spinor field
texture<short4, 1, cudaReadModeNormalizedFloat> texHalf5;
texture<short2, 1, cudaReadModeNormalizedFloat> texHalfSt5;
texture<float, 1, cudaReadModeElementType> texNorm5;
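// Double-precision fields are bound to int4 textures because texture fetches do
// not support double directly; fetch_double2 (used by the kernels below and
// defined elsewhere in this file) reinterprets the fetched int4 as a double2.
// A typical implementation of that trick resembles the commented sketch below
// (illustrative only, not a redefinition):
//
// static __inline__ __device__ double2 fetch_double2_sketch(texture<int4, 1> t, int i)
// {
// int4 v = tex1Dfetch(t, i);
// return make_double2(__hiloint2double(v.y, v.x), __hiloint2double(v.w, v.z));
// }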
#define checkSpinor(a, b) \
{ \
if (a.Precision() != b.Precision()) \
errorQuda("precisions do not match: %d %d", a.Precision(), b.Precision()); \
if (a.Length() != b.Length()) \
errorQuda("lengths do not match: %d %d", a.Length(), b.Length()); \
if (a.Stride() != b.Stride()) \
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \
}
// For kernels with precision conversion built in
#define checkSpinorLength(a, b) \
{ \
if (a.Length() != b.Length()) { \
errorQuda("lengths do not match: %d %d", a.Length(), b.Length()); \
} \
}
__global__ void convertDSKernel(double2 *dst, float4 *src, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
for (int k=0; k<6; k++) {
dst[2*k*length+i].x = src[k*length+i].x;
dst[2*k*length+i].y = src[k*length+i].y;
dst[(2*k+1)*length+i].x = src[k*length+i].z;
dst[(2*k+1)*length+i].y = src[k*length+i].w;
}
i += gridSize;
}
}
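// The Wilson-spinor conversion above maps six float4 (24 reals per site) onto
// twelve double2 at the same field stride: float4 component k splits into
// double2 components 2k and 2k+1. A host-side sketch of the same index mapping
// for a single site (hypothetical helper, no stride handling) follows.
static inline void convertSiteSingleToDoubleSketch(const float4 src[6], double2 dst[12])
{
for (int k = 0; k < 6; k++) {
dst[2*k] = make_double2(src[k].x, src[k].y); // first complex of the float4
dst[2*k+1] = make_double2(src[k].z, src[k].w); // second complex of the float4
}
}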
__global__ void convertDSKernel(double2 *dst, float2 *src, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
for (int k=0; k<3; k++) {
dst[k*length+i].x = src[k*length+i].x;
dst[k*length+i].y = src[k*length+i].y;
}
i += gridSize;
}
}
__global__ void convertSDKernel(float4 *dst, double2 *src, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
for (int k=0; k<6; k++) {
dst[k*length+i].x = src[2*k*length+i].x;
dst[k*length+i].y = src[2*k*length+i].y;
dst[k*length+i].z = src[(2*k+1)*length+i].x;
dst[k*length+i].w = src[(2*k+1)*length+i].y;
}
i += gridSize;
}
}
__global__ void convertSDKernel(float2 *dst, double2 *src, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
for (int k=0; k<3; k++) {
dst[k*length+i].x = src[k*length+i].x;
dst[k*length+i].y = src[k*length+i].y;
}
i += gridSize;
}
}
__global__ void convertHSKernel(short4 *h, float *norm, int length, int real_length) {
int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while(i < real_length) {
float4 F0 = tex1Dfetch(xTexSingle4, i + 0*length);
float4 F1 = tex1Dfetch(xTexSingle4, i + 1*length);
float4 F2 = tex1Dfetch(xTexSingle4, i + 2*length);
float4 F3 = tex1Dfetch(xTexSingle4, i + 3*length);
float4 F4 = tex1Dfetch(xTexSingle4, i + 4*length);
float4 F5 = tex1Dfetch(xTexSingle4, i + 5*length);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(h, norm, F, length);
i += gridSize;
}
}
__global__ void convertHSKernel(short2 *h, float *norm, int length, int real_length) {
int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while(i < real_length) {
float2 F0 = tex1Dfetch(xTexSingle2, i + 0*length);
float2 F1 = tex1Dfetch(xTexSingle2, i + 1*length);
float2 F2 = tex1Dfetch(xTexSingle2, i + 2*length);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(h, norm, F, length);
i += gridSize;
}
}
__global__ void convertSHKernel(float4 *res, int length, int real_length) {
int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i<real_length) {
RECONSTRUCT_HALF_SPINOR(I, texHalf1, texNorm1, length);
res[0*length+i] = I0;
res[1*length+i] = I1;
res[2*length+i] = I2;
res[3*length+i] = I3;
res[4*length+i] = I4;
res[5*length+i] = I5;
i += gridSize;
}
}
__global__ void convertSHKernel(float2 *res, int length, int real_length) {
int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i<real_length) {
RECONSTRUCT_HALF_SPINOR_ST(I, texHalfSt1, texNorm1, length);
res[0*length+i] = I0;
res[1*length+i] = I1;
res[2*length+i] = I2;
i += gridSize;
}
}
__global__ void convertHDKernel(short4 *h, float *norm, int length, int real_length) {
int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while(i < real_length) {
double2 F0 = fetch_double2(xTexDouble2, i+0*length);
double2 F1 = fetch_double2(xTexDouble2, i+1*length);
double2 F2 = fetch_double2(xTexDouble2, i+2*length);
double2 F3 = fetch_double2(xTexDouble2, i+3*length);
double2 F4 = fetch_double2(xTexDouble2, i+4*length);
double2 F5 = fetch_double2(xTexDouble2, i+5*length);
double2 F6 = fetch_double2(xTexDouble2, i+6*length);
double2 F7 = fetch_double2(xTexDouble2, i+7*length);
double2 F8 = fetch_double2(xTexDouble2, i+8*length);
double2 F9 = fetch_double2(xTexDouble2, i+9*length);
double2 F10 = fetch_double2(xTexDouble2, i+10*length);
double2 F11 = fetch_double2(xTexDouble2, i+11*length);
CONSTRUCT_HALF_SPINOR_FROM_DOUBLE(h, norm, F, length);
i += gridSize;
}
}
__global__ void convertHDKernel(short2 *h, float *norm, int length, int real_length) {
int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while(i < real_length) {
double2 F0 = fetch_double2(xTexDouble2, i+0*length);
double2 F1 = fetch_double2(xTexDouble2, i+1*length);
double2 F2 = fetch_double2(xTexDouble2, i+2*length);
CONSTRUCT_HALF_SPINOR_FROM_DOUBLE_ST(h, norm, F, length);
i += gridSize;
}
}
__global__ void convertDHKernel(double2 *res, int length, int real_length) {
int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while(i < real_length) {
RECONSTRUCT_HALF_SPINOR(I, texHalf1, texNorm1, length);
res[0*length+i] = make_double2(I0.x, I0.y);
res[1*length+i] = make_double2(I0.z, I0.w);
res[2*length+i] = make_double2(I1.x, I1.y);
res[3*length+i] = make_double2(I1.z, I1.w);
res[4*length+i] = make_double2(I2.x, I2.y);
res[5*length+i] = make_double2(I2.z, I2.w);
res[6*length+i] = make_double2(I3.x, I3.y);
res[7*length+i] = make_double2(I3.z, I3.w);
res[8*length+i] = make_double2(I4.x, I4.y);
res[9*length+i] = make_double2(I4.z, I4.w);
res[10*length+i] = make_double2(I5.x, I5.y);
res[11*length+i] = make_double2(I5.z, I5.w);
i += gridSize;
}
}
__global__ void convertDHKernelSt(double2 *res, int length, int real_length) {
int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while(i < real_length) {
RECONSTRUCT_HALF_SPINOR_ST(I, texHalfSt1, texNorm1, length);
res[0*length+i] = make_double2(I0.x, I0.y);
res[1*length+i] = make_double2(I1.x, I1.y);
res[2*length+i] = make_double2(I2.x, I2.y);
i += gridSize;
}
}
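// RECONSTRUCT_HALF_SPINOR* invert the quantization done by the
// CONSTRUCT_HALF_SPINOR* macros: a short4/short2 texture fetch with
// cudaReadModeNormalizedFloat already returns values scaled into [-1, 1], so
// reconstruction is just a multiply by the per-site norm. Host-side sketch of
// the dequantization for a single component (illustrative helper only):
static inline float dequantizeHalfComponentSketch(short q, float site_norm)
{
// the normalized-float read maps q to roughly q/32767 in [-1, 1];
// multiplying by the stored max-abs recovers the original magnitude
return site_norm * ((float)q / 32767.0f);
}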
void copyCuda(cudaColorSpinorField &dst, const cudaColorSpinorField &src) {
if (&src == &dst) return; // aliasing fields
if (src.Nspin() != 1 && src.Nspin() != 4){
errorQuda("nSpin(%d) not supported in function %s, line %d\n", src.Nspin(), __FUNCTION__, __LINE__);
}
if ((dst.Precision() == QUDA_HALF_PRECISION || src.Precision() == QUDA_HALF_PRECISION) &&
(dst.SiteSubset() == QUDA_FULL_SITE_SUBSET || src.SiteSubset() == QUDA_FULL_SITE_SUBSET)) {
copyCuda(dst.Even(), src.Even());
copyCuda(dst.Odd(), src.Odd());
return;
}
// For a given dst precision, there are two non-trivial possibilities for the
// src precision. The higher one corresponds to kernel index 0 (in the table
// of block and grid dimensions), while the lower one corresponds to index 1.
int id;
if (src.Precision() == QUDA_DOUBLE_PRECISION ||
(dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION)) {
id = 0;
} else {
id = 1;
}
setBlock(id, dst.Stride(), dst.Precision());
quda::blas_bytes += src.RealLength()*((int)src.Precision() + (int)dst.Precision());
if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
if (src.Nspin() == 4){
convertDSKernel<<<blasGrid, blasBlock>>>((double2*)dst.V(), (float4*)src.V(), src.Stride());
}else{ //src.Nspin() == 1
convertDSKernel<<<blasGrid, blasBlock>>>((double2*)dst.V(), (float2*)src.V(), src.Stride());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
if (src.Nspin() == 4){
convertSDKernel<<<blasGrid, blasBlock>>>((float4*)dst.V(), (double2*)src.V(), src.Stride());
}else{ //src.Nspin() ==1
convertSDKernel<<<blasGrid, blasBlock>>>((float2*)dst.V(), (double2*)src.V(), src.Stride());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
quda::blas_bytes += src.Volume()*sizeof(float);
int spinor_bytes = src.Length()*sizeof(short);
if (src.Nspin() == 4){
cudaBindTexture(0, texHalf1, src.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, src.Norm(), spinor_bytes/12);
convertSHKernel<<<blasGrid, blasBlock>>>((float4*)dst.V(), src.Stride(), src.Volume());
cudaUnbindTexture(texHalf1);
cudaUnbindTexture(texNorm1);
}else{ //nSpin == 1
cudaBindTexture(0, texHalfSt1, src.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, src.Norm(), spinor_bytes/3);
convertSHKernel<<<blasGrid, blasBlock>>>((float2*)dst.V(), src.Stride(), src.Volume());
cudaUnbindTexture(texHalfSt1);
cudaUnbindTexture(texNorm1);
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
quda::blas_bytes += dst.Volume()*sizeof(float);
int spinor_bytes = src.Length()*sizeof(float);
if (src.Nspin() == 4){
cudaBindTexture(0, xTexSingle4, src.V(), spinor_bytes);
convertHSKernel<<<blasGrid, blasBlock>>>((short4*)dst.V(), (float*)dst.Norm(), src.Stride(), src.Volume());
cudaUnbindTexture(xTexSingle4);
}else{ //nSpin == 1
cudaBindTexture(0, xTexSingle2, src.V(), spinor_bytes);
convertHSKernel<<<blasGrid, blasBlock>>>((short2*)dst.V(), (float*)dst.Norm(), src.Stride(), src.Volume());
cudaUnbindTexture(xTexSingle2);
}
} else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
quda::blas_bytes += src.Volume()*sizeof(float);
int spinor_bytes = src.Length()*sizeof(short);
if (src.Nspin() == 4){
cudaBindTexture(0, texHalf1, src.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, src.Norm(), spinor_bytes/12);
convertDHKernel<<<blasGrid, blasBlock>>>((double2*)dst.V(), src.Stride(), src.Volume());
cudaUnbindTexture(texHalf1);
cudaUnbindTexture(texNorm1);
}else{ //nSpin == 1
cudaBindTexture(0, texHalfSt1, src.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, src.Norm(), spinor_bytes/3);
convertDHKernelSt<<<blasGrid, blasBlock>>>((double2*)dst.V(), src.Stride(), src.Volume());
cudaUnbindTexture(texHalfSt1);
cudaUnbindTexture(texNorm1);
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
quda::blas_bytes += dst.Volume()*sizeof(float);
int spinor_bytes = src.Length()*sizeof(double);
cudaBindTexture(0, xTexDouble2, src.V(), spinor_bytes);
if (src.Nspin() == 4){
convertHDKernel<<<blasGrid, blasBlock>>>((short4*)dst.V(), (float*)dst.Norm(), src.Stride(), src.Volume());
}else{ //nSpin == 1
convertHDKernel<<<blasGrid, blasBlock>>>((short2*)dst.V(), (float*)dst.Norm(), src.Stride(), src.Volume());
}
cudaUnbindTexture(xTexDouble2);
} else {
cudaMemcpy(dst.V(), src.V(), dst.Bytes(), cudaMemcpyDeviceToDevice);
if (dst.Precision() == QUDA_HALF_PRECISION) {
cudaMemcpy(dst.Norm(), src.Norm(), dst.Bytes()/(dst.Ncolor()*dst.Nspin()), cudaMemcpyDeviceToDevice);
quda::blas_bytes += 2*dst.RealLength()*sizeof(float);
}
}
cudaThreadSynchronize();
if (!blasTuning) checkCudaError();
}
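// Hypothetical convenience wrapper (not part of QUDA): converting a field to a
// different precision is just a copy, since copyCuda above dispatches on the
// precisions of its two arguments and binds whatever textures the selected
// conversion kernel needs.
static inline void copyPrecisionSketch(cudaColorSpinorField &dst, const cudaColorSpinorField &src)
{
copyCuda(dst, src); // e.g. double -> half goes through convertHDKernel
}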
template <typename Float, typename Float2>
__global__ void axpbyKernel(Float a, Float2 *x, Float b, Float2 *y, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
y[i] = a*x[i] + b*y[i];
i += gridSize;
}
}
__global__ void axpbyHKernel(float a, float b, short4 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
AXPBY_FLOAT4(a, x0, b, y0);
AXPBY_FLOAT4(a, x1, b, y1);
AXPBY_FLOAT4(a, x2, b, y2);
AXPBY_FLOAT4(a, x3, b, y3);
AXPBY_FLOAT4(a, x4, b, y4);
AXPBY_FLOAT4(a, x5, b, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
__global__ void axpbyHKernel(float a, float b, short2 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
AXPBY_FLOAT2(a, x0, b, y0);
AXPBY_FLOAT2(a, x1, b, y1);
AXPBY_FLOAT2(a, x2, b, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation y[i] = a*x[i] + b*y[i]
void axpbyCuda(const double &a, cudaColorSpinorField &x, const double &b, cudaColorSpinorField &y) {
setBlock(2, x.Length(), x.Precision());
checkSpinor(x, y);
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
axpbyKernel<<<blasGrid, blasBlock>>>(a, (double*)x.V(), b, (double*)y.V(), x.Length());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
axpbyKernel<<<blasGrid, blasBlock>>>((float)a, (float2*)x.V(), (float)b, (float2*)y.V(), x.Length()/2);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
axpbyCuda(a, x.Even(), b, y.Even());
axpbyCuda(a, x.Odd(), b, y.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
axpbyHKernel<<<blasGrid, blasBlock>>>((float)a, (float)b, (short4*)y.V(),
(float*)y.Norm(), y.Stride(), y.Volume());
}else if (x.Nspin() == 1) {//staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
axpbyHKernel<<<blasGrid, blasBlock>>>((float)a, (float)b, (short2*)y.V(),
(float*)y.Norm(), y.Stride(), y.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 3*x.Volume()*sizeof(float);
}
quda::blas_bytes += 3*x.RealLength()*x.Precision();
quda::blas_flops += 3*x.RealLength();
if (!blasTuning) checkCudaError();
}
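// Host-side reference for axpby (a sketch for spot-checking the double-precision
// path; the helper name is illustrative and not part of QUDA):
// y[i] = a*x[i] + b*y[i] over a plain array of reals.
static void axpbyReferenceHost(double a, const double *x, double b, double *y, int len)
{
for (int i = 0; i < len; i++) y[i] = a*x[i] + b*y[i];
}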
template <typename Float>
__global__ void xpyKernel(Float *x, Float *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
y[i] += x[i];
i += gridSize;
}
}
__global__ void xpyHKernel(short4 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
XPY_FLOAT4(x0, y0);
XPY_FLOAT4(x1, y1);
XPY_FLOAT4(x2, y2);
XPY_FLOAT4(x3, y3);
XPY_FLOAT4(x4, y4);
XPY_FLOAT4(x5, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
__global__ void xpyHKernel(short2 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
XPY_FLOAT2(x0, y0);
XPY_FLOAT2(x1, y1);
XPY_FLOAT2(x2, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation y[i] = x[i] + y[i]
void xpyCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) {
checkSpinor(x,y);
setBlock(3, x.Length(), x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
xpyKernel<<<blasGrid, blasBlock>>>((double*)x.V(), (double*)y.V(), x.Length());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
xpyKernel<<<blasGrid, blasBlock>>>((float2*)x.V(), (float2*)y.V(), x.Length()/2);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
xpyCuda(x.Even(), y.Even());
xpyCuda(x.Odd(), y.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
xpyHKernel<<<blasGrid, blasBlock>>>((short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
xpyHKernel<<<blasGrid, blasBlock>>>((short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 3*x.Volume()*sizeof(float);
}
quda::blas_bytes += 3*x.RealLength()*x.Precision();
quda::blas_flops += x.RealLength();
if (!blasTuning) checkCudaError();
}
template <typename Float, typename Float2>
__global__ void axpyKernel(Float a, Float2 *x, Float2 *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
y[i] += a*x[i];
i += gridSize;
}
}
__global__ void axpyHKernel(float a, short4 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
AXPY_FLOAT4(a, x0, y0);
AXPY_FLOAT4(a, x1, y1);
AXPY_FLOAT4(a, x2, y2);
AXPY_FLOAT4(a, x3, y3);
AXPY_FLOAT4(a, x4, y4);
AXPY_FLOAT4(a, x5, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
__global__ void axpyHKernel(float a, short2 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
AXPY_FLOAT2(a, x0, y0);
AXPY_FLOAT2(a, x1, y1);
AXPY_FLOAT2(a, x2, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation y[i] = a*x[i] + y[i]
void axpyCuda(const double &a, cudaColorSpinorField &x, cudaColorSpinorField &y) {
checkSpinor(x,y);
setBlock(4, x.Length(), x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
axpyKernel<<<blasGrid, blasBlock>>>(a, (double*)x.V(), (double*)y.V(), x.Length());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
axpyKernel<<<blasGrid, blasBlock>>>((float)a, (float2*)x.V(), (float2*)y.V(), x.Length()/2);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
axpyCuda(a, x.Even(), y.Even());
axpyCuda(a, x.Odd(), y.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
axpyHKernel<<<blasGrid, blasBlock>>>((float)a, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
axpyHKernel<<<blasGrid, blasBlock>>>((float)a, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 3*x.Volume()*sizeof(float);
}
quda::blas_bytes += 3*x.RealLength()*x.Precision();
quda::blas_flops += 2*x.RealLength();
if (!blasTuning) checkCudaError();
}
template <typename Float, typename Float2>
__global__ void xpayKernel(const Float2 *x, Float a, Float2 *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
y[i] = x[i] + a*y[i];
i += gridSize;
}
}
__global__ void xpayHKernel(float a, short4 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
XPAY_FLOAT4(x0, a, y0);
XPAY_FLOAT4(x1, a, y1);
XPAY_FLOAT4(x2, a, y2);
XPAY_FLOAT4(x3, a, y3);
XPAY_FLOAT4(x4, a, y4);
XPAY_FLOAT4(x5, a, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
__global__ void xpayHKernel(float a, short2 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
XPAY_FLOAT2(x0, a, y0);
XPAY_FLOAT2(x1, a, y1);
XPAY_FLOAT2(x2, a, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation y[i] = x[i] + a*y[i]
void xpayCuda(const cudaColorSpinorField &x, const double &a, cudaColorSpinorField &y) {
checkSpinor(x,y);
setBlock(5, x.Length(), x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
xpayKernel<<<blasGrid, blasBlock>>>((double*)x.V(), a, (double*)y.V(), x.Length());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
xpayKernel<<<blasGrid, blasBlock>>>((float2*)x.V(), (float)a, (float2*)y.V(), x.Length()/2);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
xpayCuda(x.Even(), a, y.Even());
xpayCuda(x.Odd(), a, y.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
xpayHKernel<<<blasGrid, blasBlock>>>((float)a, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else if (x.Nspin() ==1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
xpayHKernel<<<blasGrid, blasBlock>>>((float)a, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 3*x.Volume()*sizeof(float);
}
quda::blas_bytes += 3*x.RealLength()*x.Precision();
quda::blas_flops += 2*x.RealLength();
if (!blasTuning) checkCudaError();
}
template <typename Float>
__global__ void mxpyKernel(Float *x, Float *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
y[i] -= x[i];
i += gridSize;
}
}
__global__ void mxpyHKernel(short4 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
MXPY_FLOAT4(x0, y0);
MXPY_FLOAT4(x1, y1);
MXPY_FLOAT4(x2, y2);
MXPY_FLOAT4(x3, y3);
MXPY_FLOAT4(x4, y4);
MXPY_FLOAT4(x5, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
__global__ void mxpyHKernel(short2 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
MXPY_FLOAT2(x0, y0);
MXPY_FLOAT2(x1, y1);
MXPY_FLOAT2(x2, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation y[i] -= x[i] (minus x plus y)
void mxpyCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) {
checkSpinor(x,y);
setBlock(6, x.Length(), x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
mxpyKernel<<<blasGrid, blasBlock>>>((double*)x.V(), (double*)y.V(), x.Length());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
mxpyKernel<<<blasGrid, blasBlock>>>((float2*)x.V(), (float2*)y.V(), x.Length()/2);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
mxpyCuda(x.Even(), y.Even());
mxpyCuda(x.Odd(), y.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
mxpyHKernel<<<blasGrid, blasBlock>>>((short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else if (x.Nspin() == 1) { //staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
mxpyHKernel<<<blasGrid, blasBlock>>>((short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 3*x.Volume()*sizeof(float);
}
quda::blas_bytes += 3*x.RealLength()*x.Precision();
quda::blas_flops += x.RealLength();
if (!blasTuning) checkCudaError();
}
template <typename Float, typename Float2>
__global__ void axKernel(Float a, Float2 *x, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
x[i] *= a;
i += gridSize;
}
}
__global__ void axHKernel(float a, short4 *xH, float *xN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
AX_FLOAT4(a, x0); AX_FLOAT4(a, x1); AX_FLOAT4(a, x2);
AX_FLOAT4(a, x3); AX_FLOAT4(a, x4); AX_FLOAT4(a, x5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride);
i += gridSize;
}
}
__global__ void axHKernel(float a, short2 *xH, float *xN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
AX_FLOAT2(a, x0); AX_FLOAT2(a, x1); AX_FLOAT2(a, x2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride);
i += gridSize;
}
}
// performs the operation x[i] = a*x[i]
void axCuda(const double &a, cudaColorSpinorField &x) {
setBlock(7, x.Length(), x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
axKernel<<<blasGrid, blasBlock>>>(a, (double*)x.V(), x.Length());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
axKernel<<<blasGrid, blasBlock>>>((float)a, (float2*)x.V(), x.Length()/2);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
axCuda(a, x.Even());
axCuda(a, x.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
axHKernel<<<blasGrid, blasBlock>>>((float)a, (short4*)x.V(), (float*)x.Norm(), x.Stride(), x.Volume());
}else if (x.Nspin() ==1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
axHKernel<<<blasGrid, blasBlock>>>((float)a, (short2*)x.V(), (float*)x.Norm(), x.Stride(), x.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 2*x.Volume()*sizeof(float);
}
quda::blas_bytes += 2*x.RealLength()*x.Precision();
quda::blas_flops += x.RealLength();
if (!blasTuning) checkCudaError();
}
template <typename Float2>
__global__ void caxpyDKernel(Float2 a, Float2 *x, Float2 *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 Z = READ_DOUBLE2_TEXTURE(x, i);
y[i].x += a.x*Z.x - a.y*Z.y;
y[i].y += a.y*Z.x + a.x*Z.y;
i += gridSize;
}
}
template <typename Float2>
__global__ void caxpySKernel(Float2 a, Float2 *x, Float2 *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 Z = read_Float2(x, i);
y[i].x += a.x*Z.x - a.y*Z.y;
y[i].y += a.y*Z.x + a.x*Z.y;
i += gridSize;
}
}
__global__ void caxpyHKernel(float2 a, short4 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
CAXPY_FLOAT4(a, x0, y0);
CAXPY_FLOAT4(a, x1, y1);
CAXPY_FLOAT4(a, x2, y2);
CAXPY_FLOAT4(a, x3, y3);
CAXPY_FLOAT4(a, x4, y4);
CAXPY_FLOAT4(a, x5, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
__global__ void caxpyHKernel(float2 a, short2 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
CAXPY_FLOAT2(a, x0, y0);
CAXPY_FLOAT2(a, x1, y1);
CAXPY_FLOAT2(a, x2, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation y[i] += a*x[i]
void caxpyCuda(const quda::Complex &a, cudaColorSpinorField &x, cudaColorSpinorField &y) {
checkSpinor(x,y);
int length = x.Length()/2;
setBlock(8, length, x.Precision());
quda::blas_bytes += 3*x.RealLength()*x.Precision();
quda::blas_flops += 4*x.RealLength();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
cudaBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
cudaBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
double2 a2 = make_double2(real(a), imag(a));
caxpyDKernel<<<blasGrid, blasBlock>>>(a2, (double2*)x.V(), (double2*)y.V(), length);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
caxpySKernel<<<blasGrid, blasBlock>>>(a2, (float2*)x.V(), (float2*)y.V(), length);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
caxpyCuda(a, x.Even(), y.Even());
caxpyCuda(a, x.Odd(), y.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
float2 a2 = make_float2(real(a), imag(a));
caxpyHKernel<<<blasGrid, blasBlock>>>(a2, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
} else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
float2 a2 = make_float2(real(a), imag(a));
caxpyHKernel<<<blasGrid, blasBlock>>>(a2, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 3*x.Volume()*sizeof(float);
}
if (!blasTuning) checkCudaError();
}
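// Host-side reference for caxpy in the packed (re, im) layout used above
// (a sketch; the helper name is illustrative): y[i] += a*x[i] with complex a,
// expanded into the same real arithmetic as CAXPY_FLOAT2.
static inline void caxpyReferenceHost(float2 a, const float2 *x, float2 *y, int len)
{
for (int i = 0; i < len; i++) {
y[i].x += a.x*x[i].x - a.y*x[i].y; // real part of a*x
y[i].y += a.y*x[i].x + a.x*x[i].y; // imaginary part of a*x
}
}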
template <typename Float2>
__global__ void caxpbyDKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 Z1 = READ_DOUBLE2_TEXTURE(x, i);
Float2 Z2 = READ_DOUBLE2_TEXTURE(y, i);
y[i].x = a.x*Z1.x + b.x*Z2.x - a.y*Z1.y - b.y*Z2.y;
y[i].y = a.y*Z1.x + b.y*Z2.x + a.x*Z1.y + b.x*Z2.y;
i += gridSize;
}
}
template <typename Float2>
__global__ void caxpbySKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 Z1 = read_Float2(x, i);
Float2 Z2 = read_Float2(y, i);
y[i].x = a.x*Z1.x + b.x*Z2.x - a.y*Z1.y - b.y*Z2.y;
y[i].y = a.y*Z1.x + b.y*Z2.x + a.x*Z1.y + b.x*Z2.y;
i += gridSize;
}
}
__global__ void caxpbyHKernel(float2 a, float2 b, short4 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
CAXPBY_FLOAT4(a, x0, b, y0);
CAXPBY_FLOAT4(a, x1, b, y1);
CAXPBY_FLOAT4(a, x2, b, y2);
CAXPBY_FLOAT4(a, x3, b, y3);
CAXPBY_FLOAT4(a, x4, b, y4);
CAXPBY_FLOAT4(a, x5, b, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
__global__ void caxpbyHKernel(float2 a, float2 b, short2 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
CAXPBY_FLOAT2(a, x0, b, y0);
CAXPBY_FLOAT2(a, x1, b, y1);
CAXPBY_FLOAT2(a, x2, b, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation y[i] = a*x[i] + b*y[i]
void caxpbyCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y) {
checkSpinor(x,y);
int length = x.Length()/2;
setBlock(9, length, x.Precision());
quda::blas_bytes += 3*x.RealLength()*x.Precision();
quda::blas_flops += 7*x.RealLength();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
double2 a2 = make_double2(real(a), imag(a));
double2 b2 = make_double2(real(b), imag(b));
int spinor_bytes = x.Length()*sizeof(double);
cudaBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
cudaBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
caxpbyDKernel<<<blasGrid, blasBlock>>>(a2, (double2*)x.V(), b2, (double2*)y.V(), length);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
caxpbySKernel<<<blasGrid, blasBlock>>>(a2, (float2*)x.V(), b2, (float2*)y.V(), length);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
caxpbyCuda(a, x.Even(), b, y.Even());
caxpbyCuda(a, x.Odd(), b, y.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
caxpbyHKernel<<<blasGrid, blasBlock>>>(a2, b2, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
caxpbyHKernel<<<blasGrid, blasBlock>>>(a2, b2, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 3*x.Volume()*sizeof(float);
}
if (!blasTuning) checkCudaError();
}
template <typename Float2>
__global__ void cxpaypbzDKernel(Float2 *x, Float2 a, Float2 *y, Float2 b, Float2 *z, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 T1 = READ_DOUBLE2_TEXTURE(x, i);
Float2 T2 = READ_DOUBLE2_TEXTURE(y, i);
Float2 T3 = read_Float2(z, i);
T1.x += a.x*T2.x - a.y*T2.y;
T1.y += a.y*T2.x + a.x*T2.y;
T1.x += b.x*T3.x - b.y*T3.y;
T1.y += b.y*T3.x + b.x*T3.y;
z[i] = make_Float2(T1);
i += gridSize;
}
}
template <typename Float2>
__global__ void cxpaypbzSKernel(Float2 *x, Float2 a, Float2 *y, Float2 b, Float2 *z, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 T1 = read_Float2(x, i);
Float2 T2 = read_Float2(y, i);
Float2 T3 = read_Float2(z, i);
T1.x += a.x*T2.x - a.y*T2.y;
T1.y += a.y*T2.x + a.x*T2.y;
T1.x += b.x*T3.x - b.y*T3.y;
T1.y += b.y*T3.x + b.x*T3.y;
z[i] = make_Float2(T1);
i += gridSize;
}
}
__global__ void cxpaypbzHKernel(float2 a, float2 b, short4 *zH, float *zN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride);
CXPAYPBZ_FLOAT4(x0, a, y0, b, z0);
CXPAYPBZ_FLOAT4(x1, a, y1, b, z1);
CXPAYPBZ_FLOAT4(x2, a, y2, b, z2);
CXPAYPBZ_FLOAT4(x3, a, y3, b, z3);
CXPAYPBZ_FLOAT4(x4, a, y4, b, z4);
CXPAYPBZ_FLOAT4(x5, a, y5, b, z5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(zH, zN, z, stride);
i += gridSize;
}
}
__global__ void cxpaypbzHKernel(float2 a, float2 b, short2 *zH, float *zN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride);
CXPAYPBZ_FLOAT2(x0, a, y0, b, z0);
CXPAYPBZ_FLOAT2(x1, a, y1, b, z1);
CXPAYPBZ_FLOAT2(x2, a, y2, b, z2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(zH, zN, z, stride);
i += gridSize;
}
}
// performs the operation z[i] = x[i] + a*y[i] + b*z[i]
void cxpaypbzCuda(cudaColorSpinorField &x, const quda::Complex &a, cudaColorSpinorField &y,
const quda::Complex &b, cudaColorSpinorField &z) {
checkSpinor(x,y);
checkSpinor(x,z);
int length = x.Length()/2;
setBlock(10, length, x.Precision());
quda::blas_bytes += 4*x.RealLength()*x.Precision();
quda::blas_flops += 8*x.RealLength();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
double2 a2 = make_double2(real(a), imag(a));
double2 b2 = make_double2(real(b), imag(b));
int spinor_bytes = x.Length()*sizeof(double);
cudaBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
cudaBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
cxpaypbzDKernel<<<blasGrid, blasBlock>>>((double2*)x.V(), a2, (double2*)y.V(), b2, (double2*)z.V(), length);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
cxpaypbzSKernel<<<blasGrid, blasBlock>>>((float2*)x.V(), a2, (float2*)y.V(), b2, (float2*)z.V(), length);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
cxpaypbzCuda(x.Even(), a, y.Even(), b, z.Even());
cxpaypbzCuda(x.Odd(), a, y.Odd(), b, z.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 4*x.Volume()*sizeof(float);
if (x.Nspin() ==4 ){//wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf3, z.V(), spinor_bytes);
cudaBindTexture(0, texNorm3, z.Norm(), spinor_bytes/12);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
cxpaypbzHKernel<<<blasGrid, blasBlock>>>(a2, b2, (short4*)z.V(), (float*)z.Norm(), z.Stride(), z.Volume());
} else if (x.Nspin() ==1 ){//staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt3, z.V(), spinor_bytes);
cudaBindTexture(0, texNorm3, z.Norm(), spinor_bytes/3);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
cxpaypbzHKernel<<<blasGrid, blasBlock>>>(a2, b2, (short2*)z.V(), (float*)z.Norm(), z.Stride(), z.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
}
if (!blasTuning) checkCudaError();
}
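// Host-side reference for cxpaypbz (a sketch; the helper name is illustrative):
// z[i] = x[i] + a*y[i] + b*z[i] with complex a and b in the packed (re, im)
// layout, matching the real arithmetic of CXPAYPBZ_FLOAT2.
static inline void cxpaypbzReferenceHost(const float2 *x, float2 a, const float2 *y,
float2 b, float2 *z, int len)
{
for (int i = 0; i < len; i++) {
float2 t;
t.x = x[i].x + a.x*y[i].x - a.y*y[i].y + b.x*z[i].x - b.y*z[i].y;
t.y = x[i].y + a.y*y[i].x + a.x*y[i].y + b.y*z[i].x + b.x*z[i].y;
z[i] = t;
}
}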
template <typename Float, typename Float2>
__global__ void axpyBzpcxDKernel(Float a, Float2 *x, Float2 *y, Float b, Float2 *z, Float c, int len)
{
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 x_i = READ_DOUBLE2_TEXTURE(x, i);
Float2 z_i = READ_DOUBLE2_TEXTURE(z, i);
y[i].x += a*x_i.x;
y[i].y += a*x_i.y;
x[i].x = b*z_i.x + c*x_i.x;
x[i].y = b*z_i.y + c*x_i.y;
i += gridSize;
}
}
template <typename Float, typename Float2>
__global__ void axpyBzpcxSKernel(Float a, Float2 *x, Float2 *y, Float b, Float2 *z, Float c, int len)
{
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 x_i = read_Float2(x, i);
Float2 z_i = read_Float2(z, i);
y[i].x += a*x_i.x;
y[i].y += a*x_i.y;
x[i].x = b*z_i.x + c*x_i.x;
x[i].y = b*z_i.y + c*x_i.y;
i += gridSize;
}
}
__global__ void axpyBzpcxHKernel(float a, float b, float c, short4 *xH, float *xN, short4 *yH, float *yN, int stride, int length)
{
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride);
AXPY_FLOAT4(a, x0, y0);
AXPBY_FLOAT4(b, z0, c, x0);
AXPY_FLOAT4(a, x1, y1);
AXPBY_FLOAT4(b, z1, c, x1);
AXPY_FLOAT4(a, x2, y2);
AXPBY_FLOAT4(b, z2, c, x2);
AXPY_FLOAT4(a, x3, y3);
AXPBY_FLOAT4(b, z3, c, x3);
AXPY_FLOAT4(a, x4, y4);
AXPBY_FLOAT4(b, z4, c, x4);
AXPY_FLOAT4(a, x5, y5);
AXPBY_FLOAT4(b, z5, c, x5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride);
i += gridSize;
}
}
__global__ void axpyBzpcxHKernel(float a, float b, float c, short2 *xH, float *xN, short2 *yH, float *yN, int stride, int length)
{
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride);
AXPY_FLOAT2(a, x0, y0);
AXPBY_FLOAT2(b, z0, c, x0);
AXPY_FLOAT2(a, x1, y1);
AXPBY_FLOAT2(b, z1, c, x1);
AXPY_FLOAT2(a, x2, y2);
AXPBY_FLOAT2(b, z2, c, x2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride);
i += gridSize;
}
}
// performs the operations: {y[i] = a*x[i] + y[i]; x[i] = b*z[i] + c*x[i]}
void axpyBzpcxCuda(const double &a, cudaColorSpinorField& x, cudaColorSpinorField& y, const double &b,
cudaColorSpinorField& z, const double &c)
{
checkSpinor(x,y);
checkSpinor(x,z);
setBlock(11, x.Length(), x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
cudaBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
cudaBindTexture(0, zTexDouble2, z.V(), spinor_bytes);
axpyBzpcxDKernel<<<blasGrid, blasBlock>>>(a, (double2*)x.V(), (double2*)y.V(), b, (double2*)z.V(), c, x.Length()/2);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
axpyBzpcxSKernel<<<blasGrid, blasBlock>>>((float)a, (float2*)x.V(), (float2*)y.V(), (float)b, (float2*)z.V(), (float)c, x.Length()/2);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET){
axpyBzpcxCuda(a, x.Even(), y.Even(), b, z.Even(), c);
axpyBzpcxCuda(a, x.Odd(), y.Odd(), b, z.Odd(), c);
return ;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf3, z.V(), spinor_bytes);
cudaBindTexture(0, texNorm3, z.Norm(), spinor_bytes/12);
axpyBzpcxHKernel<<<blasGrid, blasBlock>>>((float)a, (float)b, (float)c, (short4*)x.V(), (float*)x.Norm(),
(short4*)y.V(), (float*)y.Norm(), z.Stride(), z.Volume());
}else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt3, z.V(), spinor_bytes);
cudaBindTexture(0, texNorm3, z.Norm(), spinor_bytes/3);
axpyBzpcxHKernel<<<blasGrid, blasBlock>>>((float)a, (float)b, (float)c, (short2*)x.V(), (float*)x.Norm(),
(short2*)y.V(), (float*)y.Norm(), z.Stride(), z.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 5*x.Volume()*sizeof(float);
}
quda::blas_bytes += 5*x.RealLength()*x.Precision();
quda::blas_flops += 10*x.RealLength();
if (!blasTuning) checkCudaError();
}
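// Host-side reference for the fused axpyBzpcx update (a sketch; the helper name
// is illustrative): per element, y += a*x is applied first and x is then
// overwritten with b*z + c*x, matching the ordering in the kernels above.
static void axpyBzpcxReferenceHost(double a, double *x, double *y,
double b, const double *z, double c, int len)
{
for (int i = 0; i < len; i++) {
y[i] += a*x[i]; // y = a*x + y
x[i] = b*z[i] + c*x[i]; // x = b*z + c*x
}
}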
template <typename Float, typename Float2>
__global__ void axpyZpbxDKernel(Float a, Float2 *x, Float2 *y, Float2 *z, Float b, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 x_i = READ_DOUBLE2_TEXTURE(x, i);
Float2 z_i = READ_DOUBLE2_TEXTURE(z, i);
y[i].x += a*x_i.x;
y[i].y += a*x_i.y;
x[i].x = z_i.x + b*x_i.x;
x[i].y = z_i.y + b*x_i.y;
i += gridSize;
}
}
template <typename Float, typename Float2>
__global__ void axpyZpbxSKernel(Float a, Float2 *x, Float2 *y, Float2 *z, Float b, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 x_i = read_Float2(x, i);
Float2 z_i = read_Float2(z, i);
y[i].x += a*x_i.x;
y[i].y += a*x_i.y;
x[i].x = z_i.x + b*x_i.x;
x[i].y = z_i.y + b*x_i.y;
i += gridSize;
}
}
__global__ void axpyZpbxHKernel(float a, float b, short4 *xH, float *xN, short4 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
AXPY_FLOAT4(a, x0, y0);
AXPY_FLOAT4(a, x1, y1);
AXPY_FLOAT4(a, x2, y2);
AXPY_FLOAT4(a, x3, y3);
AXPY_FLOAT4(a, x4, y4);
AXPY_FLOAT4(a, x5, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride);
XPAY_FLOAT4(z0, b, x0);
XPAY_FLOAT4(z1, b, x1);
XPAY_FLOAT4(z2, b, x2);
XPAY_FLOAT4(z3, b, x3);
XPAY_FLOAT4(z4, b, x4);
XPAY_FLOAT4(z5, b, x5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride);
i += gridSize;
}
}
__global__ void axpyZpbxHKernel(float a, float b, short2 *xH, float *xN, short2 *yH, float *yN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride);
AXPY_FLOAT2(a, x0, y0);
XPAY_FLOAT2(z0, b, x0);
AXPY_FLOAT2(a, x1, y1);
XPAY_FLOAT2(z1, b, x1);
AXPY_FLOAT2(a, x2, y2);
XPAY_FLOAT2(z2, b, x2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride);
i += gridSize;
}
}
// performs the operations: {y[i] = a*x[i] + y[i]; x[i] = z[i] + b*x[i]}
void axpyZpbxCuda(const double &a, cudaColorSpinorField &x, cudaColorSpinorField &y,
cudaColorSpinorField &z, const double &b) {
checkSpinor(x,y);
checkSpinor(x,z);
setBlock(12, x.Length(), x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
cudaBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
cudaBindTexture(0, zTexDouble2, z.V(), spinor_bytes);
axpyZpbxDKernel<<<blasGrid, blasBlock>>>
(a, (double2*)x.V(), (double2*)y.V(), (double2*)z.V(), b, x.Length()/2);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
axpyZpbxSKernel<<<blasGrid, blasBlock>>>
((float)a, (float2*)x.V(), (float2*)y.V(), (float2*)z.V(), (float)b, x.Length()/2);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
axpyZpbxCuda(a, x.Even(), y.Even(), z.Even(), b);
axpyZpbxCuda(a, x.Odd(), y.Odd(), z.Odd(), b);
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() ==4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf3, z.V(), spinor_bytes);
cudaBindTexture(0, texNorm3, z.Norm(), spinor_bytes/12);
axpyZpbxHKernel<<<blasGrid, blasBlock>>>((float)a, (float)b, (short4*)x.V(), (float*)x.Norm(),
(short4*)y.V(), (float*)y.Norm(), z.Stride(), z.Volume());
}else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt3, z.V(), spinor_bytes);
cudaBindTexture(0, texNorm3, z.Norm(), spinor_bytes/3);
axpyZpbxHKernel<<<blasGrid, blasBlock>>>((float)a, (float)b, (short2*)x.V(), (float*)x.Norm(),
(short2*)y.V(), (float*)y.Norm(), z.Stride(), z.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 5*x.Volume()*sizeof(float);
}
quda::blas_bytes += 5*x.RealLength()*x.Precision();
quda::blas_flops += 8*x.RealLength();
if (!blasTuning) checkCudaError();
}
template <typename Float2>
__global__ void caxpbypzYmbwDKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, Float2 *w, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 X = READ_DOUBLE2_TEXTURE(x, i);
Float2 Z = read_Float2(z, i);
Z.x += a.x*X.x - a.y*X.y;
Z.y += a.y*X.x + a.x*X.y;
Float2 Y = READ_DOUBLE2_TEXTURE(y, i);
Z.x += b.x*Y.x - b.y*Y.y;
Z.y += b.y*Y.x + b.x*Y.y;
z[i] = make_Float2(Z);
Float2 W = read_Float2(w, i);
Y.x -= b.x*W.x - b.y*W.y;
Y.y -= b.y*W.x + b.x*W.y;
y[i] = make_Float2(Y);
i += gridSize;
}
}
template <typename Float2>
__global__ void caxpbypzYmbwSKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, Float2 *w, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 X = read_Float2(x, i);
Float2 Z = read_Float2(z, i);
Z.x += a.x*X.x - a.y*X.y;
Z.y += a.y*X.x + a.x*X.y;
Float2 Y = read_Float2(y, i);
Z.x += b.x*Y.x - b.y*Y.y;
Z.y += b.y*Y.x + b.x*Y.y;
z[i] = make_Float2(Z);
Float2 W = read_Float2(w, i);
Y.x -= b.x*W.x - b.y*W.y;
Y.y -= b.y*W.x + b.x*W.y;
y[i] = make_Float2(Y);
i += gridSize;
}
}
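// Half-precision Wilson (short4) variant: x, y and z are reconstructed from textures, z is
// updated and repacked, and w is read together with its per-site norm wc so the final y update
// can be applied as a CAXPY with coefficient -wc*b before y is repacked.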
__global__ void caxpbypzYmbwHKernel(float2 a, float2 b, float *xN, short4 *yH, float *yN,
short4 *zH, float *zN, float *wN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride);
CAXPBYPZ_FLOAT4(a, x0, b, y0, z0);
CAXPBYPZ_FLOAT4(a, x1, b, y1, z1);
CAXPBYPZ_FLOAT4(a, x2, b, y2, z2);
CAXPBYPZ_FLOAT4(a, x3, b, y3, z3);
CAXPBYPZ_FLOAT4(a, x4, b, y4, z4);
CAXPBYPZ_FLOAT4(a, x5, b, y5, z5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(zH, zN, z, stride);
READ_HALF_SPINOR(w, texHalf4, stride);
float2 b2 = -wc*b;
CAXPY_FLOAT4(b2, w0, y0);
CAXPY_FLOAT4(b2, w1, y1);
CAXPY_FLOAT4(b2, w2, y2);
CAXPY_FLOAT4(b2, w3, y3);
CAXPY_FLOAT4(b2, w4, y4);
CAXPY_FLOAT4(b2, w5, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
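// Staggered (short2) overload of the same kernel.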
__global__ void caxpbypzYmbwHKernel(float2 a, float2 b, float *xN, short2 *yH, float *yN,
short2 *zH, float *zN, float *wN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride);
CAXPBYPZ_FLOAT2(a, x0, b, y0, z0);
CAXPBYPZ_FLOAT2(a, x1, b, y1, z1);
CAXPBYPZ_FLOAT2(a, x2, b, y2, z2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(zH, zN, z, stride);
READ_HALF_SPINOR_ST(w, texHalfSt4, stride);
float2 b2 = -wc*b;
CAXPY_FLOAT2(b2, w0, y0);
CAXPY_FLOAT2(b2, w1, y1);
CAXPY_FLOAT2(b2, w2, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation z[i] = a*x[i] + b*y[i] + z[i] and y[i] -= b*w[i]
void caxpbypzYmbwCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y,
cudaColorSpinorField &z, cudaColorSpinorField &w) {
checkSpinor(x,y);
checkSpinor(x,z);
checkSpinor(x,w);
int length = x.Length()/2;
setBlock(13, length, x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
cudaBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
cudaBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
cudaBindTexture(0, zTexDouble2, z.V(), spinor_bytes);
double2 a2 = make_double2(real(a), imag(a));
double2 b2 = make_double2(real(b), imag(b));
caxpbypzYmbwDKernel<<<blasGrid, blasBlock>>>(a2, (double2*)x.V(), b2, (double2*)y.V(),
(double2*)z.V(), (double2*)w.V(), length);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
caxpbypzYmbwSKernel<<<blasGrid, blasBlock>>>(a2, (float2*)x.V(), b2, (float2*)y.V(),
(float2*)z.V(), (float2*)w.V(), length);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
caxpbypzYmbwCuda(a, x.Even(), b, y.Even(), z.Even(), w.Even());
caxpbypzYmbwCuda(a, x.Odd(), b, y.Odd(), z.Odd(), w.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 6*x.Volume()*sizeof(float);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf3, z.V(), spinor_bytes);
cudaBindTexture(0, texNorm3, z.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf4, w.V(), spinor_bytes);
cudaBindTexture(0, texNorm4, w.Norm(), spinor_bytes/12);
caxpbypzYmbwHKernel<<<blasGrid, blasBlock>>>(a2, b2, (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(),
(short4*)z.V(), (float*)z.Norm(), (float*)w.Norm(),
z.Stride(), z.Volume());
} else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt3, z.V(), spinor_bytes);
cudaBindTexture(0, texNorm3, z.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt4, w.V(), spinor_bytes);
cudaBindTexture(0, texNorm4, w.Norm(), spinor_bytes/3);
caxpbypzYmbwHKernel<<<blasGrid, blasBlock>>>(a2, b2, (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(),
(short2*)z.V(), (float*)z.Norm(), (float*)w.Norm(),
z.Stride(), z.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
}
quda::blas_bytes += 6*x.RealLength()*x.Precision();
quda::blas_flops += 12*x.RealLength();
if (!blasTuning) checkCudaError();
}
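// The reduction kernels accumulate in QudaSumFloat/QudaSumComplex/QudaSumFloat3. On devices
// without native double precision these sums are emulated as "double single" float pairs; the
// helpers below provide the compensated additions used by the reduction cores in that case.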
#if (__CUDA_ARCH__ < 130)
// Computes c = a + b in "double single" precision.
__device__ void dsadd(volatile QudaSumFloat &c0, volatile QudaSumFloat &c1, const volatile QudaSumFloat &a0,
const volatile QudaSumFloat &a1, const float b0, const float b1) {
// Compute dsa + dsb using Knuth's trick.
QudaSumFloat t1 = a0 + b0;
QudaSumFloat e = t1 - a0;
QudaSumFloat t2 = ((b0 - e) + (a0 - (t1 - e))) + a1 + b1;
// The result is t1 + t2, after normalization.
c0 = e = t1 + t2;
c1 = t2 - (e - t1);
}
// Computes c = a + b in "double single" precision (complex version)
__device__ void zcadd(volatile QudaSumComplex &c0, volatile QudaSumComplex &c1, const volatile QudaSumComplex &a0,
const volatile QudaSumComplex &a1, const volatile QudaSumComplex &b0, const volatile QudaSumComplex &b1) {
// Compute dsa + dsb using Knuth's trick.
QudaSumFloat t1 = a0.x + b0.x;
QudaSumFloat e = t1 - a0.x;
QudaSumFloat t2 = ((b0.x - e) + (a0.x - (t1 - e))) + a1.x + b1.x;
// The result is t1 + t2, after normalization.
c0.x = e = t1 + t2;
c1.x = t2 - (e - t1);
// Compute dsa + dsb using Knuth's trick.
t1 = a0.y + b0.y;
e = t1 - a0.y;
t2 = ((b0.y - e) + (a0.y - (t1 - e))) + a1.y + b1.y;
// The result is t1 + t2, after normalization.
c0.y = e = t1 + t2;
c1.y = t2 - (e - t1);
}
// Computes c = a + b in "double single" precision (float3 version)
__device__ void dsadd3(volatile QudaSumFloat3 &c0, volatile QudaSumFloat3 &c1, const volatile QudaSumFloat3 &a0,
const volatile QudaSumFloat3 &a1, const volatile QudaSumFloat3 &b0, const volatile QudaSumFloat3 &b1) {
// Compute dsa + dsb using Knuth's trick.
QudaSumFloat t1 = a0.x + b0.x;
QudaSumFloat e = t1 - a0.x;
QudaSumFloat t2 = ((b0.x - e) + (a0.x - (t1 - e))) + a1.x + b1.x;
// The result is t1 + t2, after normalization.
c0.x = e = t1 + t2;
c1.x = t2 - (e - t1);
// Compute dsa + dsb using Knuth's trick.
t1 = a0.y + b0.y;
e = t1 - a0.y;
t2 = ((b0.y - e) + (a0.y - (t1 - e))) + a1.y + b1.y;
// The result is t1 + t2, after normalization.
c0.y = e = t1 + t2;
c1.y = t2 - (e - t1);
// Compute dsa + dsb using Knuth's trick.
t1 = a0.z + b0.z;
e = t1 - a0.z;
t2 = ((b0.z - e) + (a0.z - (t1 - e))) + a1.z + b1.z;
// The result is t1 + t2, after normalization.
c0.z = e = t1 + t2;
c1.z = t2 - (e - t1);
}
#endif
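// Each reduction below is generated by defining REDUCE_FUNC_NAME, the extra argument list
// (REDUCE_TYPES/REDUCE_PARAMS), the per-element auxiliary code and the quantity to be summed,
// and then textually including reduce_core.h (or reduce_complex_core.h / reduce_triple_core.h
// for complex- and triple-valued reductions). Judging by the call sites below, the included
// header instantiates both the __global__ reduction kernel and a host wrapper named
// REDUCE_FUNC_NAME(Cuda), e.g. normDCuda(a, n, kernel_id, precision). The macros are #undef'd
// after each inclusion so the next reduction can redefine them.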
//
// double normCuda(float *a, int n) {}
//
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) normD##suffix
#define REDUCE_TYPES Float *a
#define REDUCE_PARAMS a
#define REDUCE_AUXILIARY(i)
#define REDUCE_OPERATION(i) (a[i]*a[i])
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) normS##suffix
#define REDUCE_TYPES Float *a
#define REDUCE_PARAMS a
#define REDUCE_AUXILIARY(i)
#define REDUCE_OPERATION(i) (a[i].x*a[i].x + a[i].y*a[i].y)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
//
// double normHCuda(char *, int n) {}
//
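// READ_HALF_SPINOR(_ST) loads the packed spinor together with its per-site norm ac, so the
// reduced quantity is ac*ac times the sum of the per-component dot products.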
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) normH##suffix
#define REDUCE_TYPES Float *aN, int stride // dummy type
#define REDUCE_PARAMS aN, stride
#define REDUCE_AUXILIARY(i) \
READ_HALF_SPINOR(a, texHalf1, stride); \
REAL_DOT_FLOAT4(norm0, a0, a0); \
REAL_DOT_FLOAT4(norm1, a1, a1); \
REAL_DOT_FLOAT4(norm2, a2, a2); \
REAL_DOT_FLOAT4(norm3, a3, a3); \
REAL_DOT_FLOAT4(norm4, a4, a4); \
REAL_DOT_FLOAT4(norm5, a5, a5); \
  norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4;
#define REDUCE_OPERATION(i) (ac*ac*norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) normHSt##suffix
#define REDUCE_TYPES Float *aN, int stride // dummy type
#define REDUCE_PARAMS aN, stride
#define REDUCE_AUXILIARY(i) \
READ_HALF_SPINOR_ST(a, texHalfSt1, stride); \
REAL_DOT_FLOAT2(norm0, a0, a0); \
REAL_DOT_FLOAT2(norm1, a1, a1); \
REAL_DOT_FLOAT2(norm2, a2, a2); \
norm0 += norm1; norm0 += norm2;
#define REDUCE_OPERATION(i) (ac*ac*norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
double normCuda(const cudaColorSpinorField &a) {
if (a.SiteSubset() == QUDA_FULL_SITE_SUBSET) return normCuda(a.Even()) + normCuda(a.Odd());
const int id = 14;
quda::blas_flops += 2*a.RealLength();
quda::blas_bytes += a.RealLength()*a.Precision();
if (a.Precision() == QUDA_DOUBLE_PRECISION) {
return normDCuda((double*)a.V(), a.Length(), id, a.Precision());
} else if (a.Precision() == QUDA_SINGLE_PRECISION) {
return normSCuda((float2*)a.V(), a.Length()/2, id, a.Precision());
} else {
int spinor_bytes = a.Length()*sizeof(short);
int half_norm_ratio = (a.Ncolor()*a.Nspin()*2*sizeof(short))/sizeof(float);
quda::blas_bytes += (a.RealLength()*a.Precision()) / (a.Ncolor() * a.Nspin());
cudaBindTexture(0, texNorm1, a.Norm(), spinor_bytes/half_norm_ratio);
if (a.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, a.V(), spinor_bytes);
return normHCuda((float*)a.Norm(), a.Stride(), a.Volume(), id, a.Precision());
}else if (a.Nspin() == 1) { //staggered
cudaBindTexture(0, texHalfSt1, a.V(), spinor_bytes);
return normHStCuda((float*)a.Norm(), a.Stride(), a.Volume(), id, a.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, a.Nspin());
return 0;
}
}
}
//
// double reDotProductFCuda(float *a, float *b, int n) {}
//
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) reDotProductD##suffix
#define REDUCE_TYPES Float *a, Float *b
#define REDUCE_PARAMS a, b
#define REDUCE_AUXILIARY(i)
#define REDUCE_OPERATION(i) (a[i]*b[i])
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) reDotProductS##suffix
#define REDUCE_TYPES Float *a, Float *b
#define REDUCE_PARAMS a, b
#define REDUCE_AUXILIARY(i)
#define REDUCE_OPERATION(i) (a[i].x*b[i].x + a[i].y*b[i].y)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
//
// double reDotProductHCuda(float *a, float *b, int n) {}
//
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) reDotProductH##suffix
#define REDUCE_TYPES Float *aN, Float *bN, int stride
#define REDUCE_PARAMS aN, bN, stride
#define REDUCE_AUXILIARY(i) \
READ_HALF_SPINOR(a, texHalf1, stride); \
READ_HALF_SPINOR(b, texHalf2, stride); \
REAL_DOT_FLOAT4(rdot0, a0, b0); \
REAL_DOT_FLOAT4(rdot1, a1, b1); \
REAL_DOT_FLOAT4(rdot2, a2, b2); \
REAL_DOT_FLOAT4(rdot3, a3, b3); \
REAL_DOT_FLOAT4(rdot4, a4, b4); \
REAL_DOT_FLOAT4(rdot5, a5, b5); \
rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4;
#define REDUCE_OPERATION(i) (ac*bc*rdot0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) reDotProductHSt##suffix
#define REDUCE_TYPES Float *aN, Float *bN, int stride
#define REDUCE_PARAMS aN, bN, stride
#define REDUCE_AUXILIARY(i) \
READ_HALF_SPINOR_ST(a, texHalfSt1, stride); \
READ_HALF_SPINOR_ST(b, texHalfSt2, stride); \
REAL_DOT_FLOAT2(rdot0, a0, b0); \
REAL_DOT_FLOAT2(rdot1, a1, b1); \
REAL_DOT_FLOAT2(rdot2, a2, b2); \
rdot0 += rdot1; rdot0 += rdot2;
#define REDUCE_OPERATION(i) (ac*bc*rdot0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
double reDotProductCuda(cudaColorSpinorField &a, cudaColorSpinorField &b) {
if (a.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
return reDotProductCuda(a.Even(), b.Even()) + reDotProductCuda(a.Odd(), b.Odd());
}
const int id = 15;
quda::blas_flops += 2*a.RealLength();
checkSpinor(a, b);
quda::blas_bytes += 2*a.RealLength()*a.Precision();
if (a.Precision() == QUDA_DOUBLE_PRECISION) {
return reDotProductDCuda((double*)a.V(), (double*)b.V(), a.Length(), id, a.Precision());
} else if (a.Precision() == QUDA_SINGLE_PRECISION) {
return reDotProductSCuda((float2*)a.V(), (float2*)b.V(), a.Length()/2, id, a.Precision());
} else {
quda::blas_bytes += 2*a.Volume()*sizeof(float);
int spinor_bytes = a.Length()*sizeof(short);
if (a.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, a.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, a.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, b.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, b.Norm(), spinor_bytes/12);
return reDotProductHCuda((float*)a.Norm(), (float*)b.Norm(), a.Stride(), a.Volume(), id, a.Precision());
}else if (a.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, a.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, a.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, b.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, b.Norm(), spinor_bytes/3);
return reDotProductHStCuda((float*)a.Norm(), (float*)b.Norm(), a.Stride(), a.Volume(), id, a.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, a.Nspin());
return 0;
}
}
}
//
// double axpyNormCuda(float a, float *x, float *y, n){}
//
// First performs the operation y[i] = a*x[i] + y[i]
// Second returns the norm of y
//
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) axpyNormF##suffix
#define REDUCE_TYPES Float a, Float *x, Float *y
#define REDUCE_PARAMS a, x, y
#define REDUCE_AUXILIARY(i) y[i] = a*x[i] + y[i]
#define REDUCE_OPERATION(i) (y[i]*y[i])
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
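// Half-precision axpyNorm: reconstruct x and y to single precision, apply y += a*x while
// accumulating |y|^2 per site, then repack y into yH/yN.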
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) axpyNormH##suffix
#define REDUCE_TYPES Float a, short4 *yH, float *yN, int stride
#define REDUCE_PARAMS a, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \
AXPY_FLOAT4(a, x0, y0); \
REAL_DOT_FLOAT4(norm0, y0, y0); \
AXPY_FLOAT4(a, x1, y1); \
REAL_DOT_FLOAT4(norm1, y1, y1); \
AXPY_FLOAT4(a, x2, y2); \
REAL_DOT_FLOAT4(norm2, y2, y2); \
AXPY_FLOAT4(a, x3, y3); \
REAL_DOT_FLOAT4(norm3, y3, y3); \
AXPY_FLOAT4(a, x4, y4); \
REAL_DOT_FLOAT4(norm4, y4, y4); \
AXPY_FLOAT4(a, x5, y5); \
REAL_DOT_FLOAT4(norm5, y5, y5); \
norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) axpyNormH##suffix
#define REDUCE_TYPES Float a, short2 *yH, float *yN, int stride
#define REDUCE_PARAMS a, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \
AXPY_FLOAT2(a, x0, y0); \
REAL_DOT_FLOAT2(norm0, y0, y0); \
AXPY_FLOAT2(a, x1, y1); \
REAL_DOT_FLOAT2(norm1, y1, y1); \
AXPY_FLOAT2(a, x2, y2); \
REAL_DOT_FLOAT2(norm2, y2, y2); \
norm0 += norm1; norm0 += norm2; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
double axpyNormCuda(const double &a, cudaColorSpinorField &x, cudaColorSpinorField &y) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return axpyNormCuda(a, x.Even(), y.Even()) + axpyNormCuda(a, x.Odd(), y.Odd());
const int id = 16;
quda::blas_flops += 4*x.RealLength();
checkSpinor(x,y);
quda::blas_bytes += 3*x.RealLength()*x.Precision();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
return axpyNormFCuda(a, (double*)x.V(), (double*)y.V(), x.Length(), id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
return axpyNormFCuda((float)a, (float*)x.V(), (float*)y.V(), x.Length(), id, x.Precision());
} else {
cudaBindTexture(0, texNorm1, x.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
cudaBindTexture(0, texNorm2, y.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
quda::blas_bytes += 3*x.Volume()*sizeof(float);
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), x.Bytes());
cudaBindTexture(0, texHalf2, y.V(), x.Bytes());
return axpyNormHCuda((float)a, (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), x.Bytes());
cudaBindTexture(0, texHalfSt2, y.V(), x.Bytes());
return axpyNormHCuda((float)a, (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
return 0;
}
}
}
//
// double xmyNormCuda(float a, float *x, float *y, n){}
//
// First performs the operation y[i] = x[i] - y[i]
// Second returns the norm of y
//
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) xmyNormF##suffix
#define REDUCE_TYPES Float *x, Float *y
#define REDUCE_PARAMS x, y
#define REDUCE_AUXILIARY(i) y[i] = x[i] - y[i]
#define REDUCE_OPERATION(i) (y[i]*y[i])
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
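// Half-precision xmyNorm: same pattern with y = x - y; the d1/d2 arguments are unused
// dummies (the host wrapper passes null pointers).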
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) xmyNormH##suffix
#define REDUCE_TYPES Float *d1, Float *d2, short4 *yH, float *yN, int stride
#define REDUCE_PARAMS d1, d2, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \
XMY_FLOAT4(x0, y0); \
REAL_DOT_FLOAT4(norm0, y0, y0); \
XMY_FLOAT4(x1, y1); \
REAL_DOT_FLOAT4(norm1, y1, y1); \
XMY_FLOAT4(x2, y2); \
REAL_DOT_FLOAT4(norm2, y2, y2); \
XMY_FLOAT4(x3, y3); \
REAL_DOT_FLOAT4(norm3, y3, y3); \
XMY_FLOAT4(x4, y4); \
REAL_DOT_FLOAT4(norm4, y4, y4); \
XMY_FLOAT4(x5, y5); \
REAL_DOT_FLOAT4(norm5, y5, y5); \
norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) xmyNormH##suffix
#define REDUCE_TYPES Float *d1, Float *d2, short2 *yH, float *yN, int stride
#define REDUCE_PARAMS d1, d2, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \
XMY_FLOAT2(x0, y0); \
REAL_DOT_FLOAT2(norm0, y0, y0); \
XMY_FLOAT2(x1, y1); \
REAL_DOT_FLOAT2(norm1, y1, y1); \
XMY_FLOAT2(x2, y2); \
REAL_DOT_FLOAT2(norm2, y2, y2); \
norm0 += norm1; norm0 += norm2; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
double xmyNormCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return xmyNormCuda(x.Even(), y.Even()) + xmyNormCuda(x.Odd(), y.Odd());
const int id = 17;
quda::blas_flops += 3*x.RealLength();
checkSpinor(x,y);
quda::blas_bytes += 3*x.RealLength()*x.Precision();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
return xmyNormFCuda((double*)x.V(), (double*)y.V(), x.Length(), id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
return xmyNormFCuda((float*)x.V(), (float*)y.V(), x.Length(), id, x.Precision());
} else {
cudaBindTexture(0, texNorm1, x.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
cudaBindTexture(0, texNorm2, y.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
quda::blas_bytes += 3*x.Volume()*sizeof(float);
    if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), x.Bytes());
cudaBindTexture(0, texHalf2, y.V(), x.Bytes());
return xmyNormHCuda((char*)0, (char*)0, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume(), id, x.Precision());
}else if (x.Nspin() == 1){
cudaBindTexture(0, texHalfSt1, x.V(), x.Bytes());
cudaBindTexture(0, texHalfSt2, y.V(), x.Bytes());
return xmyNormHCuda((char*)0, (char*)0, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
}
}
exit(-1);
}
//
// double2 cDotProductCuda(float2 *x, float2 *y, int n) {}
//
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductD##suffix
#define REDUCE_TYPES Float2 *x, Float2 *y, Float c
#define REDUCE_PARAMS x, y, c
#define REDUCE_REAL_AUXILIARY(i) Float2 a = READ_DOUBLE2_TEXTURE(x, i);
#define REDUCE_IMAG_AUXILIARY(i) Float2 b = READ_DOUBLE2_TEXTURE(y, i);
#define REDUCE_REAL_OPERATION(i) (a.x*b.x + a.y*b.y)
#define REDUCE_IMAG_OPERATION(i) (a.x*b.y - a.y*b.x)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductS##suffix
#define REDUCE_TYPES Float2 *x, Float2 *y, Float c
#define REDUCE_PARAMS x, y, c
#define REDUCE_REAL_AUXILIARY(i) Float2 a = read_Float2(x, i);
#define REDUCE_IMAG_AUXILIARY(i) Float2 b = read_Float2(y, i);
#define REDUCE_REAL_OPERATION(i) (a.x*b.x + a.y*b.y)
#define REDUCE_IMAG_OPERATION(i) (a.x*b.y - a.y*b.x)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductH##suffix
#define REDUCE_TYPES Float *aN, Float2 *bN, int stride
#define REDUCE_PARAMS aN, bN, stride
#define REDUCE_REAL_AUXILIARY(i) \
READ_HALF_SPINOR(a, texHalf1, stride); \
READ_HALF_SPINOR(b, texHalf2, stride); \
REAL_DOT_FLOAT4(rdot0, a0, b0); \
REAL_DOT_FLOAT4(rdot1, a1, b1); \
REAL_DOT_FLOAT4(rdot2, a2, b2); \
REAL_DOT_FLOAT4(rdot3, a3, b3); \
REAL_DOT_FLOAT4(rdot4, a4, b4); \
REAL_DOT_FLOAT4(rdot5, a5, b5); \
rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4;
#define REDUCE_IMAG_AUXILIARY(i) \
IMAG_DOT_FLOAT4(idot0, a0, b0); \
IMAG_DOT_FLOAT4(idot1, a1, b1); \
IMAG_DOT_FLOAT4(idot2, a2, b2); \
IMAG_DOT_FLOAT4(idot3, a3, b3); \
IMAG_DOT_FLOAT4(idot4, a4, b4); \
IMAG_DOT_FLOAT4(idot5, a5, b5); \
idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4;
#define REDUCE_REAL_OPERATION(i) (ac*bc*rdot0)
#define REDUCE_IMAG_OPERATION(i) (ac*bc*idot0)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductHSt##suffix
#define REDUCE_TYPES Float *aN, Float2 *bN, int stride
#define REDUCE_PARAMS aN, bN, stride
#define REDUCE_REAL_AUXILIARY(i) \
READ_HALF_SPINOR_ST(a, texHalfSt1, stride); \
READ_HALF_SPINOR_ST(b, texHalfSt2, stride); \
REAL_DOT_FLOAT2(rdot0, a0, b0); \
REAL_DOT_FLOAT2(rdot1, a1, b1); \
REAL_DOT_FLOAT2(rdot2, a2, b2); \
rdot0 += rdot1; rdot0 += rdot2;
#define REDUCE_IMAG_AUXILIARY(i) \
IMAG_DOT_FLOAT2(idot0, a0, b0); \
IMAG_DOT_FLOAT2(idot1, a1, b1); \
IMAG_DOT_FLOAT2(idot2, a2, b2); \
idot0 += idot1; idot0 += idot2;
#define REDUCE_REAL_OPERATION(i) (ac*bc*rdot0)
#define REDUCE_IMAG_OPERATION(i) (ac*bc*idot0)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
quda::Complex cDotProductCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return cDotProductCuda(x.Even(), y.Even()) + cDotProductCuda(x.Odd(), y.Odd());
const int id = 18;
quda::blas_flops += 4*x.RealLength();
checkSpinor(x,y);
int length = x.Length()/2;
quda::blas_bytes += 2*x.RealLength()*x.Precision();
double2 dot;
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
char c = 0;
int spinor_bytes = x.Length()*sizeof(double);
cudaBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
cudaBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
dot = cDotProductDCuda((double2*)x.V(), (double2*)y.V(), c, length, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
char c = 0;
int spinor_bytes = x.Length()*sizeof(float);
cudaBindTexture(0, xTexSingle2, x.V(), spinor_bytes);
cudaBindTexture(0, yTexSingle2, y.V(), spinor_bytes);
dot = cDotProductSCuda((float2*)x.V(), (float2*)y.V(), c, length, id, x.Precision());
} else {
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 2*x.Volume()*sizeof(float);
if (x.Nspin() == 4){
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
dot = cDotProductHCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
} else if (x.Nspin() == 1){
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
dot = cDotProductHStCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
}
}
return quda::Complex(dot.x, dot.y);
}
//
// double2 xpaycDotzyCuda(float2 *x, float a, float2 *y, float2 *z, int n) {}
//
// First performs the operation y = x + a*y
// Second returns complex dot product (z,y)
//
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) xpaycDotzyD##suffix
#define REDUCE_TYPES Float2 *x, Float a, Float2 *y, Float2 *z
#define REDUCE_PARAMS x, a, y, z
#define REDUCE_REAL_AUXILIARY(i) \
Float2 X = READ_DOUBLE2_TEXTURE(x, i); \
Float2 Y = READ_DOUBLE2_TEXTURE(y, i); \
Float2 Z = READ_DOUBLE2_TEXTURE(z, i);
#define REDUCE_IMAG_AUXILIARY(i) y[i].x = X.x + a*Y.x; y[i].y = X.y + a*Y.y
#define REDUCE_REAL_OPERATION(i) (Z.x*y[i].x + Z.y*y[i].y)
#define REDUCE_IMAG_OPERATION(i) (Z.x*y[i].y - Z.y*y[i].x)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) xpaycDotzyS##suffix
#define REDUCE_TYPES Float2 *x, Float a, Float2 *y, Float2 *z
#define REDUCE_PARAMS x, a, y, z
#define REDUCE_REAL_AUXILIARY(i) y[i].x = x[i].x + a*y[i].x
#define REDUCE_IMAG_AUXILIARY(i) y[i].y = x[i].y + a*y[i].y
#define REDUCE_REAL_OPERATION(i) (z[i].x*y[i].x + z[i].y*y[i].y)
#define REDUCE_IMAG_OPERATION(i) (z[i].x*y[i].y - z[i].y*y[i].x)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
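// Half-precision xpaycDotzy: y = x + a*y is applied to the reconstructed fields, the real and
// imaginary parts of (z,y) are accumulated, and y is repacked to half precision.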
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) xpaycDotzyH##suffix
#define REDUCE_TYPES Float a, short4 *yH, Float2 *yN, int stride
#define REDUCE_PARAMS a, yH, yN, stride
#define REDUCE_REAL_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); \
XPAY_FLOAT4(x0, a, y0); \
XPAY_FLOAT4(x1, a, y1); \
XPAY_FLOAT4(x2, a, y2); \
XPAY_FLOAT4(x3, a, y3); \
XPAY_FLOAT4(x4, a, y4); \
XPAY_FLOAT4(x5, a, y5); \
REAL_DOT_FLOAT4(rdot0, z0, y0); \
REAL_DOT_FLOAT4(rdot1, z1, y1); \
REAL_DOT_FLOAT4(rdot2, z2, y2); \
REAL_DOT_FLOAT4(rdot3, z3, y3); \
REAL_DOT_FLOAT4(rdot4, z4, y4); \
REAL_DOT_FLOAT4(rdot5, z5, y5); \
rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4;
#define REDUCE_IMAG_AUXILIARY(i) \
IMAG_DOT_FLOAT4(idot0, z0, y0); \
IMAG_DOT_FLOAT4(idot1, z1, y1); \
IMAG_DOT_FLOAT4(idot2, z2, y2); \
IMAG_DOT_FLOAT4(idot3, z3, y3); \
IMAG_DOT_FLOAT4(idot4, z4, y4); \
IMAG_DOT_FLOAT4(idot5, z5, y5); \
idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
#define REDUCE_REAL_OPERATION(i) (rdot0)
#define REDUCE_IMAG_OPERATION(i) (idot0)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) xpaycDotzyH##suffix
#define REDUCE_TYPES Float a, short2 *yH, Float2 *yN, int stride
#define REDUCE_PARAMS a, yH, yN, stride
#define REDUCE_REAL_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); \
XPAY_FLOAT2(x0, a, y0); \
XPAY_FLOAT2(x1, a, y1); \
XPAY_FLOAT2(x2, a, y2); \
REAL_DOT_FLOAT2(rdot0, z0, y0); \
REAL_DOT_FLOAT2(rdot1, z1, y1); \
REAL_DOT_FLOAT2(rdot2, z2, y2); \
rdot0 += rdot1; rdot0 += rdot2;
#define REDUCE_IMAG_AUXILIARY(i) \
IMAG_DOT_FLOAT2(idot0, z0, y0); \
IMAG_DOT_FLOAT2(idot1, z1, y1); \
IMAG_DOT_FLOAT2(idot2, z2, y2); \
idot0 += idot1; idot0 += idot2; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
#define REDUCE_REAL_OPERATION(i) (rdot0)
#define REDUCE_IMAG_OPERATION(i) (idot0)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
quda::Complex xpaycDotzyCuda(cudaColorSpinorField &x, const double &a, cudaColorSpinorField &y, cudaColorSpinorField &z) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return xpaycDotzyCuda(x.Even(), a, y.Even(), z.Even()) + xpaycDotzyCuda(x.Odd(), a, y.Odd(), z.Odd());
const int id = 19;
quda::blas_flops += 6*x.RealLength();
checkSpinor(x,y);
checkSpinor(x,z);
int length = x.Length()/2;
quda::blas_bytes += 4*x.RealLength()*x.Precision();
double2 dot;
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
cudaBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
cudaBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
cudaBindTexture(0, zTexDouble2, z.V(), spinor_bytes);
dot = xpaycDotzyDCuda((double2*)x.V(), a, (double2*)y.V(), (double2*)z.V(), length, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
dot = xpaycDotzySCuda((float2*)x.V(), (float)a, (float2*)y.V(), (float2*)z.V(), length, id, x.Precision());
} else {
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 4*x.Volume()*sizeof(float);
if (x.Nspin() ==4 ){//wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf3, z.V(), spinor_bytes);
cudaBindTexture(0, texNorm3, z.Norm(), spinor_bytes/12);
dot = xpaycDotzyHCuda((float)a, (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
    } else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt3, z.V(), spinor_bytes);
cudaBindTexture(0, texNorm3, z.Norm(), spinor_bytes/3);
dot = xpaycDotzyHCuda((float)a, (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
}
}
return quda::Complex(dot.x, dot.y);
}
//
// double3 cDotProductNormACuda(float2 *a, float2 *b, int n) {}
//
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductNormAD##suffix
#define REDUCE_TYPES Float2 *x, Float2 *y
#define REDUCE_PARAMS x, y
#define REDUCE_X_AUXILIARY(i) Float2 a = READ_DOUBLE2_TEXTURE(x, i);
#define REDUCE_Y_AUXILIARY(i) Float2 b = READ_DOUBLE2_TEXTURE(y, i);
#define REDUCE_Z_AUXILIARY(i)
#define REDUCE_X_OPERATION(i) (a.x*b.x + a.y*b.y)
#define REDUCE_Y_OPERATION(i) (a.x*b.y - a.y*b.x)
#define REDUCE_Z_OPERATION(i) (a.x*a.x + a.y*a.y)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductNormAS##suffix
#define REDUCE_TYPES Float2 *a, Float2 *b
#define REDUCE_PARAMS a, b
#define REDUCE_X_AUXILIARY(i)
#define REDUCE_Y_AUXILIARY(i)
#define REDUCE_Z_AUXILIARY(i)
#define REDUCE_X_OPERATION(i) (a[i].x*b[i].x + a[i].y*b[i].y)
#define REDUCE_Y_OPERATION(i) (a[i].x*b[i].y - a[i].y*b[i].x)
#define REDUCE_Z_OPERATION(i) (a[i].x*a[i].x + a[i].y*a[i].y)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
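// Half-precision variants: xc and yc are the per-site norms returned by READ_HALF_SPINOR(_ST),
// so the three reduced quantities are Re(x,y), Im(x,y) and |x|^2, each rescaled accordingly.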
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductNormAH##suffix
#define REDUCE_TYPES Float2 *xN, Float2 *yN, int stride
#define REDUCE_PARAMS xN, yN, stride
#define REDUCE_X_AUXILIARY(i) \
READ_HALF_SPINOR(x, texHalf1, stride); \
READ_HALF_SPINOR(y, texHalf2, stride); \
REAL_DOT_FLOAT4(norm0, x0, x0); \
REAL_DOT_FLOAT4(norm1, x1, x1); \
REAL_DOT_FLOAT4(norm2, x2, x2); \
REAL_DOT_FLOAT4(norm3, x3, x3); \
REAL_DOT_FLOAT4(norm4, x4, x4); \
REAL_DOT_FLOAT4(norm5, x5, x5); \
  norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4;
#define REDUCE_Y_AUXILIARY(i) \
REAL_DOT_FLOAT4(rdot0, x0, y0); \
REAL_DOT_FLOAT4(rdot1, x1, y1); \
REAL_DOT_FLOAT4(rdot2, x2, y2); \
REAL_DOT_FLOAT4(rdot3, x3, y3); \
REAL_DOT_FLOAT4(rdot4, x4, y4); \
REAL_DOT_FLOAT4(rdot5, x5, y5); \
rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4;
#define REDUCE_Z_AUXILIARY(i) \
IMAG_DOT_FLOAT4(idot0, x0, y0); \
IMAG_DOT_FLOAT4(idot1, x1, y1); \
IMAG_DOT_FLOAT4(idot2, x2, y2); \
IMAG_DOT_FLOAT4(idot3, x3, y3); \
IMAG_DOT_FLOAT4(idot4, x4, y4); \
IMAG_DOT_FLOAT4(idot5, x5, y5); \
idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4;
#define REDUCE_X_OPERATION(i) (xc*yc*rdot0)
#define REDUCE_Y_OPERATION(i) (xc*yc*idot0)
#define REDUCE_Z_OPERATION(i) (xc*xc*norm0)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductNormAHSt##suffix
#define REDUCE_TYPES Float2 *xN, Float2 *yN, int stride
#define REDUCE_PARAMS xN, yN, stride
#define REDUCE_X_AUXILIARY(i) \
READ_HALF_SPINOR_ST(x, texHalfSt1, stride); \
READ_HALF_SPINOR_ST(y, texHalfSt2, stride); \
REAL_DOT_FLOAT2(norm0, x0, x0); \
REAL_DOT_FLOAT2(norm1, x1, x1); \
REAL_DOT_FLOAT2(norm2, x2, x2); \
norm0 += norm1; norm0 += norm2;
#define REDUCE_Y_AUXILIARY(i) \
REAL_DOT_FLOAT2(rdot0, x0, y0); \
REAL_DOT_FLOAT2(rdot1, x1, y1); \
REAL_DOT_FLOAT2(rdot2, x2, y2); \
rdot0 += rdot1; rdot0 += rdot2;
#define REDUCE_Z_AUXILIARY(i) \
IMAG_DOT_FLOAT2(idot0, x0, y0); \
IMAG_DOT_FLOAT2(idot1, x1, y1); \
IMAG_DOT_FLOAT2(idot2, x2, y2); \
idot0 += idot1; idot0 += idot2;
#define REDUCE_X_OPERATION(i) (xc*yc*rdot0)
#define REDUCE_Y_OPERATION(i) (xc*yc*idot0)
#define REDUCE_Z_OPERATION(i) (xc*xc*norm0)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
double3 cDotProductNormACuda(cudaColorSpinorField &x, cudaColorSpinorField &y) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return cDotProductNormACuda(x.Even(), y.Even()) + cDotProductNormACuda(x.Odd(), y.Odd());
const int id = 20;
quda::blas_flops += 6*x.RealLength();
checkSpinor(x,y);
int length = x.Length()/2;
quda::blas_bytes += 2*x.RealLength()*x.Precision();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
cudaBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
cudaBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
return cDotProductNormADCuda((double2*)x.V(), (double2*)y.V(), length, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
return cDotProductNormASCuda((float2*)x.V(), (float2*)y.V(), length, id, x.Precision());
} else {
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 2*x.Volume()*sizeof(float);
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
return cDotProductNormAHCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
} else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
return cDotProductNormAHStCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
}
}
exit(-1);
}
//
// double3 cDotProductNormBCuda(float2 *a, float2 *b, int n) {}
//
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductNormBD##suffix
#define REDUCE_TYPES Float2 *x, Float2 *y
#define REDUCE_PARAMS x, y
#define REDUCE_X_AUXILIARY(i) Float2 a = READ_DOUBLE2_TEXTURE(x, i);
#define REDUCE_Y_AUXILIARY(i) Float2 b = READ_DOUBLE2_TEXTURE(y, i);
#define REDUCE_Z_AUXILIARY(i)
#define REDUCE_X_OPERATION(i) (a.x*b.x + a.y*b.y)
#define REDUCE_Y_OPERATION(i) (a.x*b.y - a.y*b.x)
#define REDUCE_Z_OPERATION(i) (b.x*b.x + b.y*b.y)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductNormBS##suffix
#define REDUCE_TYPES Float2 *a, Float2 *b
#define REDUCE_PARAMS a, b
#define REDUCE_X_AUXILIARY(i)
#define REDUCE_Y_AUXILIARY(i)
#define REDUCE_Z_AUXILIARY(i)
#define REDUCE_X_OPERATION(i) (a[i].x*b[i].x + a[i].y*b[i].y)
#define REDUCE_Y_OPERATION(i) (a[i].x*b[i].y - a[i].y*b[i].x)
#define REDUCE_Z_OPERATION(i) (b[i].x*b[i].x + b[i].y*b[i].y)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
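// As for cDotProductNormA, but the accumulated norm is |y|^2 (yc*yc*norm0) rather than |x|^2.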
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductNormBH##suffix
#define REDUCE_TYPES Float2 *xN, Float2 *yN, int stride
#define REDUCE_PARAMS xN, yN, stride
#define REDUCE_X_AUXILIARY(i) \
READ_HALF_SPINOR(x, texHalf1, stride); \
READ_HALF_SPINOR(y, texHalf2, stride); \
REAL_DOT_FLOAT4(norm0, y0, y0); \
REAL_DOT_FLOAT4(norm1, y1, y1); \
REAL_DOT_FLOAT4(norm2, y2, y2); \
REAL_DOT_FLOAT4(norm3, y3, y3); \
REAL_DOT_FLOAT4(norm4, y4, y4); \
REAL_DOT_FLOAT4(norm5, y5, y5); \
  norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4;
#define REDUCE_Y_AUXILIARY(i) \
REAL_DOT_FLOAT4(rdot0, x0, y0); \
REAL_DOT_FLOAT4(rdot1, x1, y1); \
REAL_DOT_FLOAT4(rdot2, x2, y2); \
REAL_DOT_FLOAT4(rdot3, x3, y3); \
REAL_DOT_FLOAT4(rdot4, x4, y4); \
REAL_DOT_FLOAT4(rdot5, x5, y5); \
rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4;
#define REDUCE_Z_AUXILIARY(i) \
IMAG_DOT_FLOAT4(idot0, x0, y0); \
IMAG_DOT_FLOAT4(idot1, x1, y1); \
IMAG_DOT_FLOAT4(idot2, x2, y2); \
IMAG_DOT_FLOAT4(idot3, x3, y3); \
IMAG_DOT_FLOAT4(idot4, x4, y4); \
IMAG_DOT_FLOAT4(idot5, x5, y5); \
idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4;
#define REDUCE_X_OPERATION(i) (xc*yc*rdot0)
#define REDUCE_Y_OPERATION(i) (xc*yc*idot0)
#define REDUCE_Z_OPERATION(i) (yc*yc*norm0)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductNormBHSt##suffix
#define REDUCE_TYPES Float2 *xN, Float2 *yN, int stride
#define REDUCE_PARAMS xN, yN, stride
#define REDUCE_X_AUXILIARY(i) \
READ_HALF_SPINOR_ST(x, texHalfSt1, stride); \
READ_HALF_SPINOR_ST(y, texHalfSt2, stride); \
REAL_DOT_FLOAT2(norm0, y0, y0); \
REAL_DOT_FLOAT2(norm1, y1, y1); \
REAL_DOT_FLOAT2(norm2, y2, y2); \
norm0 += norm1; norm0 += norm2;
#define REDUCE_Y_AUXILIARY(i) \
REAL_DOT_FLOAT2(rdot0, x0, y0); \
REAL_DOT_FLOAT2(rdot1, x1, y1); \
REAL_DOT_FLOAT2(rdot2, x2, y2); \
rdot0 += rdot1; rdot0 += rdot2;
#define REDUCE_Z_AUXILIARY(i) \
IMAG_DOT_FLOAT2(idot0, x0, y0); \
IMAG_DOT_FLOAT2(idot1, x1, y1); \
IMAG_DOT_FLOAT2(idot2, x2, y2); \
idot0 += idot1; idot0 += idot2;
#define REDUCE_X_OPERATION(i) (xc*yc*rdot0)
#define REDUCE_Y_OPERATION(i) (xc*yc*idot0)
#define REDUCE_Z_OPERATION(i) (yc*yc*norm0)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
double3 cDotProductNormBCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return cDotProductNormBCuda(x.Even(), y.Even()) + cDotProductNormBCuda(x.Odd(), y.Odd());
const int id = 21;
quda::blas_flops += 6*x.RealLength();
checkSpinor(x,y);
int length = x.Length()/2;
quda::blas_bytes += 2*x.RealLength()*x.Precision();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
cudaBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
cudaBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
return cDotProductNormBDCuda((double2*)x.V(), (double2*)y.V(), length, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
return cDotProductNormBSCuda((float2*)x.V(), (float2*)y.V(), length, id, x.Precision());
} else {
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 2*x.Volume()*sizeof(float);
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
return cDotProductNormBHCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
} else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
return cDotProductNormBHStCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
}
}
exit(-1);
}
//
// double3 caxpbypzYmbwcDotProductUYNormYCuda(float2 a, float2 *x, float2 b, float2 *y,
//                                            float2 *z, float2 *w, float2 *u, int len)
//
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) caxpbypzYmbwcDotProductUYNormYD##suffix
#define REDUCE_TYPES Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, Float2 *w, Float2 *u
#define REDUCE_PARAMS a, x, b, y, z, w, u
#define REDUCE_X_AUXILIARY(i) \
Float2 X = READ_DOUBLE2_TEXTURE(x, i); \
Float2 Y = READ_DOUBLE2_TEXTURE(y, i); \
Float2 W = READ_DOUBLE2_TEXTURE(w, i);
#define REDUCE_Y_AUXILIARY(i) \
Float2 Z = read_Float2(z, i); \
Z.x += a.x*X.x - a.y*X.y; \
Z.y += a.y*X.x + a.x*X.y; \
Z.x += b.x*Y.x - b.y*Y.y; \
Z.y += b.y*Y.x + b.x*Y.y; \
Y.x -= b.x*W.x - b.y*W.y; \
Y.y -= b.y*W.x + b.x*W.y;
#define REDUCE_Z_AUXILIARY(i) \
z[i] = make_Float2(Z); \
y[i] = make_Float2(Y);
#define REDUCE_X_OPERATION(i) (u[i].x*y[i].x + u[i].y*y[i].y)
#define REDUCE_Y_OPERATION(i) (u[i].x*y[i].y - u[i].y*y[i].x)
#define REDUCE_Z_OPERATION(i) (y[i].x*y[i].x + y[i].y*y[i].y)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) caxpbypzYmbwcDotProductUYNormYS##suffix
#define REDUCE_TYPES Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, Float2 *w, Float2 *u
#define REDUCE_PARAMS a, x, b, y, z, w, u
#define REDUCE_X_AUXILIARY(i) \
Float2 X = read_Float2(x, i); \
Float2 Y = read_Float2(y, i); \
Float2 W = read_Float2(w, i);
#define REDUCE_Y_AUXILIARY(i) \
Float2 Z = read_Float2(z, i); \
Z.x += a.x*X.x - a.y*X.y; \
Z.y += a.y*X.x + a.x*X.y; \
Z.x += b.x*Y.x - b.y*Y.y; \
Z.y += b.y*Y.x + b.x*Y.y; \
Y.x -= b.x*W.x - b.y*W.y; \
Y.y -= b.y*W.x + b.x*W.y;
#define REDUCE_Z_AUXILIARY(i) \
z[i] = make_Float2(Z); \
y[i] = make_Float2(Y);
#define REDUCE_X_OPERATION(i) (u[i].x*y[i].x + u[i].y*y[i].y)
#define REDUCE_Y_OPERATION(i) (u[i].x*y[i].y - u[i].y*y[i].x)
#define REDUCE_Z_OPERATION(i) (y[i].x*y[i].x + y[i].y*y[i].y)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
//
// double3 caxpbypzYmbwcDotProductUYNormYCuda(float2 a, float2 *x, float2 b, float2 *y,
//                                            float2 *z, float2 *w, float2 *u, int len)
//
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) caxpbypzYmbwcDotProductUYNormYH##suffix
#define REDUCE_TYPES Float2 a, Float2 b, short4 *yH, float *yN, short4 *zH, float *zN, float *wN, float *uN, int stride
#define REDUCE_PARAMS a, b, yH, yN, zH, zN, wN, uN, stride
#define REDUCE_X_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); \
CAXPBYPZ_FLOAT4(a, x0, b, y0, z0); \
CAXPBYPZ_FLOAT4(a, x1, b, y1, z1); \
CAXPBYPZ_FLOAT4(a, x2, b, y2, z2); \
CAXPBYPZ_FLOAT4(a, x3, b, y3, z3); \
CAXPBYPZ_FLOAT4(a, x4, b, y4, z4); \
CAXPBYPZ_FLOAT4(a, x5, b, y5, z5); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(zH, zN, z, stride); \
READ_HALF_SPINOR(w, texHalf4, stride); \
float2 bwc = -wc*b; \
CAXPY_FLOAT4(bwc, w0, y0); \
CAXPY_FLOAT4(bwc, w1, y1); \
CAXPY_FLOAT4(bwc, w2, y2); \
CAXPY_FLOAT4(bwc, w3, y3); \
CAXPY_FLOAT4(bwc, w4, y4); \
CAXPY_FLOAT4(bwc, w5, y5); \
REAL_DOT_FLOAT4(norm0, y0, y0); \
REAL_DOT_FLOAT4(norm1, y1, y1); \
REAL_DOT_FLOAT4(norm2, y2, y2); \
REAL_DOT_FLOAT4(norm3, y3, y3); \
REAL_DOT_FLOAT4(norm4, y4, y4); \
REAL_DOT_FLOAT4(norm5, y5, y5); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
#define REDUCE_Y_AUXILIARY(i) \
READ_HALF_SPINOR(u, texHalf5, stride); \
REAL_DOT_FLOAT4(rdot0, u0, y0); \
REAL_DOT_FLOAT4(rdot1, u1, y1); \
REAL_DOT_FLOAT4(rdot2, u2, y2); \
REAL_DOT_FLOAT4(rdot3, u3, y3); \
REAL_DOT_FLOAT4(rdot4, u4, y4); \
REAL_DOT_FLOAT4(rdot5, u5, y5); \
IMAG_DOT_FLOAT4(idot0, u0, y0); \
IMAG_DOT_FLOAT4(idot1, u1, y1); \
IMAG_DOT_FLOAT4(idot2, u2, y2); \
IMAG_DOT_FLOAT4(idot3, u3, y3); \
IMAG_DOT_FLOAT4(idot4, u4, y4); \
IMAG_DOT_FLOAT4(idot5, u5, y5);
#define REDUCE_Z_AUXILIARY(i) \
  norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \
rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; \
idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4;
#define REDUCE_X_OPERATION(i) (uc*rdot0)
#define REDUCE_Y_OPERATION(i) (uc*idot0)
#define REDUCE_Z_OPERATION(i) (norm0)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
template <unsigned int reduce_threads, typename Float2>
#define REDUCE_FUNC_NAME(suffix) caxpbypzYmbwcDotProductUYNormYH##suffix
#define REDUCE_TYPES Float2 a, Float2 b, short2 *yH, float *yN, short2 *zH, float *zN, float *wN, float *uN, int stride
#define REDUCE_PARAMS a, b, yH, yN, zH, zN, wN, uN, stride
#define REDUCE_X_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); \
CAXPBYPZ_FLOAT2(a, x0, b, y0, z0); \
CAXPBYPZ_FLOAT2(a, x1, b, y1, z1); \
CAXPBYPZ_FLOAT2(a, x2, b, y2, z2); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(zH, zN, z, stride); \
READ_HALF_SPINOR_ST(w, texHalfSt4, stride); \
float2 bwc = -wc*b; \
CAXPY_FLOAT2(bwc, w0, y0); \
CAXPY_FLOAT2(bwc, w1, y1); \
CAXPY_FLOAT2(bwc, w2, y2); \
REAL_DOT_FLOAT2(norm0, y0, y0); \
REAL_DOT_FLOAT2(norm1, y1, y1); \
REAL_DOT_FLOAT2(norm2, y2, y2); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
#define REDUCE_Y_AUXILIARY(i) \
READ_HALF_SPINOR_ST(u, texHalfSt5, stride); \
REAL_DOT_FLOAT2(rdot0, u0, y0); \
REAL_DOT_FLOAT2(rdot1, u1, y1); \
REAL_DOT_FLOAT2(rdot2, u2, y2); \
IMAG_DOT_FLOAT2(idot0, u0, y0); \
IMAG_DOT_FLOAT2(idot1, u1, y1); \
IMAG_DOT_FLOAT2(idot2, u2, y2);
#define REDUCE_Z_AUXILIARY(i) \
norm0 += norm1; norm0 += norm2; \
rdot0 += rdot1; rdot0 += rdot2; \
idot0 += idot1; idot0 += idot2;
#define REDUCE_X_OPERATION(i) (uc*rdot0)
#define REDUCE_Y_OPERATION(i) (uc*idot0)
#define REDUCE_Z_OPERATION(i) (norm0)
#include "reduce_triple_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_X_AUXILIARY
#undef REDUCE_Y_AUXILIARY
#undef REDUCE_Z_AUXILIARY
#undef REDUCE_X_OPERATION
#undef REDUCE_Y_OPERATION
#undef REDUCE_Z_OPERATION
// This convoluted kernel does the following: z += a*x + b*y, y -= b*w, norm = (y,y), dot = (u, y)
double3 caxpbypzYmbwcDotProductUYNormYCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y,
cudaColorSpinorField &z, cudaColorSpinorField &w, cudaColorSpinorField &u) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return caxpbypzYmbwcDotProductUYNormYCuda(a, x.Even(), b, y.Even(), z.Even(), w.Even(), u.Even()) +
caxpbypzYmbwcDotProductUYNormYCuda(a, x.Odd(), b, y.Odd(), z.Odd(), w.Odd(), u.Odd());
const int id = 22;
quda::blas_flops += 18*x.RealLength();
checkSpinor(x,y);
checkSpinor(x,z);
checkSpinor(x,w);
checkSpinor(x,u);
int length = x.Length()/2;
quda::blas_bytes += 7*x.RealLength()*x.Precision();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
cudaBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
cudaBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
cudaBindTexture(0, zTexDouble2, z.V(), spinor_bytes);
cudaBindTexture(0, wTexDouble2, w.V(), spinor_bytes);
cudaBindTexture(0, uTexDouble2, u.V(), spinor_bytes);
double2 a2 = make_double2(real(a), imag(a));
double2 b2 = make_double2(real(b), imag(b));
return caxpbypzYmbwcDotProductUYNormYDCuda(a2, (double2*)x.V(), b2, (double2*)y.V(), (double2*)z.V(),
(double2*)w.V(), (double2*)u.V(), length, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
return caxpbypzYmbwcDotProductUYNormYSCuda(a2, (float2*)x.V(), b2, (float2*)y.V(), (float2*)z.V(),
(float2*)w.V(), (float2*)u.V(), length, id, x.Precision());
} else {
// fused nSpin=4 kernel is slow on Fermi
// N.B. this introduces an extra half truncation so will affect convergence (for the better?)
    if (!blasTuning && (__COMPUTE_CAPABILITY__ >= 200) && x.Nspin() == 4) {
caxpbypzYmbwCuda(a, x, b, y, z, w);
return cDotProductNormBCuda(u, y);
}
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 7*x.Volume()*sizeof(float);
if (x.Nspin() == 4) { // wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf3, z.V(), spinor_bytes);
cudaBindTexture(0, texNorm3, z.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf4, w.V(), spinor_bytes);
cudaBindTexture(0, texNorm4, w.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf5, u.V(), spinor_bytes);
cudaBindTexture(0, texNorm5, u.Norm(), spinor_bytes/12);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
return caxpbypzYmbwcDotProductUYNormYHCuda(a2, b2, (short4*)y.V(), (float*)y.Norm(),
(short4*)z.V(), (float*)z.Norm(), (float*)w.Norm(), (float*)u.Norm(),
y.Stride(), y.Volume(), id, x.Precision());
} else if (x.Nspin() == 1){ // staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt3, z.V(), spinor_bytes);
cudaBindTexture(0, texNorm3, z.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt4, w.V(), spinor_bytes);
cudaBindTexture(0, texNorm4, w.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt5, u.V(), spinor_bytes);
cudaBindTexture(0, texNorm5, u.Norm(), spinor_bytes/3);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
return caxpbypzYmbwcDotProductUYNormYHCuda(a2, b2, (short2*)y.V(), (float*)y.Norm(),
(short2*)z.V(), (float*)z.Norm(), (float*)w.Norm(), (float*)u.Norm(),
y.Stride(), y.Volume(), id, x.Precision());
} else {
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
}
}
exit(-1);
}
template <typename Float, typename Float2>
__global__ void cabxpyAxKernel(Float a, Float2 b, Float2 *x, Float2 *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
x[i].x *= a;
x[i].y *= a;
y[i].x += b.x*x[i].x - b.y*x[i].y;
y[i].y += b.y*x[i].x + b.x*x[i].y;
i += gridSize;
}
}
__global__ void cabxpyAxHKernel(float a, float2 b, short4 *xH, float *xN, short4 *yH, float *yN,
int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
AX_FLOAT4(a, x0);
AX_FLOAT4(a, x1);
AX_FLOAT4(a, x2);
AX_FLOAT4(a, x3);
AX_FLOAT4(a, x4);
AX_FLOAT4(a, x5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride);
CAXPY_FLOAT4(b, x0, y0);
CAXPY_FLOAT4(b, x1, y1);
CAXPY_FLOAT4(b, x2, y2);
CAXPY_FLOAT4(b, x3, y3);
CAXPY_FLOAT4(b, x4, y4);
CAXPY_FLOAT4(b, x5, y5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
i += gridSize;
}
}
__global__ void cabxpyAxHKernel(float a, float2 b, short2 *xH, float *xN, short2 *yH, float *yN,
int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
AX_FLOAT2(a, x0);
AX_FLOAT2(a, x1);
AX_FLOAT2(a, x2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride);
CAXPY_FLOAT2(b, x0, y0);
CAXPY_FLOAT2(b, x1, y1);
CAXPY_FLOAT2(b, x2, y2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
i += gridSize;
}
}
// performs the operation y[i] += a*b*x[i], x[i] *= a
void cabxpyAxCuda(const double &a, const quda::Complex &b, cudaColorSpinorField &x, cudaColorSpinorField &y) {
checkSpinor(x,y);
int length = x.Length()/2;
setBlock(23, length, x.Precision());
quda::blas_bytes += 4*x.RealLength()*x.Precision();
quda::blas_flops += 5*x.RealLength();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
double2 b2 = make_double2(real(b), imag(b));
cabxpyAxKernel<<<blasGrid, blasBlock>>>((double)a, b2, (double2*)x.V(), (double2*)y.V(), length);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 b2 = make_float2(real(b), imag(b));
cabxpyAxKernel<<<blasGrid, blasBlock>>>((float)a, b2, (float2*)x.V(), (float2*)y.V(), length);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
cabxpyAxCuda(a, b, x.Even(), y.Even());
cabxpyAxCuda(a, b, x.Odd(), y.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
float2 b2 = make_float2(real(b), imag(b));
cabxpyAxHKernel<<<blasGrid, blasBlock>>>((float)a, b2, (short4*)x.V(), (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
} else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
float2 b2 = make_float2(real(b), imag(b));
cabxpyAxHKernel<<<blasGrid, blasBlock>>>((float)a, b2, (short2*)x.V(), (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
quda::blas_bytes += 4*x.Volume()*sizeof(float);
}
if (!blasTuning) checkCudaError();
}
//
// double caxpyNormCuda(float a, float *x, float *y, n){}
//
// First performs the operation y[i] = a*x[i] + y[i]
// Second returns the norm of y
//
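// Each reduction below is generated by textually including reduce_core.h with the REDUCE_*
// macros defined: REDUCE_AUXILIARY supplies the per-element update and REDUCE_OPERATION the
// quantity being summed, so each #include instantiates a reduction kernel plus its host
// wrapper named by REDUCE_FUNC_NAME (the exact expansion lives in reduce_core.h).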
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) caxpyNormF##suffix
#define REDUCE_TYPES Float a, Float *x, Float *y
#define REDUCE_PARAMS a, x, y
#define REDUCE_AUXILIARY(i) \
y[i].x += a.x*x[i].x - a.y*x[i].y; \
y[i].y += a.y*x[i].x + a.x*x[i].y
#define REDUCE_OPERATION(i) (y[i].x*y[i].x + y[i].y*y[i].y)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) caxpyNormH##suffix
#define REDUCE_TYPES Float a, short4 *yH, float *yN, int stride
#define REDUCE_PARAMS a, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \
CAXPY_FLOAT4(a, x0, y0); \
REAL_DOT_FLOAT4(norm0, y0, y0); \
CAXPY_FLOAT4(a, x1, y1); \
REAL_DOT_FLOAT4(norm1, y1, y1); \
CAXPY_FLOAT4(a, x2, y2); \
REAL_DOT_FLOAT4(norm2, y2, y2); \
CAXPY_FLOAT4(a, x3, y3); \
REAL_DOT_FLOAT4(norm3, y3, y3); \
CAXPY_FLOAT4(a, x4, y4); \
REAL_DOT_FLOAT4(norm4, y4, y4); \
CAXPY_FLOAT4(a, x5, y5); \
REAL_DOT_FLOAT4(norm5, y5, y5); \
norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) caxpyNormH##suffix
#define REDUCE_TYPES Float a, short2 *yH, float *yN, int stride
#define REDUCE_PARAMS a, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \
CAXPY_FLOAT2(a, x0, y0); \
REAL_DOT_FLOAT2(norm0, y0, y0); \
CAXPY_FLOAT2(a, x1, y1); \
REAL_DOT_FLOAT2(norm1, y1, y1); \
CAXPY_FLOAT2(a, x2, y2); \
REAL_DOT_FLOAT2(norm2, y2, y2); \
norm0 += norm1; norm0 += norm2; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
double caxpyNormCuda(const quda::Complex &a, cudaColorSpinorField &x, cudaColorSpinorField &y) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return caxpyNormCuda(a, x.Even(), y.Even()) + caxpyNormCuda(a, x.Odd(), y.Odd());
const int id = 24;
quda::blas_flops += 6*x.RealLength();
checkSpinor(x,y);
quda::blas_bytes += 3*x.RealLength()*x.Precision();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
double2 a2 = make_double2(real(a), imag(a));
return caxpyNormFCuda(a2, (double2*)x.V(), (double2*)y.V(), x.Length()/2, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
return caxpyNormFCuda(a2, (float2*)x.V(), (float2*)y.V(), x.Length()/2, id, x.Precision());
} else {
cudaBindTexture(0, texNorm1, x.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
cudaBindTexture(0, texNorm2, y.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
quda::blas_bytes += 3*x.Volume()*sizeof(float);
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), x.Bytes());
cudaBindTexture(0, texHalf2, y.V(), x.Bytes());
float2 a2 = make_float2(real(a), imag(a));
return caxpyNormHCuda(a2, (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), x.Bytes());
cudaBindTexture(0, texHalfSt2, y.V(), x.Bytes());
float2 a2 = make_float2(real(a), imag(a));
return caxpyNormHCuda(a2, (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
return 0;
}
}
}
//
// double caxpyXmazNormXCuda(float a, float *x, float *y, float *z, n){}
//
// First performs the operation y[i] = a*x[i] + y[i]
// Second performs the operation x[i] -= a*z[i]
// Third returns the norm of x
//
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) caxpyXmazNormXF##suffix
#define REDUCE_TYPES Float a, Float *x, Float *y, Float *z
#define REDUCE_PARAMS a, x, y, z
#define REDUCE_AUXILIARY(i) \
y[i].x += a.x*x[i].x - a.y*x[i].y; \
y[i].y += a.y*x[i].x + a.x*x[i].y; \
x[i].x += a.y*z[i].y - a.x*z[i].x; \
x[i].y -= (a.x*z[i].y + a.y*z[i].x);
#define REDUCE_OPERATION(i) (x[i].x*x[i].x + x[i].y*x[i].y)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) caxpyXmazNormXH##suffix
#define REDUCE_TYPES Float a, short4 *xH, float *xN, short4 *yH, float *yN, int stride
#define REDUCE_PARAMS a, xH, xN, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); \
CAXPY_FLOAT4(a, x0, y0); \
CMAXPY_FLOAT4(a, z0, x0); \
REAL_DOT_FLOAT4(norm0, x0, x0); \
CAXPY_FLOAT4(a, x1, y1); \
CMAXPY_FLOAT4(a, z1, x1); \
REAL_DOT_FLOAT4(norm1, x1, x1); \
CAXPY_FLOAT4(a, x2, y2); \
CMAXPY_FLOAT4(a, z2, x2); \
REAL_DOT_FLOAT4(norm2, x2, x2); \
CAXPY_FLOAT4(a, x3, y3); \
CMAXPY_FLOAT4(a, z3, x3); \
REAL_DOT_FLOAT4(norm3, x3, x3); \
CAXPY_FLOAT4(a, x4, y4); \
CMAXPY_FLOAT4(a, z4, x4); \
REAL_DOT_FLOAT4(norm4, x4, x4); \
CAXPY_FLOAT4(a, x5, y5); \
CMAXPY_FLOAT4(a, z5, x5); \
REAL_DOT_FLOAT4(norm5, x5, x5); \
norm0 += norm1; norm2 += norm3; \
norm4 += norm5; norm0 += norm2; norm0 += norm4; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) caxpyXmazNormXH##suffix
#define REDUCE_TYPES Float a, short2 *xH, float *xN, short2 *yH, float *yN, int stride
#define REDUCE_PARAMS a, xH, xN, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); \
CAXPY_FLOAT2(a, x0, y0); \
CMAXPY_FLOAT2(a, z0, x0); \
REAL_DOT_FLOAT2(norm0, x0, x0); \
CAXPY_FLOAT2(a, x1, y1); \
CMAXPY_FLOAT2(a, z1, x1); \
REAL_DOT_FLOAT2(norm1, x1, x1); \
CAXPY_FLOAT2(a, x2, y2); \
CMAXPY_FLOAT2(a, z2, x2); \
REAL_DOT_FLOAT2(norm2, x2, x2); \
norm0 += norm1; norm0 += norm2; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
double caxpyXmazNormXCuda(const quda::Complex &a, cudaColorSpinorField &x,
cudaColorSpinorField &y, cudaColorSpinorField &z) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return caxpyXmazNormXCuda(a, x.Even(), y.Even(), z.Even()) +
caxpyXmazNormXCuda(a, x.Odd(), y.Odd(), z.Odd());
const int id = 25;
quda::blas_flops += 10*x.RealLength();
checkSpinor(x,y);
quda::blas_bytes += 5*x.RealLength()*x.Precision();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
double2 a2 = make_double2(real(a), imag(a));
return caxpyXmazNormXFCuda(a2, (double2*)x.V(), (double2*)y.V(), (double2*)z.V(), x.Length()/2, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
return caxpyXmazNormXFCuda(a2, (float2*)x.V(), (float2*)y.V(), (float2*)z.V(), x.Length()/2, id, x.Precision());
} else {
cudaBindTexture(0, texNorm1, x.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
cudaBindTexture(0, texNorm2, y.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
cudaBindTexture(0, texNorm3, z.Norm(), z.Bytes()/(z.Ncolor()*z.Nspin()));
quda::blas_bytes += 3*x.Volume()*sizeof(float);
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), x.Bytes());
cudaBindTexture(0, texHalf2, y.V(), x.Bytes());
cudaBindTexture(0, texHalf3, z.V(), z.Bytes());
float2 a2 = make_float2(real(a), imag(a));
return caxpyXmazNormXHCuda(a2, (short4*)x.V(), (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), x.Bytes());
cudaBindTexture(0, texHalfSt2, y.V(), x.Bytes());
cudaBindTexture(0, texHalfSt3, z.V(), z.Bytes());
float2 a2 = make_float2(real(a), imag(a));
return caxpyXmazNormXHCuda(a2, (short2*)x.V(), (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
return 0;
}
}
}
//
// double cabxpyAxNormCuda(float a, float b, float *x, float *y, n){}
//
// First performs the operations x[i] *= a and y[i] += b*x[i]
// Second returns the norm of y
//
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) cabxpyAxNormF##suffix
#define REDUCE_TYPES Float a, Float b, Float *x, Float *y
#define REDUCE_PARAMS a, b, x, y
#define REDUCE_AUXILIARY(i) \
x[i].x *= a.x; \
x[i].y *= a.x; \
y[i].x += b.x*x[i].x - b.y*x[i].y; \
y[i].y += b.y*x[i].x + b.x*x[i].y;
#define REDUCE_OPERATION(i) (y[i].x*y[i].x + y[i].y*y[i].y)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) cabxpyAxNormH##suffix
#define REDUCE_TYPES Float a, Float b, short4 *xH, float *xN, short4 *yH, float *yN, int stride
#define REDUCE_PARAMS a, b, xH, xN, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \
AX_FLOAT4(a.x, x0); \
AX_FLOAT4(a.x, x1); \
AX_FLOAT4(a.x, x2); \
AX_FLOAT4(a.x, x3); \
AX_FLOAT4(a.x, x4); \
AX_FLOAT4(a.x, x5); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride); \
CAXPY_FLOAT4(b, x0, y0); \
REAL_DOT_FLOAT4(norm0, y0, y0); \
CAXPY_FLOAT4(b, x1, y1); \
REAL_DOT_FLOAT4(norm1, y1, y1); \
CAXPY_FLOAT4(b, x2, y2); \
REAL_DOT_FLOAT4(norm2, y2, y2); \
CAXPY_FLOAT4(b, x3, y3); \
REAL_DOT_FLOAT4(norm3, y3, y3); \
CAXPY_FLOAT4(b, x4, y4); \
REAL_DOT_FLOAT4(norm4, y4, y4); \
CAXPY_FLOAT4(b, x5, y5); \
REAL_DOT_FLOAT4(norm5, y5, y5); \
norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) cabxpyAxNormH##suffix
#define REDUCE_TYPES Float a, Float b, short2 *xH, float *xN, short2 *yH, float *yN, int stride
#define REDUCE_PARAMS a, b, xH, xN, yH, yN, stride
#define REDUCE_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \
AX_FLOAT2(a.x, x0); \
AX_FLOAT2(a.x, x1); \
AX_FLOAT2(a.x, x2); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride); \
CAXPY_FLOAT2(b, x0, y0); \
REAL_DOT_FLOAT2(norm0, y0, y0); \
CAXPY_FLOAT2(b, x1, y1); \
REAL_DOT_FLOAT2(norm1, y1, y1); \
CAXPY_FLOAT2(b, x2, y2); \
REAL_DOT_FLOAT2(norm2, y2, y2); \
norm0 += norm1; norm0 += norm2; \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
double cabxpyAxNormCuda(const double &a, const quda::Complex &b, cudaColorSpinorField &x, cudaColorSpinorField &y) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return cabxpyAxNormCuda(a, b, x.Even(), y.Even()) + cabxpyAxNormCuda(a, b, x.Odd(), y.Odd());
const int id = 26;
quda::blas_flops += 7*x.RealLength();
checkSpinor(x,y);
quda::blas_bytes += 4*x.RealLength()*x.Precision();
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
double2 a2 = make_double2(a, 0);
double2 b2 = make_double2(real(b), imag(b));
return cabxpyAxNormFCuda(a2, b2, (double2*)x.V(), (double2*)y.V(), x.Length()/2, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(a, 0);
float2 b2 = make_float2(real(b), imag(b));
return cabxpyAxNormFCuda(a2, b2, (float2*)x.V(), (float2*)y.V(), x.Length()/2, id, x.Precision());
} else {
cudaBindTexture(0, texNorm1, x.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
cudaBindTexture(0, texNorm2, y.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
quda::blas_bytes += 3*x.Volume()*sizeof(float);
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), x.Bytes());
cudaBindTexture(0, texHalf2, y.V(), x.Bytes());
float2 a2 = make_float2(a, 0);
float2 b2 = make_float2(real(b), imag(b));
return cabxpyAxNormHCuda(a2, b2, (short4*)x.V(), (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), x.Bytes());
cudaBindTexture(0, texHalfSt2, y.V(), x.Bytes());
float2 a2 = make_float2(a, 0);
float2 b2 = make_float2(real(b), imag(b));
return cabxpyAxNormHCuda(a2, b2, (short2*)x.V(), (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
return 0;
}
}
}
template <typename Float2>
__global__ void caxpbypzDKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 X = READ_DOUBLE2_TEXTURE(x, i);
Float2 Z = read_Float2(z, i);
Z.x += a.x*X.x - a.y*X.y;
Z.y += a.y*X.x + a.x*X.y;
Float2 Y = READ_DOUBLE2_TEXTURE(y, i);
Z.x += b.x*Y.x - b.y*Y.y;
Z.y += b.y*Y.x + b.x*Y.y;
z[i] = make_Float2(Z);
i += gridSize;
}
}
template <typename Float2>
__global__ void caxpbypzSKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 X = read_Float2(x, i);
Float2 Z = read_Float2(z, i);
Z.x += a.x*X.x - a.y*X.y;
Z.y += a.y*X.x + a.x*X.y;
Float2 Y = read_Float2(y, i);
Z.x += b.x*Y.x - b.y*Y.y;
Z.y += b.y*Y.x + b.x*Y.y;
z[i] = make_Float2(Z);
i += gridSize;
}
}
__global__ void caxpbypzHKernel(float2 a, float2 b, float *xN, short4 *yH, float *yN,
short4 *zH, float *zN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride);
CAXPBYPZ_FLOAT4(a, x0, b, y0, z0);
CAXPBYPZ_FLOAT4(a, x1, b, y1, z1);
CAXPBYPZ_FLOAT4(a, x2, b, y2, z2);
CAXPBYPZ_FLOAT4(a, x3, b, y3, z3);
CAXPBYPZ_FLOAT4(a, x4, b, y4, z4);
CAXPBYPZ_FLOAT4(a, x5, b, y5, z5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(zH, zN, z, stride);
i += gridSize;
}
}
__global__ void caxpbypzHKernel(float2 a, float2 b, float *xN, short2 *yH, float *yN,
short2 *zH, float *zN, int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride);
CAXPBYPZ_FLOAT2(a, x0, b, y0, z0);
CAXPBYPZ_FLOAT2(a, x1, b, y1, z1);
CAXPBYPZ_FLOAT2(a, x2, b, y2, z2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(zH, zN, z, stride);
i += gridSize;
}
}
// performs the operation z[i] = a*x[i] + b*y[i] + z[i]
void caxpbypzCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b,
cudaColorSpinorField &y, cudaColorSpinorField &z) {
checkSpinor(x,y);
checkSpinor(x,z);
int length = x.Length()/2;
setBlock(27, length, x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
double2 a2 = make_double2(real(a), imag(a));
double2 b2 = make_double2(real(b), imag(b));
int spinor_bytes = x.Length()*sizeof(double);
cudaBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
cudaBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
cudaBindTexture(0, zTexDouble2, z.V(), spinor_bytes);
caxpbypzDKernel<<<blasGrid, blasBlock>>>(a2, (double2*)x.V(), b2, (double2*)y.V(), (double2*)z.V(), length);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
caxpbypzSKernel<<<blasGrid, blasBlock>>>(a2, (float2*)x.V(), b2, (float2*)y.V(), (float2*)z.V(), length);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
caxpbypzCuda(a, x.Even(), b, y.Even(), z.Even());
caxpbypzCuda(a, x.Odd(), b, y.Odd(), z.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 6*x.Volume()*sizeof(float);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf3, z.V(), spinor_bytes);
cudaBindTexture(0, texNorm3, z.Norm(), spinor_bytes/12);
caxpbypzHKernel<<<blasGrid, blasBlock>>>(a2, b2, (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(),
(short4*)z.V(), (float*)z.Norm(), z.Stride(), z.Volume());
} else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt3, z.V(), spinor_bytes);
cudaBindTexture(0, texNorm3, z.Norm(), spinor_bytes/3);
caxpbypzHKernel<<<blasGrid, blasBlock>>>(a2, b2, (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(),
(short2*)z.V(), (float*)z.Norm(), z.Stride(), z.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
}
quda::blas_bytes += 4*x.RealLength()*x.Precision();
quda::blas_flops += 8*x.RealLength();
if (!blasTuning) checkCudaError();
}
template <typename Float2>
__global__ void caxpbypczpwDKernel(Float2 a, Float2 *x, Float2 b, Float2 *y,
Float2 c, Float2 *z, Float2 *w, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 W = read_Float2(w, i);
Float2 X = READ_DOUBLE2_TEXTURE(x, i);
CAXPY_DOUBLE2(a, X, W);
Float2 Y = READ_DOUBLE2_TEXTURE(y, i);
CAXPY_DOUBLE2(b, Y, W);
Float2 Z = READ_DOUBLE2_TEXTURE(z, i);
CAXPY_DOUBLE2(c, Z, W);
w[i] = make_Float2(W);
i += gridSize;
}
}
template <typename Float2>
__global__ void caxpbypczpwSKernel(Float2 a, Float2 *x, Float2 b, Float2 *y,
Float2 c, Float2 *z, Float2 *w, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
Float2 W = read_Float2(w, i);
Float2 X = read_Float2(x, i);
CAXPY_FLOAT2(a, X, W);
Float2 Y = read_Float2(y, i);
CAXPY_FLOAT2(b, Y, W);
Float2 Z = read_Float2(z, i);
CAXPY_FLOAT2(c, Z, W);
w[i] = make_Float2(W);
i += gridSize;
}
}
__global__ void caxpbypczpwHKernel(float2 a, float2 b, float2 c, float *xN, short4 *yH, float *yN,
short4 *zH, float *zN, short4* wH, float *wN,
int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR(w, texHalf4, texNorm4, stride);
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
CAXPY_FLOAT4(a, x0, w0);
CAXPY_FLOAT4(a, x1, w1);
CAXPY_FLOAT4(a, x2, w2);
CAXPY_FLOAT4(a, x3, w3);
CAXPY_FLOAT4(a, x4, w4);
CAXPY_FLOAT4(a, x5, w5);
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
CAXPY_FLOAT4(b, y0, w0);
CAXPY_FLOAT4(b, y1, w1);
CAXPY_FLOAT4(b, y2, w2);
CAXPY_FLOAT4(b, y3, w3);
CAXPY_FLOAT4(b, y4, w4);
CAXPY_FLOAT4(b, y5, w5);
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride);
CAXPY_FLOAT4(c, z0, w0);
CAXPY_FLOAT4(c, z1, w1);
CAXPY_FLOAT4(c, z2, w2);
CAXPY_FLOAT4(c, z3, w3);
CAXPY_FLOAT4(c, z4, w4);
CAXPY_FLOAT4(c, z5, w5);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(wH, wN, w, stride);
i += gridSize;
}
}
__global__ void caxpbypczpwHKernel(float2 a, float2 b, float2 c, float *xN, short2 *yH, float *yN,
short2 *zH, float *zN, short2 *wH, float *wN,
int stride, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
RECONSTRUCT_HALF_SPINOR_ST(w, texHalfSt4, texNorm4, stride);
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
CAXPY_FLOAT2(a, x0, w0);
CAXPY_FLOAT2(a, x1, w1);
CAXPY_FLOAT2(a, x2, w2);
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
CAXPY_FLOAT2(b, y0, w0);
CAXPY_FLOAT2(b, y1, w1);
CAXPY_FLOAT2(b, y2, w2);
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride);
CAXPY_FLOAT2(c, z0, w0);
CAXPY_FLOAT2(c, z1, w1);
CAXPY_FLOAT2(c, z2, w2);
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(wH, wN, w, stride);
i += gridSize;
}
}
// performs the operation w[i] = a*x[i] + b*y[i] + c*z[i] + w[i]
void caxpbypczpwCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y,
const quda::Complex &c, cudaColorSpinorField &z, cudaColorSpinorField &w) {
checkSpinor(x,y);
checkSpinor(x,z);
checkSpinor(x,w);
int length = x.Length()/2;
setBlock(28, length, x.Precision());
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
int spinor_bytes = x.Length()*sizeof(double);
cudaBindTexture(0, xTexDouble2, x.V(), spinor_bytes);
cudaBindTexture(0, yTexDouble2, y.V(), spinor_bytes);
cudaBindTexture(0, zTexDouble2, z.V(), spinor_bytes);
cudaBindTexture(0, wTexDouble2, w.V(), spinor_bytes);
double2 a2 = make_double2(real(a), imag(a));
double2 b2 = make_double2(real(b), imag(b));
double2 c2 = make_double2(real(c), imag(c));
caxpbypczpwDKernel<<<blasGrid, blasBlock>>>(a2, (double2*)x.V(), b2, (double2*)y.V(),
c2, (double2*)z.V(), (double2*)w.V(), length);
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
float2 c2 = make_float2(real(c), imag(c));
caxpbypczpwSKernel<<<blasGrid, blasBlock>>>(a2, (float2*)x.V(), b2, (float2*)y.V(),
c2, (float2*)z.V(), (float2*)w.V(), length);
} else {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
caxpbypczpwCuda(a, x.Even(), b, y.Even(), c, z.Even(), w.Even());
caxpbypczpwCuda(a, x.Odd(), b, y.Odd(), c, z.Odd(), w.Odd());
return;
}
int spinor_bytes = x.Length()*sizeof(short);
quda::blas_bytes += 6*x.Volume()*sizeof(float);
float2 a2 = make_float2(real(a), imag(a));
float2 b2 = make_float2(real(b), imag(b));
float2 c2 = make_float2(real(c), imag(c));
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf3, z.V(), spinor_bytes);
cudaBindTexture(0, texNorm3, z.Norm(), spinor_bytes/12);
cudaBindTexture(0, texHalf4, w.V(), spinor_bytes);
cudaBindTexture(0, texNorm4, w.Norm(), spinor_bytes/12);
caxpbypczpwHKernel<<<blasGrid, blasBlock>>>(a2, b2, c2, (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(),
(short4*)z.V(), (float*)z.Norm(), (short4*)w.V(), (float*)w.Norm(),
z.Stride(), z.Volume());
} else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), spinor_bytes);
cudaBindTexture(0, texNorm1, x.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt2, y.V(), spinor_bytes);
cudaBindTexture(0, texNorm2, y.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt3, z.V(), spinor_bytes);
cudaBindTexture(0, texNorm3, z.Norm(), spinor_bytes/3);
cudaBindTexture(0, texHalfSt4, w.V(), spinor_bytes);
cudaBindTexture(0, texNorm4, w.Norm(), spinor_bytes/3);
caxpbypczpwHKernel<<<blasGrid, blasBlock>>>(a2, b2, c2, (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(),
(short2*)z.V(), (float*)z.Norm(), (short2*)w.V(), (float*)w.Norm(),
z.Stride(), z.Volume());
}else{
errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
}
}
quda::blas_bytes += 5*x.RealLength()*x.Precision();
quda::blas_flops += 12*x.RealLength();
if (!blasTuning) checkCudaError();
}
//
// double caxpyDotzyCuda(float a, float *x, float *y, float *z, n){}
//
// First performs the operation y[i] = a*x[i] + y[i]
// Second returns the dot product (z,y)
//
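// Complex-valued reductions follow the same include pattern with reduce_complex_core.h:
// REDUCE_REAL_* and REDUCE_IMAG_* define the two components accumulated in one pass, and
// the host wrapper hands them back packed as a double2 (real, imag).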
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) caxpyDotzyF##suffix
#define REDUCE_TYPES Float2 a, Float2 *x, Float2 *y, Float2 *z, Float c
#define REDUCE_PARAMS a, x, y, z, c
#define REDUCE_REAL_AUXILIARY(i) y[i].x += a.x*x[i].x - a.y*x[i].y;
#define REDUCE_IMAG_AUXILIARY(i) y[i].y += a.y*x[i].x + a.x*x[i].y;
#define REDUCE_REAL_OPERATION(i) (z[i].x*y[i].x + z[i].y*y[i].y)
#define REDUCE_IMAG_OPERATION(i) (z[i].x*y[i].y - z[i].y*y[i].x)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) caxpyDotzyH##suffix
#define REDUCE_TYPES Float2 a, short4 *yH, Float *yN, int stride
#define REDUCE_PARAMS a, yH, yN, stride
#define REDUCE_REAL_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \
RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); \
CAXPY_FLOAT4(a, x0, y0); \
CAXPY_FLOAT4(a, x1, y1); \
CAXPY_FLOAT4(a, x2, y2); \
CAXPY_FLOAT4(a, x3, y3); \
CAXPY_FLOAT4(a, x4, y4); \
CAXPY_FLOAT4(a, x5, y5); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
#define REDUCE_IMAG_AUXILIARY(i) \
REAL_DOT_FLOAT4(rdot0, z0, y0); \
REAL_DOT_FLOAT4(rdot1, z1, y1); \
REAL_DOT_FLOAT4(rdot2, z2, y2); \
REAL_DOT_FLOAT4(rdot3, z3, y3); \
REAL_DOT_FLOAT4(rdot4, z4, y4); \
REAL_DOT_FLOAT4(rdot5, z5, y5); \
IMAG_DOT_FLOAT4(idot0, z0, y0); \
IMAG_DOT_FLOAT4(idot1, z1, y1); \
IMAG_DOT_FLOAT4(idot2, z2, y2); \
IMAG_DOT_FLOAT4(idot3, z3, y3); \
IMAG_DOT_FLOAT4(idot4, z4, y4); \
IMAG_DOT_FLOAT4(idot5, z5, y5); \
rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; \
idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4;
#define REDUCE_REAL_OPERATION(i) (rdot0)
#define REDUCE_IMAG_OPERATION(i) (idot0)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) caxpyDotzyH##suffix
#define REDUCE_TYPES Float2 a, short2 *yH, Float *yN, int stride
#define REDUCE_PARAMS a, yH, yN, stride
#define REDUCE_REAL_AUXILIARY(i) \
RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \
RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \
RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); \
CAXPY_FLOAT2(a, x0, y0); \
CAXPY_FLOAT2(a, x1, y1); \
CAXPY_FLOAT2(a, x2, y2); \
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
#define REDUCE_IMAG_AUXILIARY(i) \
REAL_DOT_FLOAT2(rdot0, z0, y0); \
REAL_DOT_FLOAT2(rdot1, z1, y1); \
REAL_DOT_FLOAT2(rdot2, z2, y2); \
IMAG_DOT_FLOAT2(idot0, z0, y0); \
IMAG_DOT_FLOAT2(idot1, z1, y1); \
IMAG_DOT_FLOAT2(idot2, z2, y2); \
rdot0 += rdot1; rdot0 += rdot2; \
idot0 += idot1; idot0 += idot2;
#define REDUCE_REAL_OPERATION(i) (rdot0)
#define REDUCE_IMAG_OPERATION(i) (idot0)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION
quda::Complex caxpyDotzyCuda(const quda::Complex &a, cudaColorSpinorField &x, cudaColorSpinorField &y,
cudaColorSpinorField &z) {
if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
return caxpyDotzyCuda(a, x.Even(), y.Even(), z.Even()) +
caxpyDotzyCuda(a, x.Odd(), y.Odd(), z.Odd());
const int id = 29;
quda::blas_flops += 8*x.RealLength();
checkSpinor(x,y);
quda::blas_bytes += 4*x.RealLength()*x.Precision();
double2 dot;
if (x.Precision() == QUDA_DOUBLE_PRECISION) {
char c = 0;
double2 a2 = make_double2(real(a), imag(a));
dot = caxpyDotzyFCuda(a2, (double2*)x.V(), (double2*)y.V(), (double2*)z.V(), c, x.Length()/2, id, x.Precision());
} else if (x.Precision() == QUDA_SINGLE_PRECISION) {
char c = 0;
float2 a2 = make_float2(real(a), imag(a));
dot = caxpyDotzyFCuda(a2, (float2*)x.V(), (float2*)y.V(), (float2*)z.V(), c, x.Length()/2, id, x.Precision());
} else {
cudaBindTexture(0, texNorm1, x.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
cudaBindTexture(0, texNorm2, y.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
cudaBindTexture(0, texNorm3, z.Norm(), x.Bytes()/(x.Ncolor()*x.Nspin()));
quda::blas_bytes += 3*x.Volume()*sizeof(float);
float2 a2 = make_float2(real(a), imag(a));
if (x.Nspin() == 4){ //wilson
cudaBindTexture(0, texHalf1, x.V(), x.Bytes());
cudaBindTexture(0, texHalf2, y.V(), x.Bytes());
cudaBindTexture(0, texHalf3, z.V(), x.Bytes());
dot = caxpyDotzyHCuda(a2, (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else if (x.Nspin() == 1){ //staggered
cudaBindTexture(0, texHalfSt1, x.V(), x.Bytes());
cudaBindTexture(0, texHalfSt2, y.V(), x.Bytes());
cudaBindTexture(0, texHalfSt3, z.V(), x.Bytes());
dot = caxpyDotzyHCuda(a2, (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision());
}else{
errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
}
}
return quda::Complex(dot.x, dot.y);
}
|
5c70fe7e9bcbe71254031916c4fc67eb2d476121.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
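// Backward pass of the batch-norm variance term (sketch): with x_hat = (x - mean)/sqrt(variance + eps),
// d(x_hat)/d(variance) = -0.5*(x - mean)*(variance + eps)^(-3/2), so for each filter f
// variance_delta[f] = sum over batch and spatial of delta*(x - mean[f]) * -0.5f*(variance[f] + .00001f)^(-3/2).
// One block per filter: each thread accumulates a strided partial sum in shared memory and
// thread 0 performs the final serial reduction over the BLOCK partial sums.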
__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
}
}
__syncthreads();
if(id == 0){
variance_delta[filter] = 0;
for(i = 0; i < threads; ++i){
variance_delta[filter] += local[i];
}
variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f));
}
} | 5c70fe7e9bcbe71254031916c4fc67eb2d476121.cu | #include "includes.h"
__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
}
}
__syncthreads();
if(id == 0){
variance_delta[filter] = 0;
for(i = 0; i < threads; ++i){
variance_delta[filter] += local[i];
}
variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f));
}
} |
8629cac1b5459bfc4caa68aaac760d35d8257238.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=195 --blockDim=128
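// The //pass and //--gridDim/--blockDim lines above, together with the __requires and
// __invariant/__global_invariant annotations inside the kernel, appear to be GPUVerify-style
// verification contracts for that launch configuration rather than runtime CUDA/HIP calls.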
#include "common.h"
template <typename Real> __global__ void generatePaths(Real *const paths, hiprandState_t *const rngStates, const AsianOption<Real> *const option, const unsigned int numSims, const unsigned int numTimesteps);
template __global__ void generatePaths<float>(float *const paths, hiprandState_t *const rngStates, const AsianOption<float> *const option, const unsigned int numSims, const unsigned int numTimesteps);
__device__ static __attribute__((always_inline)) float getPathStep(float &drift, float &diffusion, hiprandState_t &state)
{
return expf(drift + diffusion * hiprand_normal(&state));
}
__device__ static __attribute__((always_inline)) double getPathStep(double &drift, double &diffusion, hiprandState_t &state)
{
return exp(drift + diffusion * hiprand_normal_double(&state));
}
// Path generation kernel
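// Each step draws Z ~ N(0,1) from the per-thread RNG state and advances a geometric Brownian
// motion path: S_{t+1} = S_t * exp((r - 0.5*sigma^2)*dt + sigma*sqrt(dt)*Z); the kernel below
// precomputes those two terms as drift and diffusion.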
template <typename Real>
__global__ void generatePaths(Real *const paths,
hiprandState_t *const rngStates,
const AsianOption<Real> *const option,
const unsigned int numSims,
const unsigned int numTimesteps)
{
__requires(numSims == 100000);
__requires(numTimesteps == 87);
// Determine thread ID
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int step = gridDim.x * blockDim.x;
// Compute parameters
Real drift = (option->r - static_cast<Real>(0.5) * option->sigma * option->sigma) * option->dt;
Real diffusion = option->sigma * sqrt(option->dt);
// Initialise the RNG
hiprandState_t localState = rngStates[tid];
for (unsigned int i = tid ;
__global_invariant(i%step == tid),
__global_invariant(__write_implies(paths, (__write_offset_bytes(paths)/sizeof(Real))%numSims%step == tid)),
i < numSims ; i += step)
{
// Shift the output pointer
Real *output = paths + i;
// Simulate the path
Real s = static_cast<Real>(1);
for (unsigned int t = 0 ;
__invariant(__ptr_offset_bytes(output)/sizeof(Real) - i == t * numSims),
__global_invariant(__write_implies(paths, (__write_offset_bytes(paths)/sizeof(Real))%numSims%step == tid)),
t < numTimesteps ; t++, output += numSims)
{
s *= getPathStep(drift, diffusion, localState);
*output = s;
}
}
}
| 8629cac1b5459bfc4caa68aaac760d35d8257238.cu | //pass
//--gridDim=195 --blockDim=128
#include "common.h"
template <typename Real> __global__ void generatePaths(Real *const paths, curandState *const rngStates, const AsianOption<Real> *const option, const unsigned int numSims, const unsigned int numTimesteps);
template __global__ void generatePaths<float>(float *const paths, curandState *const rngStates, const AsianOption<float> *const option, const unsigned int numSims, const unsigned int numTimesteps);
__device__ static __attribute__((always_inline)) float getPathStep(float &drift, float &diffusion, curandState &state)
{
return expf(drift + diffusion * curand_normal(&state));
}
__device__ static __attribute__((always_inline)) double getPathStep(double &drift, double &diffusion, curandState &state)
{
return exp(drift + diffusion * curand_normal_double(&state));
}
// Path generation kernel
template <typename Real>
__global__ void generatePaths(Real *const paths,
curandState *const rngStates,
const AsianOption<Real> *const option,
const unsigned int numSims,
const unsigned int numTimesteps)
{
__requires(numSims == 100000);
__requires(numTimesteps == 87);
// Determine thread ID
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int step = gridDim.x * blockDim.x;
// Compute parameters
Real drift = (option->r - static_cast<Real>(0.5) * option->sigma * option->sigma) * option->dt;
Real diffusion = option->sigma * sqrt(option->dt);
// Initialise the RNG
curandState localState = rngStates[tid];
for (unsigned int i = tid ;
__global_invariant(i%step == tid),
__global_invariant(__write_implies(paths, (__write_offset_bytes(paths)/sizeof(Real))%numSims%step == tid)),
i < numSims ; i += step)
{
// Shift the output pointer
Real *output = paths + i;
// Simulate the path
Real s = static_cast<Real>(1);
for (unsigned int t = 0 ;
__invariant(__ptr_offset_bytes(output)/sizeof(Real) - i == t * numSims),
__global_invariant(__write_implies(paths, (__write_offset_bytes(paths)/sizeof(Real))%numSims%step == tid)),
t < numTimesteps ; t++, output += numSims)
{
s *= getPathStep(drift, diffusion, localState);
*output = s;
}
}
}
|
147b886dabdd172358ae5902ecfe476e2180edc1.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#include <stdint.h>
#define CHECK(call)\
{\
const hipError_t error = call;\
if (error != hipSuccess)\
{\
printf("Error: %s:%d, ", __FILE__, __LINE__);\
printf("code: %d, reason: %s\n", error, hipGetErrorString(error));\
exit(1);\
}\
}\
__global__ void sum_array_gpu(float *a, float *b, float *c)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
c[i] = a[i] + b[i];
}
void sum_array_cpu(float *a, float *b, float *c, int n)
{
for (int i = 0; i < n; i++)
{
c[i] = a[i] + b[i];
}
}
void initData(float *data,int n)
{
time_t t;
srand((unsigned) time(&t));
for (int i = 0; i < n; i++)
data[i] = (float)(rand() & 0xFF) / 10.0f;
}
double cpu_sec()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
void check_sum(float *c, float *g, int n)
{
double epsilon = 1.0E-8;
int match = 1;
for (int i = 0; i < n; i++)
{
if (abs(c[i] - g[i]) > epsilon)
{
match = 0;
printf("Don't match!\n");
printf("host %5.2f device %5.2f at current %d\n", c[i], g[i], i);
break;
}
}
if (match)
printf("Arrays match\n\n");
return;
}
int main()
{
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
int nelem = 1 << 24;
printf("Vector size %d\n", nelem);
size_t nbytes = nelem * sizeof(float);
float *h_a, *h_b, *cpuref, *gpuref;
h_a = (float *)malloc(nbytes);
h_b = (float *)malloc(nbytes);
cpuref = (float *)malloc(nbytes);
gpuref = (float *)malloc(nbytes);
double istart, ielaps;
initData(h_a, nelem);
initData(h_b, nelem);
memset(cpuref, 0, nbytes);
memset(gpuref, 0, nbytes);
istart = cpu_sec();
sum_array_cpu(h_a, h_b, cpuref, nelem);
ielaps = cpu_sec() - istart;
printf("sum cpu time cost %f ms\n", ielaps*1000);
float *da, *db, *dc;
hipMalloc((float**)&da, nbytes);
hipMalloc((float**)&db, nbytes);
hipMalloc((float**)&dc, nbytes);
hipMemcpy(da, h_a, nbytes, hipMemcpyHostToDevice);
hipMemcpy(db, h_b, nbytes, hipMemcpyHostToDevice);
int len = 1024;
dim3 block(len);
dim3 grid((nelem+block.x-1)/block.x);
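// integer ceiling division so every element gets a thread; here nelem (1 << 24) is an exact
// multiple of block.x (1024), so the kernel can safely omit a bounds check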
istart = cpu_sec();
hipLaunchKernelGGL(( sum_array_gpu), dim3(grid), dim3(block), 0, 0, da,db,dc);
hipDeviceSynchronize();
ielaps = cpu_sec() - istart;
printf("sum gpu <<<%d,%d>>> time cost %f ms\n", grid.x, block.x, ielaps*1000);
hipMemcpy(gpuref, dc, nbytes, hipMemcpyDeviceToHost);
check_sum(cpuref, gpuref, nelem);
hipFree(da);
hipFree(db);
hipFree(dc);
free(h_a);
free(h_b);
free(cpuref);
free(gpuref);
int c = getchar();
return 0;
}
| 147b886dabdd172358ae5902ecfe476e2180edc1.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#include <stdint.h>
#define CHECK(call)\
{\
const cudaError_t error = call;\
if (error != cudaSuccess)\
{\
printf("Error: %s:%d, ", __FILE__, __LINE__);\
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error));\
exit(1);\
}\
}\
__global__ void sum_array_gpu(float *a, float *b, float *c)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
c[i] = a[i] + b[i];
}
void sum_array_cpu(float *a, float *b, float *c, int n)
{
for (int i = 0; i < n; i++)
{
c[i] = a[i] + b[i];
}
}
void initData(float *data,int n)
{
time_t t;
srand((unsigned) time(&t));
for (int i = 0; i < n; i++)
data[i] = (float)(rand() & 0xFF) / 10.0f;
}
double cpu_sec()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
void check_sum(float *c, float *g, int n)
{
double epsilon = 1.0E-8;
int match = 1;
for (int i = 0; i < n; i++)
{
if (abs(c[i] - g[i]) > epsilon)
{
match = 0;
printf("Don't match!\n");
printf("host %5.2f device %5.2f at current %d\n", c[i], g[i], i);
break;
}
}
if (match)
printf("Arrays match\n\n");
return;
}
int main()
{
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
int nelem = 1 << 24;
printf("Vector size %d\n", nelem);
size_t nbytes = nelem * sizeof(float);
float *h_a, *h_b, *cpuref, *gpuref;
h_a = (float *)malloc(nbytes);
h_b = (float *)malloc(nbytes);
cpuref = (float *)malloc(nbytes);
gpuref = (float *)malloc(nbytes);
double istart, ielaps;
initData(h_a, nelem);
initData(h_b, nelem);
memset(cpuref, 0, nbytes);
memset(gpuref, 0, nbytes);
istart = cpu_sec();
sum_array_cpu(h_a, h_b, cpuref, nelem);
ielaps = cpu_sec() - istart;
printf("sum cpu time cost %f ms\n", ielaps*1000);
float *da, *db, *dc;
cudaMalloc((float**)&da, nbytes);
cudaMalloc((float**)&db, nbytes);
cudaMalloc((float**)&dc, nbytes);
cudaMemcpy(da, h_a, nbytes, cudaMemcpyHostToDevice);
cudaMemcpy(db, h_b, nbytes, cudaMemcpyHostToDevice);
int len = 1024;
dim3 block(len);
dim3 grid((nelem+block.x-1)/block.x);
istart = cpu_sec();
sum_array_gpu<<<grid, block>>>(da,db,dc);
cudaDeviceSynchronize();
ielaps = cpu_sec() - istart;
printf("sum gpu <<<%d,%d>>> time cost %f ms\n", grid.x, block.x, ielaps*1000);
cudaMemcpy(gpuref, dc, nbytes, cudaMemcpyDeviceToHost);
check_sum(cpuref, gpuref, nelem);
cudaFree(da);
cudaFree(db);
cudaFree(dc);
free(h_a);
free(h_b);
free(cpuref);
free(gpuref);
int c = getchar();
return 0;
}
|
5ef46948b6d96b7c64dffaef7b49d976f5c69165.hip | // !!! This is a file automatically generated by hipify!!!
// system libraries
// use nvcc -o (output name) -Wno-deprecated-gpu-targets -std=c++11 -Xcompiler -fopenmp file_name.cu
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <chrono>
// size definition. modify as needed
#define N 2000
#define T_SIZE 32
using namespace std;
// safe call definition
static inline void _safe_cuda_call(hipError_t err, const char* msg, const char* file_name, const int line_number){
if(err!=hipSuccess){
fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// safe call definition
#define SAFE_CALL(call,msg) _safe_cuda_call(call,msg,__FILE__,__LINE__)
// initialize major row matrix
void initializeMatrix(float *ip, const int nxy){
srand (static_cast <unsigned> (time(0)));
float random;
for(int i = 0; i < nxy; i++){
random = 1.0 + static_cast <float> (rand()) /( static_cast <float> (RAND_MAX/(10.0-1.0)));
ip[i] = random;
}
return;
}
// utility function to check result
void checkResult(float *hostRef, float *gpuRef, const int nxy){
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < nxy; i++){
if (abs(hostRef[i] - gpuRef[i]) > epsilon){
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
// multiply matrix on host
void multiplyMatrixOnHost(float *A, float *B, float *C, const int nx){
for(int i = 0; i < nx; i++) {
for(int j = 0; j < nx; j++) {
for(int k = 0; k < nx; k++) {
C[i * nx + j] += A[i * nx + k] * B[j + k * nx];
}
}
}
return;
}
// function to multiply matrix on host with threads
void multiplyMatrixOnHostThreads(float *A, float *B, float *C, const int nx){
int i = 0;
// use the pragma directive to automatically paralelize
#pragma omp parallel for private(i) shared(A, B, C)
for(i = 0; i < nx; i++) {
for(int j = 0; j < nx; j++) {
for(int k = 0; k < nx; k++) {
C[i * nx + j] += A[i * nx + k] * B[j + k * nx];
}
}
}
return;
}
// kernel to multiply matrix on gpu
__global__ void multiplyMatrixOnGPU(float *A, float *B, float *C, const int nx){
// get ix and iy from cuda defined variables
unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
float sum = 0.0;
if (ix < nx && iy < nx){
for(int i = 0; i < nx ; i++)
sum += A[iy * nx + i] * B[i * nx + ix];
C[iy * nx + ix] = sum;
}
}
// Kernel GPU Tiles
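// Shared-memory tiling (sketch): C is produced in T_SIZE x T_SIZE blocks. Each pass of the
// while-loop stages one tile of A and one tile of B in shared memory, synchronizes, and lets
// every thread accumulate T_SIZE partial products, so global memory traffic drops by roughly
// a factor of T_SIZE compared with the naive kernel above. Out-of-range entries are zero-filled,
// which keeps partial tiles at the matrix edge correct.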
__global__ void multiplyMatrixOnGPUTiles(float *A, float *B, float *C, const int nx){
// Create the shared memory space as tiles
__shared__ float tileOne[T_SIZE][T_SIZE], tileTwo[T_SIZE][T_SIZE];
// Get the ix and iy indexes
unsigned int ix = T_SIZE * blockIdx.x + threadIdx.x;
unsigned int iy = T_SIZE * blockIdx.y + threadIdx.y;
// int limit = (T_SIZE + nx - 1)/T_SIZE;
// Looser bound kept for experimentation: ceil((nx + T_SIZE)/T_SIZE) may run one extra zero-padded tile iteration compared with the exact bound commented out above
int limit = ceilf(((float)T_SIZE + (float)nx)/(float)T_SIZE);
// Partial Sum acumulator
float partialSum = 0.0;
int i = 0;
while(i < limit){
// Fetch values for each value of the tiles with restriction
if ((iy < nx) && ((i * T_SIZE + threadIdx.x) < nx)){
int id = (iy * nx) + (i * T_SIZE) + threadIdx.x;
tileOne[threadIdx.y][threadIdx.x] = A[id];
}else{
tileOne[threadIdx.y][threadIdx.x] = 0.0;
// DO NOT PRINT RACE CONDITION GIVES WRONG OUTPUT
// cuPrintf(""); <--- deprecated
// printf("Improper Tile Size in X domain, zeroing\n");
}
// Wait for threads to finish
__syncthreads();
// Fetch values for each value of the tiles with restriction
if ((ix < nx) && ((i * T_SIZE + threadIdx.y) < nx)){
int id = (i * T_SIZE + threadIdx.y) * nx + ix;
tileTwo[threadIdx.y][threadIdx.x] = B[id];
}else{
tileTwo[threadIdx.y][threadIdx.x] = 0.0;
// DO NOT PRINT RACE CONDITION GIVES WRONG OUTPUT
// printf("Improper Tile Size in Y domain, zeroing\n");
}
// Wait for threads to finish
__syncthreads();
//Perform partial sum on tile
#pragma unroll // T_SIZE is constant
for (int j = 0; j < T_SIZE; j++){
partialSum += tileOne[threadIdx.y][j] * tileTwo[j][threadIdx.x];
}
// DO NOT PRINT RACE CONDITION GIVES WRONG OUTPUT
//printf("Partial Sum fetched with value %f\n", partialSum);
// Wait for threads to finish
__syncthreads();
i++;
}
if (ix < nx && iy < nx)
C[((blockIdx.y * blockDim.y + threadIdx.y) * nx) + (blockIdx.x * blockDim.x) + threadIdx.x] = partialSum;
}
int main(int argc, char* argv[]) {
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
SAFE_CALL(hipGetDeviceProperties(&deviceProp, dev), "Error device prop");
printf("Using Device %d: %s\n", dev, deviceProp.name);
SAFE_CALL(hipSetDevice(dev), "Error setting device");
int nx = N;
int ny = N;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float); // bytes needed for nxy float elements
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A = (float *)malloc(nBytes);
float *h_B = (float *)malloc(nBytes);
float *hostRef = (float *)malloc(nBytes);
float *hostRefThreads = (float *)malloc(nBytes);
float *gpuRef = (float *)malloc(nBytes);
float *gpuRefTiles = (float *)malloc(nBytes);
// initialize matrix
initializeMatrix(h_A, nxy);
initializeMatrix(h_B, nxy);
// initialize to 0
memset(hostRef, 0, nBytes);
memset(hostRefThreads, 0, nBytes);
memset(gpuRef, 0, nBytes);
memset(gpuRefTiles, 0, nBytes);
// // multiply matrix on host
// auto start_cpu = std::chrono::high_resolution_clock::now();
// multiplyMatrixOnHost(h_A, h_B, hostRef, nx);
// auto end_cpu = std::chrono::high_resolution_clock::now();
// std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
// printf("multiplyMatrixOnHost elapsed %f ms\n", duration_ms.count());
// // multiply matrix on host with threads
// start_cpu = std::chrono::high_resolution_clock::now();
// multiplyMatrixOnHostThreads(h_A, h_B, hostRefThreads, nx);
// end_cpu = std::chrono::high_resolution_clock::now();
// duration_ms = end_cpu - start_cpu;
// printf("multiplyMatrixOnHostThreads elapsed %f ms\n", duration_ms.count());
// // check results
// checkResult(hostRef, hostRefThreads, nx);
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC, *d_MatD;
SAFE_CALL(hipMalloc((void **)&d_MatA, nBytes), "Error allocating d_MatA");
SAFE_CALL(hipMalloc((void **)&d_MatB, nBytes), "Error allocating d_MatB");
SAFE_CALL(hipMalloc((void **)&d_MatC, nBytes), "Error allocating d_MatC");
SAFE_CALL(hipMalloc((void **)&d_MatD, nBytes), "Error allocating d_MatC");
// transfer data from host to device
SAFE_CALL(hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice), "Error copying d_MatA");
SAFE_CALL(hipMemcpy(d_MatB, h_B, nBytes, hipMemcpyHostToDevice), "Error copying d_MatB");
SAFE_CALL(hipMemset(d_MatC, 0, nBytes), "Error copying d_MatB");
SAFE_CALL(hipMemset(d_MatD, 0, nBytes), "Error copying d_MatB");
// kernel definition and launch
dim3 block(T_SIZE, T_SIZE);
// alternative grid sizing kept for experimentation (equivalent to the integer ceiling division commented out below)
// dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
dim3 grid((int)ceil((float)nx / T_SIZE), (int)ceil((float)nx / T_SIZE));
// launch
auto start_cpu = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( multiplyMatrixOnGPU), dim3(grid), dim3(block), 0, 0, d_MatA, d_MatB, d_MatC, nx);
SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel");
auto end_cpu = std::chrono::high_resolution_clock::now();
// measure total time
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("multiplyMatrixOnGPU elapsed %f ms\n", duration_ms.count());
// SAFE_CALL kernel error
SAFE_CALL(hipGetLastError(), "Error with last error");
// copy kernel result back to host side
SAFE_CALL(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost), "Error copying d_MatC");
// check device results
//checkResult(hostRef, gpuRef, nx);
// GPU TILE VERSION AND COMPARISON
// launch
start_cpu = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( multiplyMatrixOnGPUTiles), dim3(grid), dim3(block), 0, 0, d_MatA, d_MatB, d_MatD, nx);
SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel");
end_cpu = std::chrono::high_resolution_clock::now();
// measure total time
duration_ms = end_cpu - start_cpu;
printf("multiplyMatrixOnGPUTiles elapsed %f ms\n", duration_ms.count());
// SAFE_CALL kernel error
SAFE_CALL(hipGetLastError(), "Error with last error");
// copy kernel result back to host side
SAFE_CALL(hipMemcpy(gpuRefTiles, d_MatD, nBytes, hipMemcpyDeviceToHost), "Error copying d_MatC");
// check device results
checkResult(gpuRef, gpuRefTiles, nx);
// END GPU TILE VERSION AND COMPARISON
// free device global memory
SAFE_CALL(hipFree(d_MatA), "Error freeing memory");
SAFE_CALL(hipFree(d_MatB), "Error freeing memory");
SAFE_CALL(hipFree(d_MatC), "Error freeing memory");
SAFE_CALL(hipFree(d_MatD), "Error freeing memory");
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(hostRefThreads);
free(gpuRef);
free(gpuRefTiles);
// reset device
SAFE_CALL(hipDeviceReset(), "Error reseting");
return (0);
}
| 5ef46948b6d96b7c64dffaef7b49d976f5c69165.cu | // system libraries
// use nvcc -o (output name) -Wno-deprecated-gpu-targets -std=c++11 -Xcompiler -fopenmp file_name.cu
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <chrono>
// size definition. modify as needed
#define N 2000
#define T_SIZE 32
using namespace std;
// safe call definition
static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number){
if(err!=cudaSuccess){
fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// safe call definition
#define SAFE_CALL(call,msg) _safe_cuda_call(call,msg,__FILE__,__LINE__)
// initialize major row matrix
void initializeMatrix(float *ip, const int nxy){
srand (static_cast <unsigned> (time(0)));
float random;
for(int i = 0; i < nxy; i++){
random = 1.0 + static_cast <float> (rand()) /( static_cast <float> (RAND_MAX/(10.0-1.0)));
ip[i] = random;
}
return;
}
// utility function to check result
void checkResult(float *hostRef, float *gpuRef, const int nxy){
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < nxy; i++){
if (abs(hostRef[i] - gpuRef[i]) > epsilon){
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
// multiply matrix on host
void multiplyMatrixOnHost(float *A, float *B, float *C, const int nx){
for(int i = 0; i < nx; i++) {
for(int j = 0; j < nx; j++) {
for(int k = 0; k < nx; k++) {
C[i * nx + j] += A[i * nx + k] * B[j + k * nx];
}
}
}
return;
}
// function to multiply matrix on host with threads
void multiplyMatrixOnHostThreads(float *A, float *B, float *C, const int nx){
int i = 0;
// use the pragma directive to automatically paralelize
#pragma omp parallel for private(i) shared(A, B, C)
for(i = 0; i < nx; i++) {
for(int j = 0; j < nx; j++) {
for(int k = 0; k < nx; k++) {
C[i * nx + j] += A[i * nx + k] * B[j + k * nx];
}
}
}
return;
}
// kernel to multiply matrix on gpu
__global__ void multiplyMatrixOnGPU(float *A, float *B, float *C, const int nx){
// get ix and iy from cuda defined variables
unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
float sum = 0.0;
if (ix < nx && iy < nx){
for(int i = 0; i < nx ; i++)
sum += A[iy * nx + i] * B[i * nx + ix];
C[iy * nx + ix] = sum;
}
}
// Tiled matrix multiplication kernel: each block stages T_SIZE x T_SIZE tiles of A and B
// in shared memory and accumulates the partial dot product for its output element tile by tile
__global__ void multiplyMatrixOnGPUTiles(float *A, float *B, float *C, const int nx){
// Create the shared memory space as tiles
__shared__ float tileOne[T_SIZE][T_SIZE], tileTwo[T_SIZE][T_SIZE];
// Get the ix and iy indexes
unsigned int ix = T_SIZE * blockIdx.x + threadIdx.x;
unsigned int iy = T_SIZE * blockIdx.y + threadIdx.y;
// int limit = (T_SIZE + nx - 1)/T_SIZE;
  // Alternative limit kept for experimentation (may run one extra, fully guarded iteration)
int limit = ceilf(((float)T_SIZE + (float)nx)/(float)T_SIZE);
  // Partial sum accumulator
float partialSum = 0.0;
int i = 0;
while(i < limit){
// Fetch values for each value of the tiles with restriction
if ((iy < nx) && ((i * T_SIZE + threadIdx.x) < nx)){
int id = (iy * nx) + (i * T_SIZE) + threadIdx.x;
tileOne[threadIdx.y][threadIdx.x] = A[id];
}else{
tileOne[threadIdx.y][threadIdx.x] = 0.0;
// DO NOT PRINT RACE CONDITION GIVES WRONG OUTPUT
// cuPrintf(""); <--- deprecated
// printf("Improper Tile Size in X domain, zeroing\n");
}
// Wait for threads to finish
__syncthreads();
// Fetch values for each value of the tiles with restriction
if ((ix < nx) && ((i * T_SIZE + threadIdx.y) < nx)){
int id = (i * T_SIZE + threadIdx.y) * nx + ix;
tileTwo[threadIdx.y][threadIdx.x] = B[id];
}else{
tileTwo[threadIdx.y][threadIdx.x] = 0.0;
// DO NOT PRINT RACE CONDITION GIVES WRONG OUTPUT
// printf("Improper Tile Size in Y domain, zeroing\n");
}
// Wait for threads to finish
__syncthreads();
//Perform partial sum on tile
#pragma unroll // T_SIZE is constant
for (int j = 0; j < T_SIZE; j++){
partialSum += tileOne[threadIdx.y][j] * tileTwo[j][threadIdx.x];
}
// DO NOT PRINT RACE CONDITION GIVES WRONG OUTPUT
//printf("Partial Sum fetched with value %f\n", partialSum);
// Wait for threads to finish
__syncthreads();
i++;
}
if (ix < nx && iy < nx)
C[((blockIdx.y * blockDim.y + threadIdx.y) * nx) + (blockIdx.x * blockDim.x) + threadIdx.x] = partialSum;
}
int main(int argc, char* argv[]) {
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
SAFE_CALL(cudaGetDeviceProperties(&deviceProp, dev), "Error device prop");
printf("Using Device %d: %s\n", dev, deviceProp.name);
SAFE_CALL(cudaSetDevice(dev), "Error setting device");
int nx = N;
int ny = N;
int nxy = nx * ny;
  int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A = (float *)malloc(nBytes);
float *h_B = (float *)malloc(nBytes);
float *hostRef = (float *)malloc(nBytes);
float *hostRefThreads = (float *)malloc(nBytes);
float *gpuRef = (float *)malloc(nBytes);
float *gpuRefTiles = (float *)malloc(nBytes);
// initialize matrix
initializeMatrix(h_A, nxy);
initializeMatrix(h_B, nxy);
// initialize to 0
memset(hostRef, 0, nBytes);
memset(hostRefThreads, 0, nBytes);
memset(gpuRef, 0, nBytes);
memset(gpuRefTiles, 0, nBytes);
// // multiply matrix on host
// auto start_cpu = std::chrono::high_resolution_clock::now();
// multiplyMatrixOnHost(h_A, h_B, hostRef, nx);
// auto end_cpu = std::chrono::high_resolution_clock::now();
// std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
// printf("multiplyMatrixOnHost elapsed %f ms\n", duration_ms.count());
// // multiply matrix on host with threads
// start_cpu = std::chrono::high_resolution_clock::now();
// multiplyMatrixOnHostThreads(h_A, h_B, hostRefThreads, nx);
// end_cpu = std::chrono::high_resolution_clock::now();
// duration_ms = end_cpu - start_cpu;
// printf("multiplyMatrixOnHostThreads elapsed %f ms\n", duration_ms.count());
// // check results
// checkResult(hostRef, hostRefThreads, nx);
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC, *d_MatD;
SAFE_CALL(cudaMalloc((void **)&d_MatA, nBytes), "Error allocating d_MatA");
SAFE_CALL(cudaMalloc((void **)&d_MatB, nBytes), "Error allocating d_MatB");
SAFE_CALL(cudaMalloc((void **)&d_MatC, nBytes), "Error allocating d_MatC");
  SAFE_CALL(cudaMalloc((void **)&d_MatD, nBytes), "Error allocating d_MatD");
// transfer data from host to device
SAFE_CALL(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice), "Error copying d_MatA");
SAFE_CALL(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice), "Error copying d_MatB");
  SAFE_CALL(cudaMemset(d_MatC, 0, nBytes), "Error setting d_MatC");
  SAFE_CALL(cudaMemset(d_MatD, 0, nBytes), "Error setting d_MatD");
// kernel definition and launch
dim3 block(T_SIZE, T_SIZE);
// use other grid to experiment
// dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
dim3 grid((int)ceil((float)nx / T_SIZE), (int)ceil((float)nx / T_SIZE));
// launch
auto start_cpu = std::chrono::high_resolution_clock::now();
multiplyMatrixOnGPU<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx);
SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel");
auto end_cpu = std::chrono::high_resolution_clock::now();
// measure total time
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("multiplyMatrixOnGPU elapsed %f ms\n", duration_ms.count());
// SAFE_CALL kernel error
  SAFE_CALL(cudaGetLastError(), "Kernel launch failed");
// copy kernel result back to host side
SAFE_CALL(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost), "Error copying d_MatC");
// check device results
//checkResult(hostRef, gpuRef, nx);
  // GPU TILE VERSION AND COMPARISON
// launch
start_cpu = std::chrono::high_resolution_clock::now();
multiplyMatrixOnGPUTiles<<<grid, block>>>(d_MatA, d_MatB, d_MatD, nx);
SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel");
end_cpu = std::chrono::high_resolution_clock::now();
// measure total time
duration_ms = end_cpu - start_cpu;
printf("multiplyMatrixOnGPUTiles elapsed %f ms\n", duration_ms.count());
// SAFE_CALL kernel error
  SAFE_CALL(cudaGetLastError(), "Kernel launch failed");
// copy kernel result back to host side
  SAFE_CALL(cudaMemcpy(gpuRefTiles, d_MatD, nBytes, cudaMemcpyDeviceToHost), "Error copying d_MatD");
// check device results
checkResult(gpuRef, gpuRefTiles, nx);
  // END GPU TILE VERSION AND COMPARISON
// free device global memory
SAFE_CALL(cudaFree(d_MatA), "Error freeing memory");
SAFE_CALL(cudaFree(d_MatB), "Error freeing memory");
SAFE_CALL(cudaFree(d_MatC), "Error freeing memory");
SAFE_CALL(cudaFree(d_MatD), "Error freeing memory");
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(hostRefThreads);
free(gpuRef);
free(gpuRefTiles);
// reset device
  SAFE_CALL(cudaDeviceReset(), "Error resetting");
return (0);
}
|
8b7c92097cccca4472b708e594cfdc042f4bb6a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Divide( float * x, size_t idx, size_t N, float W0, float W1)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
{
if (abs(x[(idx-2)*N+i]) > 0.00000001)
x[(idx-2)*N+i] = ((W0*x[(idx-1)*N+i]) / (W1*x[(idx-2)*N+i]));
else
x[(idx-2)*N+i] = 1.0 ;
//printf("Result is %f\n", x[(idx-2)*N+i]);
}
return;
} | 8b7c92097cccca4472b708e594cfdc042f4bb6a1.cu | #include "includes.h"
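// Element-wise division kernel: overwrites row (idx-2) of x with the ratio
// (W0 * x[(idx-1)*N + i]) / (W1 * x[(idx-2)*N + i]) for each of the N columns,
// writing 1.0 wherever the denominator entry is (near) zero. The grid-stride
// loop lets any launch configuration cover all N elements.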
__global__ void Divide( float * x, size_t idx, size_t N, float W0, float W1)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
{
if (abs(x[(idx-2)*N+i]) > 0.00000001)
x[(idx-2)*N+i] = ((W0*x[(idx-1)*N+i]) / (W1*x[(idx-2)*N+i]));
else
x[(idx-2)*N+i] = 1.0 ;
//printf("Result is %f\n", x[(idx-2)*N+i]);
}
return;
} |
stationarity.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/tsa/stationarity.h>
#include <raft/core/handle.hpp>
#include <timeSeries/stationarity.cuh>
namespace ML {
namespace Stationarity {
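// Type-generic helper: pulls the stream from the RAFT handle and forwards the
// batched KPSS stationarity test to MLCommon::TimeSeries::kpss_test.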
template <typename DataT>
inline void kpss_test_helper(const raft::handle_t& handle,
const DataT* d_y,
bool* results,
int batch_size,
int n_obs,
int d,
int D,
int s,
DataT pval_threshold)
{
const auto& handle_impl = handle;
hipStream_t stream = handle_impl.get_stream();
MLCommon::TimeSeries::kpss_test(d_y, results, batch_size, n_obs, d, D, s, stream, pval_threshold);
}
void kpss_test(const raft::handle_t& handle,
const float* d_y,
bool* results,
int batch_size,
int n_obs,
int d,
int D,
int s,
float pval_threshold)
{
kpss_test_helper<float>(handle, d_y, results, batch_size, n_obs, d, D, s, pval_threshold);
}
void kpss_test(const raft::handle_t& handle,
const double* d_y,
bool* results,
int batch_size,
int n_obs,
int d,
int D,
int s,
double pval_threshold)
{
kpss_test_helper<double>(handle, d_y, results, batch_size, n_obs, d, D, s, pval_threshold);
}
} // namespace Stationarity
} // namespace ML
| stationarity.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/tsa/stationarity.h>
#include <raft/core/handle.hpp>
#include <timeSeries/stationarity.cuh>
namespace ML {
namespace Stationarity {
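// Type-generic helper: pulls the stream from the RAFT handle and forwards the
// batched KPSS stationarity test to MLCommon::TimeSeries::kpss_test.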
template <typename DataT>
inline void kpss_test_helper(const raft::handle_t& handle,
const DataT* d_y,
bool* results,
int batch_size,
int n_obs,
int d,
int D,
int s,
DataT pval_threshold)
{
const auto& handle_impl = handle;
cudaStream_t stream = handle_impl.get_stream();
MLCommon::TimeSeries::kpss_test(d_y, results, batch_size, n_obs, d, D, s, stream, pval_threshold);
}
void kpss_test(const raft::handle_t& handle,
const float* d_y,
bool* results,
int batch_size,
int n_obs,
int d,
int D,
int s,
float pval_threshold)
{
kpss_test_helper<float>(handle, d_y, results, batch_size, n_obs, d, D, s, pval_threshold);
}
void kpss_test(const raft::handle_t& handle,
const double* d_y,
bool* results,
int batch_size,
int n_obs,
int d,
int D,
int s,
double pval_threshold)
{
kpss_test_helper<double>(handle, d_y, results, batch_size, n_obs, d, D, s, pval_threshold);
}
} // namespace Stationarity
} // namespace ML
|
5f1748ea6022c55860d65d6711b57a621f63950c.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <cstring>
#include "cutlass/numeric_types.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/library/util.h"
#include "device_allocation.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
size_t DeviceAllocation::bytes(library::NumericTypeID type, size_t capacity) {
return size_t(cutlass::library::sizeof_bits(type)) * capacity / 8;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Layout>
static std::vector<int> get_packed_layout_stride(
std::vector<int> const& extent) {
typename Layout::TensorCoord extent_coord;
typename Layout::Stride stride_coord;
if (extent.size() != size_t(Layout::kRank)) {
throw std::runtime_error(
"Layout does not have same rank as extent vector.");
}
for (int i = 0; i < Layout::kRank; ++i) {
extent_coord[i] = extent.at(i);
}
std::vector<int> stride;
stride.resize(Layout::kStrideRank, 0);
Layout layout = Layout::packed(extent_coord);
stride_coord = layout.stride();
for (int i = 0; i < Layout::kStrideRank; ++i) {
stride.at(i) = stride_coord[i];
}
return stride;
}
/// Returns the stride of a packed layout
std::vector<int> DeviceAllocation::get_packed_layout(
library::LayoutTypeID layout_id, std::vector<int> const& extent) {
std::vector<int> stride;
switch (layout_id) {
case library::LayoutTypeID::kColumnMajor:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajor>(
extent);
break;
case library::LayoutTypeID::kRowMajor:
stride =
get_packed_layout_stride<cutlass::layout::RowMajor>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK2:
stride = get_packed_layout_stride<
cutlass::layout::ColumnMajorInterleaved<2>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK2:
stride = get_packed_layout_stride<
cutlass::layout::RowMajorInterleaved<2>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK4:
stride = get_packed_layout_stride<
cutlass::layout::ColumnMajorInterleaved<4>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK4:
stride = get_packed_layout_stride<
cutlass::layout::RowMajorInterleaved<4>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK16:
stride = get_packed_layout_stride<
cutlass::layout::ColumnMajorInterleaved<16>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK16:
stride = get_packed_layout_stride<
cutlass::layout::RowMajorInterleaved<16>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK32:
stride = get_packed_layout_stride<
cutlass::layout::ColumnMajorInterleaved<32>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK32:
stride = get_packed_layout_stride<
cutlass::layout::RowMajorInterleaved<32>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK64:
stride = get_packed_layout_stride<
cutlass::layout::ColumnMajorInterleaved<64>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK64:
stride = get_packed_layout_stride<
cutlass::layout::RowMajorInterleaved<64>>(extent);
break;
case library::LayoutTypeID::kTensorNCHW:
stride = get_packed_layout_stride<cutlass::layout::TensorNCHW>(
extent);
break;
case library::LayoutTypeID::kTensorNHWC:
stride = get_packed_layout_stride<cutlass::layout::TensorNHWC>(
extent);
break;
case library::LayoutTypeID::kTensorNDHWC:
stride = get_packed_layout_stride<cutlass::layout::TensorNDHWC>(
extent);
break;
case library::LayoutTypeID::kTensorNC32HW32:
stride =
get_packed_layout_stride<cutlass::layout::TensorNCxHWx<32>>(
extent);
break;
case library::LayoutTypeID::kTensorNC64HW64:
stride =
get_packed_layout_stride<cutlass::layout::TensorNCxHWx<64>>(
extent);
break;
case library::LayoutTypeID::kTensorC32RSK32:
stride =
get_packed_layout_stride<cutlass::layout::TensorCxRSKx<32>>(
extent);
break;
case library::LayoutTypeID::kTensorC64RSK64:
stride =
get_packed_layout_stride<cutlass::layout::TensorCxRSKx<64>>(
extent);
break;
default:
break;
}
return stride;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template to use CUTLASS Layout functions to construct a layout in the given buffer
/// and compute the allocation capacity implied by an extent/stride pair
template <typename Layout>
static size_t construct_layout_(void* bytes, library::LayoutTypeID layout_id,
std::vector<int> const& extent,
std::vector<int>& stride) {
if (extent.size() != Layout::kRank) {
throw std::runtime_error(
"Layout must have same rank as extent vector.");
}
if (Layout::kStrideRank && stride.empty()) {
stride = get_packed_layout_stride<Layout>(extent);
return construct_layout_<Layout>(bytes, layout_id, extent, stride);
} else if (Layout::kStrideRank && stride.size() != Layout::kStrideRank) {
throw std::runtime_error(
"Layout requires either empty stride or stride vector matching "
"Layout::kStrideRank");
}
typename Layout::Stride stride_coord;
for (int i = 0; i < Layout::kStrideRank; ++i) {
stride_coord[i] = stride.at(i);
}
typename Layout::TensorCoord extent_coord;
for (int i = 0; i < Layout::kRank; ++i) {
extent_coord[i] = extent.at(i);
}
// Construct the CUTLASS layout object from the stride object
Layout layout(stride_coord);
// Pack it into bytes
if (bytes) {
*reinterpret_cast<Layout*>(bytes) = layout;
}
// Return capacity
size_t capacity_ = layout.capacity(extent_coord);
return capacity_;
}
/// returns the capacity needed
size_t DeviceAllocation::construct_layout(void* bytes,
library::LayoutTypeID layout_id,
std::vector<int> const& extent,
std::vector<int>& stride) {
switch (layout_id) {
case library::LayoutTypeID::kColumnMajor:
return construct_layout_<cutlass::layout::ColumnMajor>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajor:
return construct_layout_<cutlass::layout::RowMajor>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK2:
return construct_layout_<
cutlass::layout::ColumnMajorInterleaved<2>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK2:
return construct_layout_<cutlass::layout::RowMajorInterleaved<2>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK4:
return construct_layout_<
cutlass::layout::ColumnMajorInterleaved<4>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK4:
return construct_layout_<cutlass::layout::RowMajorInterleaved<4>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK16:
return construct_layout_<
cutlass::layout::ColumnMajorInterleaved<16>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK16:
return construct_layout_<cutlass::layout::RowMajorInterleaved<16>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK32:
return construct_layout_<
cutlass::layout::ColumnMajorInterleaved<32>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK32:
return construct_layout_<cutlass::layout::RowMajorInterleaved<32>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK64:
return construct_layout_<
cutlass::layout::ColumnMajorInterleaved<64>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK64:
return construct_layout_<cutlass::layout::RowMajorInterleaved<64>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNCHW:
return construct_layout_<cutlass::layout::TensorNHWC>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNHWC:
return construct_layout_<cutlass::layout::TensorNHWC>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNDHWC:
return construct_layout_<cutlass::layout::TensorNDHWC>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNC32HW32:
return construct_layout_<cutlass::layout::TensorNCxHWx<32>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNC64HW64:
return construct_layout_<cutlass::layout::TensorNCxHWx<64>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorC32RSK32:
return construct_layout_<cutlass::layout::TensorCxRSKx<32>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorC64RSK64:
return construct_layout_<cutlass::layout::TensorCxRSKx<64>>(
bytes, layout_id, extent, stride);
default:
break;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
DeviceAllocation::DeviceAllocation()
: type_(library::NumericTypeID::kInvalid),
batch_stride_(0),
capacity_(0),
pointer_(nullptr),
layout_(library::LayoutTypeID::kUnknown),
batch_count_(1) {}
DeviceAllocation::DeviceAllocation(library::NumericTypeID type, size_t capacity)
: type_(type),
batch_stride_(capacity),
capacity_(capacity),
pointer_(nullptr),
layout_(library::LayoutTypeID::kUnknown),
batch_count_(1) {
hipError_t result = hipMalloc((void**)&pointer_, bytes(type, capacity));
if (result != hipSuccess) {
type_ = library::NumericTypeID::kInvalid;
capacity_ = 0;
pointer_ = nullptr;
throw std::bad_alloc();
}
}
DeviceAllocation::DeviceAllocation(library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const& extent,
std::vector<int> const& stride,
int batch_count)
: type_(type),
batch_stride_(size_t(0)),
capacity_(size_t(0)),
pointer_(nullptr),
batch_count_(1) {
reset(type, layout_id, extent, stride, batch_count);
}
DeviceAllocation::~DeviceAllocation() {
if (pointer_) {
hipFree(pointer_);
}
}
DeviceAllocation& DeviceAllocation::reset() {
if (pointer_) {
hipFree(pointer_);
}
type_ = library::NumericTypeID::kInvalid;
batch_stride_ = 0;
capacity_ = 0;
pointer_ = nullptr;
layout_ = library::LayoutTypeID::kUnknown;
stride_.clear();
extent_.clear();
tensor_ref_buffer_.clear();
batch_count_ = 1;
return *this;
}
DeviceAllocation& DeviceAllocation::reset(library::NumericTypeID type,
size_t capacity) {
reset();
type_ = type;
batch_stride_ = capacity;
capacity_ = capacity;
hipError_t result = hipMalloc((void**)&pointer_, bytes(type_, capacity_));
if (result != hipSuccess) {
throw std::bad_alloc();
}
layout_ = library::LayoutTypeID::kUnknown;
stride_.clear();
extent_.clear();
batch_count_ = 1;
tensor_ref_buffer_.resize(sizeof(pointer_), 0);
std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_));
return *this;
}
/// Allocates memory for a given layout and tensor
DeviceAllocation& DeviceAllocation::reset(library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const& extent,
std::vector<int> const& stride,
int batch_count) {
reset();
tensor_ref_buffer_.resize(
sizeof(pointer_) +
(sizeof(int) * library::get_layout_stride_rank(layout_id)),
0);
type_ = type;
layout_ = layout_id;
stride_ = stride;
extent_ = extent;
batch_count_ = batch_count;
batch_stride_ =
construct_layout(tensor_ref_buffer_.data() + sizeof(pointer_),
layout_id, extent, stride_);
capacity_ = batch_stride_ * batch_count_;
hipError_t result = hipMalloc((void**)&pointer_, bytes(type, capacity_));
if (result != hipSuccess) {
throw std::bad_alloc();
}
std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_));
return *this;
}
bool DeviceAllocation::good() const {
return (capacity_ && pointer_);
}
library::NumericTypeID DeviceAllocation::type() const {
return type_;
}
void* DeviceAllocation::data() const {
return pointer_;
}
void* DeviceAllocation::batch_data(int batch_idx) const {
return static_cast<char*>(data()) + batch_stride_bytes() * batch_idx;
}
library::LayoutTypeID DeviceAllocation::layout() const {
return layout_;
}
std::vector<int> const& DeviceAllocation::stride() const {
return stride_;
}
/// Gets the extent vector
std::vector<int> const& DeviceAllocation::extent() const {
return extent_;
}
/// Gets the number of adjacent tensors in memory
int DeviceAllocation::batch_count() const {
return batch_count_;
}
/// Gets the stride (in units of elements) between items
int64_t DeviceAllocation::batch_stride() const {
return batch_stride_;
}
/// Gets the stride (in units of bytes) between items
int64_t DeviceAllocation::batch_stride_bytes() const {
return bytes(type_, batch_stride_);
}
size_t DeviceAllocation::capacity() const {
return capacity_;
}
size_t DeviceAllocation::bytes() const {
return bytes(type_, capacity_);
}
/// Copies from an equivalent-sized tensor in device memory
void DeviceAllocation::copy_from_device(void const* ptr) {
hipError_t result =
hipMemcpy(data(), ptr, bytes(), hipMemcpyDeviceToDevice);
if (result != hipSuccess) {
throw std::runtime_error("Failed device-to-device copy");
}
}
/// Copies from an equivalent-sized tensor in host memory
void DeviceAllocation::copy_from_host(void const* ptr) {
hipError_t result =
hipMemcpy(data(), ptr, bytes(), hipMemcpyHostToDevice);
if (result != hipSuccess) {
throw std::runtime_error("Failed device-to-device copy");
}
}
/// Copies to an equivalent-sized tensor in host memory
void DeviceAllocation::copy_to_host(void* ptr) {
hipError_t result =
hipMemcpy(ptr, data(), bytes(), hipMemcpyDeviceToHost);
if (result != hipSuccess) {
throw std::runtime_error("Failed device-to-device copy");
}
}
void DeviceAllocation::initialize_random_device(int seed, Distribution dist) {
if (!good()) {
throw std::runtime_error(
"Attempting to initialize invalid allocation.");
}
// Instantiate calls to CURAND here. This file takes a long time to compile
// for this reason.
switch (type_) {
case library::NumericTypeID::kF16:
cutlass::reference::device::BlockFillRandom<cutlass::half_t>(
reinterpret_cast<cutlass::half_t*>(pointer_), capacity_,
seed, dist);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::device::BlockFillRandom<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t*>(pointer_), capacity_,
seed, dist);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::device::BlockFillRandom<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t*>(pointer_), capacity_,
seed, dist);
break;
case library::NumericTypeID::kF32:
cutlass::reference::device::BlockFillRandom<float>(
reinterpret_cast<float*>(pointer_), capacity_, seed, dist);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::device::BlockFillRandom<complex<bfloat16_t>>(
reinterpret_cast<complex<bfloat16_t>*>(pointer_), capacity_,
seed, dist);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::device::BlockFillRandom<
cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t>*>(
pointer_),
capacity_, seed, dist);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::device::BlockFillRandom<
cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float>*>(pointer_),
capacity_, seed, dist);
break;
case library::NumericTypeID::kF64:
cutlass::reference::device::BlockFillRandom<double>(
reinterpret_cast<double*>(pointer_), capacity_, seed, dist);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::device::BlockFillRandom<complex<double>>(
reinterpret_cast<complex<double>*>(pointer_), capacity_,
seed, dist);
break;
case library::NumericTypeID::kS2:
cutlass::reference::device::BlockFillRandom<int2b_t>(
reinterpret_cast<int2b_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kS4:
cutlass::reference::device::BlockFillRandom<int4b_t>(
reinterpret_cast<int4b_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kS8:
cutlass::reference::device::BlockFillRandom<int8_t>(
reinterpret_cast<int8_t*>(pointer_), capacity_, seed, dist);
break;
case library::NumericTypeID::kS16:
cutlass::reference::device::BlockFillRandom<int16_t>(
reinterpret_cast<int16_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kS32:
cutlass::reference::device::BlockFillRandom<int32_t>(
reinterpret_cast<int32_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kS64:
cutlass::reference::device::BlockFillRandom<int64_t>(
reinterpret_cast<int64_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kB1:
cutlass::reference::device::BlockFillRandom<uint1b_t>(
reinterpret_cast<uint1b_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kU2:
cutlass::reference::device::BlockFillRandom<uint2b_t>(
reinterpret_cast<uint2b_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kU4:
cutlass::reference::device::BlockFillRandom<uint4b_t>(
reinterpret_cast<uint4b_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kU8:
cutlass::reference::device::BlockFillRandom<uint8_t>(
reinterpret_cast<uint8_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kU16:
cutlass::reference::device::BlockFillRandom<uint16_t>(
reinterpret_cast<uint16_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kU32:
cutlass::reference::device::BlockFillRandom<uint32_t>(
reinterpret_cast<uint32_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kU64:
cutlass::reference::device::BlockFillRandom<uint64_t>(
reinterpret_cast<uint64_t*>(pointer_), capacity_, seed,
dist);
break;
default:
break;
}
}
void DeviceAllocation::initialize_random_host(int seed, Distribution dist) {
if (!good()) {
throw std::runtime_error(
"Attempting to initialize invalid allocation.");
}
std::vector<uint8_t> host_data(bytes());
switch (type_) {
case library::NumericTypeID::kF16:
cutlass::reference::host::BlockFillRandom<cutlass::half_t>(
reinterpret_cast<cutlass::half_t*>(host_data.data()),
capacity_, seed, dist);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::host::BlockFillRandom<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t*>(host_data.data()),
capacity_, seed, dist);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::host::BlockFillRandom<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t*>(host_data.data()),
capacity_, seed, dist);
break;
case library::NumericTypeID::kF32:
cutlass::reference::host::BlockFillRandom<float>(
reinterpret_cast<float*>(host_data.data()), capacity_, seed,
dist);
break;
case library::NumericTypeID::kCF16:
cutlass::reference::host::BlockFillRandom<
cutlass::complex<cutlass::half_t>>(
reinterpret_cast<cutlass::complex<cutlass::half_t>*>(
host_data.data()),
capacity_, seed, dist);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::host::BlockFillRandom<
cutlass::complex<cutlass::bfloat16_t>>(
reinterpret_cast<cutlass::complex<cutlass::bfloat16_t>*>(
host_data.data()),
capacity_, seed, dist);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::host::BlockFillRandom<
cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t>*>(
host_data.data()),
capacity_, seed, dist);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::host::BlockFillRandom<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float>*>(
host_data.data()),
capacity_, seed, dist);
break;
case library::NumericTypeID::kF64:
cutlass::reference::host::BlockFillRandom<double>(
reinterpret_cast<double*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::host::BlockFillRandom<cutlass::complex<double>>(
reinterpret_cast<cutlass::complex<double>*>(
host_data.data()),
capacity_, seed, dist);
break;
case library::NumericTypeID::kS2:
cutlass::reference::host::BlockFillRandom<int2b_t>(
reinterpret_cast<int2b_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kS4:
cutlass::reference::host::BlockFillRandom<int4b_t>(
reinterpret_cast<int4b_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kS8:
cutlass::reference::host::BlockFillRandom<int8_t>(
reinterpret_cast<int8_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFillRandom<int16_t>(
reinterpret_cast<int16_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFillRandom<int32_t>(
reinterpret_cast<int32_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kS64:
cutlass::reference::host::BlockFillRandom<int64_t>(
reinterpret_cast<int64_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kB1:
cutlass::reference::host::BlockFillRandom<uint1b_t>(
reinterpret_cast<uint1b_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kU2:
cutlass::reference::host::BlockFillRandom<uint2b_t>(
reinterpret_cast<uint2b_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kU4:
cutlass::reference::host::BlockFillRandom<uint4b_t>(
reinterpret_cast<uint4b_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kU8:
cutlass::reference::host::BlockFillRandom<uint8_t>(
reinterpret_cast<uint8_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kU16:
cutlass::reference::host::BlockFillRandom<uint16_t>(
reinterpret_cast<uint16_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kU32:
cutlass::reference::host::BlockFillRandom<uint32_t>(
reinterpret_cast<uint32_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kU64:
cutlass::reference::host::BlockFillRandom<uint64_t>(
reinterpret_cast<uint64_t*>(host_data.data()), capacity_,
seed, dist);
break;
default:
break;
}
copy_from_host(host_data.data());
}
void DeviceAllocation::initialize_random_sparsemeta_device(int seed,
int MetaSizeInBits) {
if (!good()) {
throw std::runtime_error(
"Attempting to initialize invalid allocation.");
}
// Instantiate calls to CURAND here. This file takes a long time to compile
// for this reason.
switch (type_) {
case library::NumericTypeID::kU16:
cutlass::reference::device::BlockFillRandomSparseMeta<uint16_t>(
reinterpret_cast<uint16_t*>(pointer_), capacity_, seed,
MetaSizeInBits);
break;
case library::NumericTypeID::kU32:
cutlass::reference::device::BlockFillRandomSparseMeta<uint32_t>(
reinterpret_cast<uint32_t*>(pointer_), capacity_, seed,
MetaSizeInBits);
break;
default:
break;
}
}
void DeviceAllocation::initialize_random_sparsemeta_host(int seed,
int MetaSizeInBits) {
if (!good()) {
throw std::runtime_error(
"Attempting to initialize invalid allocation.");
}
std::vector<uint8_t> host_data(bytes());
switch (type_) {
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFillRandomSparseMeta<uint16_t>(
reinterpret_cast<uint16_t*>(host_data.data()), capacity_,
seed, MetaSizeInBits);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFillRandomSparseMeta<uint32_t>(
reinterpret_cast<uint32_t*>(host_data.data()), capacity_,
seed, MetaSizeInBits);
break;
default:
break;
}
copy_from_host(host_data.data());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns true if two blocks have exactly the same value
bool DeviceAllocation::block_compare_equal(library::NumericTypeID numeric_type,
void const* ptr_A, void const* ptr_B,
size_t capacity) {
switch (numeric_type) {
case library::NumericTypeID::kF16:
return reference::device::BlockCompareEqual<half_t>(
reinterpret_cast<half_t const*>(ptr_A),
reinterpret_cast<half_t const*>(ptr_B), capacity);
case library::NumericTypeID::kBF16:
return reference::device::BlockCompareEqual<bfloat16_t>(
reinterpret_cast<bfloat16_t const*>(ptr_A),
reinterpret_cast<bfloat16_t const*>(ptr_B), capacity);
case library::NumericTypeID::kTF32:
return reference::device::BlockCompareEqual<tfloat32_t>(
reinterpret_cast<tfloat32_t const*>(ptr_A),
reinterpret_cast<tfloat32_t const*>(ptr_B), capacity);
case library::NumericTypeID::kF32:
return reference::device::BlockCompareEqual<float>(
reinterpret_cast<float const*>(ptr_A),
reinterpret_cast<float const*>(ptr_B), capacity);
case library::NumericTypeID::kCF32:
return reference::device::BlockCompareEqual<
cutlass::complex<float>>(
reinterpret_cast<complex<float> const*>(ptr_A),
reinterpret_cast<complex<float> const*>(ptr_B), capacity);
case library::NumericTypeID::kCF16:
return reference::device::BlockCompareEqual<complex<half_t>>(
reinterpret_cast<complex<half_t> const*>(ptr_A),
reinterpret_cast<complex<half_t> const*>(ptr_B), capacity);
case library::NumericTypeID::kCBF16:
return reference::device::BlockCompareEqual<complex<bfloat16_t>>(
reinterpret_cast<complex<bfloat16_t> const*>(ptr_A),
reinterpret_cast<complex<bfloat16_t> const*>(ptr_B),
capacity);
case library::NumericTypeID::kCTF32:
return reference::device::BlockCompareEqual<complex<tfloat32_t>>(
reinterpret_cast<complex<tfloat32_t> const*>(ptr_A),
reinterpret_cast<complex<tfloat32_t> const*>(ptr_B),
capacity);
case library::NumericTypeID::kF64:
return reference::device::BlockCompareEqual<double>(
reinterpret_cast<double const*>(ptr_A),
reinterpret_cast<double const*>(ptr_B), capacity);
case library::NumericTypeID::kCF64:
return reference::device::BlockCompareEqual<complex<double>>(
reinterpret_cast<complex<double> const*>(ptr_A),
reinterpret_cast<complex<double> const*>(ptr_B), capacity);
case library::NumericTypeID::kS2:
return reference::device::BlockCompareEqual<int2b_t>(
reinterpret_cast<int2b_t const*>(ptr_A),
reinterpret_cast<int2b_t const*>(ptr_B), capacity);
case library::NumericTypeID::kS4:
return reference::device::BlockCompareEqual<int4b_t>(
reinterpret_cast<int4b_t const*>(ptr_A),
reinterpret_cast<int4b_t const*>(ptr_B), capacity);
case library::NumericTypeID::kS8:
return reference::device::BlockCompareEqual<int8_t>(
reinterpret_cast<int8_t const*>(ptr_A),
reinterpret_cast<int8_t const*>(ptr_B), capacity);
case library::NumericTypeID::kS16:
return reference::device::BlockCompareEqual<int16_t>(
reinterpret_cast<int16_t const*>(ptr_A),
reinterpret_cast<int16_t const*>(ptr_B), capacity);
case library::NumericTypeID::kS32:
return reference::device::BlockCompareEqual<int32_t>(
reinterpret_cast<int32_t const*>(ptr_A),
reinterpret_cast<int32_t const*>(ptr_B), capacity);
case library::NumericTypeID::kS64:
return reference::device::BlockCompareEqual<int64_t>(
reinterpret_cast<int64_t const*>(ptr_A),
reinterpret_cast<int64_t const*>(ptr_B), capacity);
case library::NumericTypeID::kB1:
return reference::device::BlockCompareEqual<uint1b_t>(
reinterpret_cast<uint1b_t const*>(ptr_A),
reinterpret_cast<uint1b_t const*>(ptr_B), capacity);
case library::NumericTypeID::kU2:
return reference::device::BlockCompareEqual<uint2b_t>(
reinterpret_cast<uint2b_t const*>(ptr_A),
reinterpret_cast<uint2b_t const*>(ptr_B), capacity);
case library::NumericTypeID::kU4:
return reference::device::BlockCompareEqual<uint4b_t>(
reinterpret_cast<uint4b_t const*>(ptr_A),
reinterpret_cast<uint4b_t const*>(ptr_B), capacity);
case library::NumericTypeID::kU8:
return reference::device::BlockCompareEqual<uint8_t>(
reinterpret_cast<uint8_t const*>(ptr_A),
reinterpret_cast<uint8_t const*>(ptr_B), capacity);
case library::NumericTypeID::kU16:
return reference::device::BlockCompareEqual<uint16_t>(
reinterpret_cast<uint16_t const*>(ptr_A),
reinterpret_cast<uint16_t const*>(ptr_B), capacity);
case library::NumericTypeID::kU32:
return reference::device::BlockCompareEqual<uint32_t>(
reinterpret_cast<uint32_t const*>(ptr_A),
reinterpret_cast<uint32_t const*>(ptr_B), capacity);
case library::NumericTypeID::kU64:
return reference::device::BlockCompareEqual<uint64_t>(
reinterpret_cast<uint64_t const*>(ptr_A),
reinterpret_cast<uint64_t const*>(ptr_B), capacity);
default:
throw std::runtime_error("Unsupported numeric type");
}
}
/// Returns true if two blocks have approximately the same value
bool DeviceAllocation::block_compare_relatively_equal(
library::NumericTypeID numeric_type, void const* ptr_A,
void const* ptr_B, size_t capacity, double epsilon,
double nonzero_floor) {
switch (numeric_type) {
case library::NumericTypeID::kF16:
return reference::device::BlockCompareRelativelyEqual<half_t>(
reinterpret_cast<half_t const*>(ptr_A),
reinterpret_cast<half_t const*>(ptr_B), capacity,
static_cast<half_t>(epsilon),
static_cast<half_t>(nonzero_floor));
case library::NumericTypeID::kBF16:
return reference::device::BlockCompareRelativelyEqual<bfloat16_t>(
reinterpret_cast<bfloat16_t const*>(ptr_A),
reinterpret_cast<bfloat16_t const*>(ptr_B), capacity,
static_cast<bfloat16_t>(epsilon),
static_cast<bfloat16_t>(nonzero_floor));
case library::NumericTypeID::kTF32:
return reference::device::BlockCompareRelativelyEqual<tfloat32_t>(
reinterpret_cast<tfloat32_t const*>(ptr_A),
reinterpret_cast<tfloat32_t const*>(ptr_B), capacity,
static_cast<tfloat32_t>(epsilon),
static_cast<tfloat32_t>(nonzero_floor));
case library::NumericTypeID::kF32:
return reference::device::BlockCompareRelativelyEqual<float>(
reinterpret_cast<float const*>(ptr_A),
reinterpret_cast<float const*>(ptr_B), capacity,
static_cast<float>(epsilon),
static_cast<float>(nonzero_floor));
case library::NumericTypeID::kF64:
return reference::device::BlockCompareRelativelyEqual<double>(
reinterpret_cast<double const*>(ptr_A),
reinterpret_cast<double const*>(ptr_B), capacity,
static_cast<double>(epsilon),
static_cast<double>(nonzero_floor));
case library::NumericTypeID::kS2:
return reference::device::BlockCompareRelativelyEqual<int2b_t>(
reinterpret_cast<int2b_t const*>(ptr_A),
reinterpret_cast<int2b_t const*>(ptr_B), capacity,
static_cast<int2b_t>(epsilon),
static_cast<int2b_t>(nonzero_floor));
case library::NumericTypeID::kS4:
return reference::device::BlockCompareRelativelyEqual<int4b_t>(
reinterpret_cast<int4b_t const*>(ptr_A),
reinterpret_cast<int4b_t const*>(ptr_B), capacity,
static_cast<int4b_t>(epsilon),
static_cast<int4b_t>(nonzero_floor));
case library::NumericTypeID::kS8:
return reference::device::BlockCompareRelativelyEqual<int8_t>(
reinterpret_cast<int8_t const*>(ptr_A),
reinterpret_cast<int8_t const*>(ptr_B), capacity,
static_cast<int8_t>(epsilon),
static_cast<int8_t>(nonzero_floor));
case library::NumericTypeID::kS16:
return reference::device::BlockCompareRelativelyEqual<int16_t>(
reinterpret_cast<int16_t const*>(ptr_A),
reinterpret_cast<int16_t const*>(ptr_B), capacity,
static_cast<int16_t>(epsilon),
static_cast<int16_t>(nonzero_floor));
case library::NumericTypeID::kS32:
return reference::device::BlockCompareRelativelyEqual<int32_t>(
reinterpret_cast<int32_t const*>(ptr_A),
reinterpret_cast<int32_t const*>(ptr_B), capacity,
static_cast<int32_t>(epsilon),
static_cast<int32_t>(nonzero_floor));
case library::NumericTypeID::kS64:
return reference::device::BlockCompareRelativelyEqual<int64_t>(
reinterpret_cast<int64_t const*>(ptr_A),
reinterpret_cast<int64_t const*>(ptr_B), capacity,
static_cast<int64_t>(epsilon),
static_cast<int64_t>(nonzero_floor));
case library::NumericTypeID::kB1:
return reference::device::BlockCompareRelativelyEqual<uint1b_t>(
reinterpret_cast<uint1b_t const*>(ptr_A),
reinterpret_cast<uint1b_t const*>(ptr_B), capacity,
static_cast<uint1b_t>(epsilon),
static_cast<uint1b_t>(nonzero_floor));
case library::NumericTypeID::kU2:
return reference::device::BlockCompareRelativelyEqual<uint2b_t>(
reinterpret_cast<uint2b_t const*>(ptr_A),
reinterpret_cast<uint2b_t const*>(ptr_B), capacity,
static_cast<uint2b_t>(epsilon),
static_cast<uint2b_t>(nonzero_floor));
case library::NumericTypeID::kU4:
return reference::device::BlockCompareRelativelyEqual<uint4b_t>(
reinterpret_cast<uint4b_t const*>(ptr_A),
reinterpret_cast<uint4b_t const*>(ptr_B), capacity,
static_cast<uint4b_t>(epsilon),
static_cast<uint4b_t>(nonzero_floor));
case library::NumericTypeID::kU8:
return reference::device::BlockCompareRelativelyEqual<uint8_t>(
reinterpret_cast<uint8_t const*>(ptr_A),
reinterpret_cast<uint8_t const*>(ptr_B), capacity,
static_cast<uint8_t>(epsilon),
static_cast<uint8_t>(nonzero_floor));
case library::NumericTypeID::kU16:
return reference::device::BlockCompareRelativelyEqual<uint16_t>(
reinterpret_cast<uint16_t const*>(ptr_A),
reinterpret_cast<uint16_t const*>(ptr_B), capacity,
static_cast<uint16_t>(epsilon),
static_cast<uint16_t>(nonzero_floor));
case library::NumericTypeID::kU32:
return reference::device::BlockCompareRelativelyEqual<uint32_t>(
reinterpret_cast<uint32_t const*>(ptr_A),
reinterpret_cast<uint32_t const*>(ptr_B), capacity,
static_cast<uint32_t>(epsilon),
static_cast<uint32_t>(nonzero_floor));
case library::NumericTypeID::kU64:
return reference::device::BlockCompareRelativelyEqual<uint64_t>(
reinterpret_cast<uint64_t const*>(ptr_A),
reinterpret_cast<uint64_t const*>(ptr_B), capacity,
static_cast<uint64_t>(epsilon),
static_cast<uint64_t>(nonzero_floor));
// No relatively equal comparison for complex numbers.
//
// As a simplification, we can require bitwise equality. This avoids
// false positives. (i.e. "pass" really means passing. "Fail" may not
// actually mean failure given appropriate epsilon.)
//
case library::NumericTypeID::kCF16:
return reference::device::BlockCompareEqual<
cutlass::complex<half_t>>(
reinterpret_cast<complex<half_t> const*>(ptr_A),
reinterpret_cast<complex<half_t> const*>(ptr_B), capacity);
case library::NumericTypeID::kCF32:
return reference::device::BlockCompareEqual<
cutlass::complex<float>>(
reinterpret_cast<complex<float> const*>(ptr_A),
reinterpret_cast<complex<float> const*>(ptr_B), capacity);
case library::NumericTypeID::kCF64:
return reference::device::BlockCompareEqual<
cutlass::complex<double>>(
reinterpret_cast<complex<double> const*>(ptr_A),
reinterpret_cast<complex<double> const*>(ptr_B), capacity);
default: {
throw std::runtime_error(std::string("Unsupported numeric type: ") +
to_string(numeric_type));
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord, int Rank>
struct vector_to_coord {
vector_to_coord(TensorCoord& coord, std::vector<int> const& vec) {
coord[Rank - 1] = vec.at(Rank - 1);
if (Rank > 1) {
vector_to_coord<TensorCoord, Rank - 1>(coord, vec);
}
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 1> {
vector_to_coord(TensorCoord& coord, std::vector<int> const& vec) {
coord[0] = vec.at(0);
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 0> {
vector_to_coord(TensorCoord& coord, std::vector<int> const& vec) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
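/// Copies the allocation from device memory into a HostTensor with the given static
/// Element/Layout types and streams its contents to the output as CSV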
template <typename Element, typename Layout>
static void write_tensor_csv_static_tensor_view(std::ostream& out,
DeviceAllocation& allocation) {
Coord<Layout::kRank> extent;
Coord<Layout::kStrideRank> stride;
if (allocation.extent().size() != Layout::kRank) {
throw std::runtime_error("Allocation extent has invalid rank");
}
if (allocation.stride().size() != Layout::kStrideRank) {
throw std::runtime_error("Allocation stride has invalid rank");
}
vector_to_coord<Coord<Layout::kRank>, Layout::kRank>(extent,
allocation.extent());
vector_to_coord<Coord<Layout::kStrideRank>, Layout::kStrideRank>(
stride, allocation.stride());
Layout layout(stride);
HostTensor<Element, Layout> host_tensor(extent, layout, false);
if (host_tensor.capacity() != allocation.batch_stride()) {
throw std::runtime_error("Unexpected capacity to equal.");
}
host_tensor.copy_in_device_to_host(
static_cast<Element const*>(allocation.data()),
allocation.batch_stride());
TensorViewWrite(out, host_tensor.host_view());
out << "\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
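/// Dispatches on the allocation's layout type to write its contents as CSV with element type T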
template <typename T>
static void write_tensor_csv_static_type(std::ostream& out,
DeviceAllocation& allocation) {
switch (allocation.layout()) {
case library::LayoutTypeID::kRowMajor:
write_tensor_csv_static_tensor_view<T, layout::RowMajor>(
out, allocation);
break;
case library::LayoutTypeID::kColumnMajor:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajor>(
out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK2:
write_tensor_csv_static_tensor_view<T,
layout::RowMajorInterleaved<2>>(
out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK2:
write_tensor_csv_static_tensor_view<
T, layout::ColumnMajorInterleaved<2>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK4:
write_tensor_csv_static_tensor_view<T,
layout::RowMajorInterleaved<4>>(
out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK4:
write_tensor_csv_static_tensor_view<
T, layout::ColumnMajorInterleaved<4>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK16:
write_tensor_csv_static_tensor_view<
T, layout::RowMajorInterleaved<16>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK16:
write_tensor_csv_static_tensor_view<
T, layout::ColumnMajorInterleaved<16>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK32:
write_tensor_csv_static_tensor_view<
T, layout::RowMajorInterleaved<32>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK32:
write_tensor_csv_static_tensor_view<
T, layout::ColumnMajorInterleaved<32>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK64:
write_tensor_csv_static_tensor_view<
T, layout::RowMajorInterleaved<64>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK64:
write_tensor_csv_static_tensor_view<
T, layout::ColumnMajorInterleaved<64>>(out, allocation);
break;
case library::LayoutTypeID::kTensorNHWC:
write_tensor_csv_static_tensor_view<T, layout::TensorNHWC>(
out, allocation);
break;
case library::LayoutTypeID::kTensorNDHWC:
write_tensor_csv_static_tensor_view<T, layout::TensorNDHWC>(
out, allocation);
break;
case library::LayoutTypeID::kTensorNC32HW32:
write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<32>>(
out, allocation);
break;
case library::LayoutTypeID::kTensorNC64HW64:
write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<64>>(
out, allocation);
break;
case library::LayoutTypeID::kTensorC32RSK32:
write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<32>>(
out, allocation);
break;
case library::LayoutTypeID::kTensorC64RSK64:
write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<64>>(
out, allocation);
break;
default:
throw std::runtime_error("Unhandled layout");
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Writes a tensor to csv
void DeviceAllocation::write_tensor_csv(std::ostream& out) {
switch (this->type()) {
case library::NumericTypeID::kF16:
write_tensor_csv_static_type<half_t>(out, *this);
break;
case library::NumericTypeID::kBF16:
write_tensor_csv_static_type<bfloat16_t>(out, *this);
break;
case library::NumericTypeID::kTF32:
write_tensor_csv_static_type<tfloat32_t>(out, *this);
break;
case library::NumericTypeID::kF32:
write_tensor_csv_static_type<float>(out, *this);
break;
case library::NumericTypeID::kF64:
write_tensor_csv_static_type<double>(out, *this);
break;
case library::NumericTypeID::kS2:
write_tensor_csv_static_type<int2b_t>(out, *this);
break;
case library::NumericTypeID::kS4:
write_tensor_csv_static_type<int4b_t>(out, *this);
break;
case library::NumericTypeID::kS8:
write_tensor_csv_static_type<int8_t>(out, *this);
break;
case library::NumericTypeID::kS16:
write_tensor_csv_static_type<int16_t>(out, *this);
break;
case library::NumericTypeID::kS32:
write_tensor_csv_static_type<int32_t>(out, *this);
break;
case library::NumericTypeID::kS64:
write_tensor_csv_static_type<int64_t>(out, *this);
break;
case library::NumericTypeID::kB1:
write_tensor_csv_static_type<uint1b_t>(out, *this);
break;
case library::NumericTypeID::kU2:
write_tensor_csv_static_type<uint2b_t>(out, *this);
break;
case library::NumericTypeID::kU4:
write_tensor_csv_static_type<uint4b_t>(out, *this);
break;
case library::NumericTypeID::kU8:
write_tensor_csv_static_type<uint8_t>(out, *this);
break;
case library::NumericTypeID::kU16:
write_tensor_csv_static_type<uint16_t>(out, *this);
break;
case library::NumericTypeID::kU32:
write_tensor_csv_static_type<uint32_t>(out, *this);
break;
case library::NumericTypeID::kU64:
write_tensor_csv_static_type<uint64_t>(out, *this);
break;
case library::NumericTypeID::kCF16:
write_tensor_csv_static_type<cutlass::complex<half_t>>(out, *this);
break;
case library::NumericTypeID::kCF32:
write_tensor_csv_static_type<cutlass::complex<float>>(out, *this);
break;
case library::NumericTypeID::kCF64:
write_tensor_csv_static_type<cutlass::complex<double>>(out, *this);
break;
default:
throw std::runtime_error("Unsupported numeric type");
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
| 5f1748ea6022c55860d65d6711b57a621f63950c.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <cstring>
#include "cutlass/numeric_types.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/library/util.h"
#include "device_allocation.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
size_t DeviceAllocation::bytes(library::NumericTypeID type, size_t capacity) {
return size_t(cutlass::library::sizeof_bits(type)) * capacity / 8;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Layout>
static std::vector<int> get_packed_layout_stride(
std::vector<int> const& extent) {
typename Layout::TensorCoord extent_coord;
typename Layout::Stride stride_coord;
if (extent.size() != size_t(Layout::kRank)) {
throw std::runtime_error(
"Layout does not have same rank as extent vector.");
}
for (int i = 0; i < Layout::kRank; ++i) {
extent_coord[i] = extent.at(i);
}
std::vector<int> stride;
stride.resize(Layout::kStrideRank, 0);
Layout layout = Layout::packed(extent_coord);
stride_coord = layout.stride();
for (int i = 0; i < Layout::kStrideRank; ++i) {
stride.at(i) = stride_coord[i];
}
return stride;
}
/// Returns the stride of a packed layout
std::vector<int> DeviceAllocation::get_packed_layout(
library::LayoutTypeID layout_id, std::vector<int> const& extent) {
std::vector<int> stride;
switch (layout_id) {
case library::LayoutTypeID::kColumnMajor:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajor>(
extent);
break;
case library::LayoutTypeID::kRowMajor:
stride =
get_packed_layout_stride<cutlass::layout::RowMajor>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK2:
stride = get_packed_layout_stride<
cutlass::layout::ColumnMajorInterleaved<2>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK2:
stride = get_packed_layout_stride<
cutlass::layout::RowMajorInterleaved<2>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK4:
stride = get_packed_layout_stride<
cutlass::layout::ColumnMajorInterleaved<4>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK4:
stride = get_packed_layout_stride<
cutlass::layout::RowMajorInterleaved<4>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK16:
stride = get_packed_layout_stride<
cutlass::layout::ColumnMajorInterleaved<16>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK16:
stride = get_packed_layout_stride<
cutlass::layout::RowMajorInterleaved<16>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK32:
stride = get_packed_layout_stride<
cutlass::layout::ColumnMajorInterleaved<32>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK32:
stride = get_packed_layout_stride<
cutlass::layout::RowMajorInterleaved<32>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK64:
stride = get_packed_layout_stride<
cutlass::layout::ColumnMajorInterleaved<64>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK64:
stride = get_packed_layout_stride<
cutlass::layout::RowMajorInterleaved<64>>(extent);
break;
case library::LayoutTypeID::kTensorNCHW:
stride = get_packed_layout_stride<cutlass::layout::TensorNCHW>(
extent);
break;
case library::LayoutTypeID::kTensorNHWC:
stride = get_packed_layout_stride<cutlass::layout::TensorNHWC>(
extent);
break;
case library::LayoutTypeID::kTensorNDHWC:
stride = get_packed_layout_stride<cutlass::layout::TensorNDHWC>(
extent);
break;
case library::LayoutTypeID::kTensorNC32HW32:
stride =
get_packed_layout_stride<cutlass::layout::TensorNCxHWx<32>>(
extent);
break;
case library::LayoutTypeID::kTensorNC64HW64:
stride =
get_packed_layout_stride<cutlass::layout::TensorNCxHWx<64>>(
extent);
break;
case library::LayoutTypeID::kTensorC32RSK32:
stride =
get_packed_layout_stride<cutlass::layout::TensorCxRSKx<32>>(
extent);
break;
case library::LayoutTypeID::kTensorC64RSK64:
stride =
get_packed_layout_stride<cutlass::layout::TensorCxRSKx<64>>(
extent);
break;
default:
break;
}
return stride;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template to use CUTLASS Layout functions to
template <typename Layout>
static size_t construct_layout_(void* bytes, library::LayoutTypeID layout_id,
std::vector<int> const& extent,
std::vector<int>& stride) {
if (extent.size() != Layout::kRank) {
throw std::runtime_error(
"Layout must have same rank as extent vector.");
}
if (Layout::kStrideRank && stride.empty()) {
stride = get_packed_layout_stride<Layout>(extent);
return construct_layout_<Layout>(bytes, layout_id, extent, stride);
} else if (Layout::kStrideRank && stride.size() != Layout::kStrideRank) {
throw std::runtime_error(
"Layout requires either empty stride or stride vector matching "
"Layout::kStrideRank");
}
typename Layout::Stride stride_coord;
for (int i = 0; i < Layout::kStrideRank; ++i) {
stride_coord[i] = stride.at(i);
}
typename Layout::TensorCoord extent_coord;
for (int i = 0; i < Layout::kRank; ++i) {
extent_coord[i] = extent.at(i);
}
// Construct the CUTLASS layout object from the stride object
Layout layout(stride_coord);
// Pack it into bytes
if (bytes) {
*reinterpret_cast<Layout*>(bytes) = layout;
}
// Return capacity
size_t capacity_ = layout.capacity(extent_coord);
return capacity_;
}
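// Note on the helper above: construct_layout_ both packs the Layout object into
// `bytes` (when non-null) and reports the element capacity implied by the extent
// and stride. For a packed layout this is simply the element count of the tensor;
// e.g. a packed column-major 3x4 matrix gives stride {3} and a capacity of 12
// elements (illustrative numbers only).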
/// returns the capacity needed
size_t DeviceAllocation::construct_layout(void* bytes,
library::LayoutTypeID layout_id,
std::vector<int> const& extent,
std::vector<int>& stride) {
switch (layout_id) {
case library::LayoutTypeID::kColumnMajor:
return construct_layout_<cutlass::layout::ColumnMajor>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajor:
return construct_layout_<cutlass::layout::RowMajor>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK2:
return construct_layout_<
cutlass::layout::ColumnMajorInterleaved<2>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK2:
return construct_layout_<cutlass::layout::RowMajorInterleaved<2>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK4:
return construct_layout_<
cutlass::layout::ColumnMajorInterleaved<4>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK4:
return construct_layout_<cutlass::layout::RowMajorInterleaved<4>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK16:
return construct_layout_<
cutlass::layout::ColumnMajorInterleaved<16>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK16:
return construct_layout_<cutlass::layout::RowMajorInterleaved<16>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK32:
return construct_layout_<
cutlass::layout::ColumnMajorInterleaved<32>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK32:
return construct_layout_<cutlass::layout::RowMajorInterleaved<32>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK64:
return construct_layout_<
cutlass::layout::ColumnMajorInterleaved<64>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK64:
return construct_layout_<cutlass::layout::RowMajorInterleaved<64>>(
bytes, layout_id, extent, stride);
        case library::LayoutTypeID::kTensorNCHW:
            return construct_layout_<cutlass::layout::TensorNCHW>(
                    bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNHWC:
return construct_layout_<cutlass::layout::TensorNHWC>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNDHWC:
return construct_layout_<cutlass::layout::TensorNDHWC>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNC32HW32:
return construct_layout_<cutlass::layout::TensorNCxHWx<32>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNC64HW64:
return construct_layout_<cutlass::layout::TensorNCxHWx<64>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorC32RSK32:
return construct_layout_<cutlass::layout::TensorCxRSKx<32>>(
bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorC64RSK64:
return construct_layout_<cutlass::layout::TensorCxRSKx<64>>(
bytes, layout_id, extent, stride);
default:
break;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
DeviceAllocation::DeviceAllocation()
: type_(library::NumericTypeID::kInvalid),
batch_stride_(0),
capacity_(0),
pointer_(nullptr),
layout_(library::LayoutTypeID::kUnknown),
batch_count_(1) {}
DeviceAllocation::DeviceAllocation(library::NumericTypeID type, size_t capacity)
: type_(type),
batch_stride_(capacity),
capacity_(capacity),
pointer_(nullptr),
layout_(library::LayoutTypeID::kUnknown),
batch_count_(1) {
cudaError_t result = cudaMalloc((void**)&pointer_, bytes(type, capacity));
if (result != cudaSuccess) {
type_ = library::NumericTypeID::kInvalid;
capacity_ = 0;
pointer_ = nullptr;
throw std::bad_alloc();
}
}
DeviceAllocation::DeviceAllocation(library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const& extent,
std::vector<int> const& stride,
int batch_count)
: type_(type),
batch_stride_(size_t(0)),
capacity_(size_t(0)),
pointer_(nullptr),
batch_count_(1) {
reset(type, layout_id, extent, stride, batch_count);
}
DeviceAllocation::~DeviceAllocation() {
if (pointer_) {
cudaFree(pointer_);
}
}
DeviceAllocation& DeviceAllocation::reset() {
if (pointer_) {
cudaFree(pointer_);
}
type_ = library::NumericTypeID::kInvalid;
batch_stride_ = 0;
capacity_ = 0;
pointer_ = nullptr;
layout_ = library::LayoutTypeID::kUnknown;
stride_.clear();
extent_.clear();
tensor_ref_buffer_.clear();
batch_count_ = 1;
return *this;
}
DeviceAllocation& DeviceAllocation::reset(library::NumericTypeID type,
size_t capacity) {
reset();
type_ = type;
batch_stride_ = capacity;
capacity_ = capacity;
cudaError_t result = cudaMalloc((void**)&pointer_, bytes(type_, capacity_));
if (result != cudaSuccess) {
throw std::bad_alloc();
}
layout_ = library::LayoutTypeID::kUnknown;
stride_.clear();
extent_.clear();
batch_count_ = 1;
tensor_ref_buffer_.resize(sizeof(pointer_), 0);
std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_));
return *this;
}
/// Allocates memory for a given layout and tensor
DeviceAllocation& DeviceAllocation::reset(library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const& extent,
std::vector<int> const& stride,
int batch_count) {
reset();
tensor_ref_buffer_.resize(
sizeof(pointer_) +
(sizeof(int) * library::get_layout_stride_rank(layout_id)),
0);
type_ = type;
layout_ = layout_id;
stride_ = stride;
extent_ = extent;
batch_count_ = batch_count;
batch_stride_ =
construct_layout(tensor_ref_buffer_.data() + sizeof(pointer_),
layout_id, extent, stride_);
capacity_ = batch_stride_ * batch_count_;
cudaError_t result = cudaMalloc((void**)&pointer_, bytes(type, capacity_));
if (result != cudaSuccess) {
throw std::bad_alloc();
}
std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_));
return *this;
}
bool DeviceAllocation::good() const {
return (capacity_ && pointer_);
}
library::NumericTypeID DeviceAllocation::type() const {
return type_;
}
void* DeviceAllocation::data() const {
return pointer_;
}
void* DeviceAllocation::batch_data(int batch_idx) const {
return static_cast<char*>(data()) + batch_stride_bytes() * batch_idx;
}
library::LayoutTypeID DeviceAllocation::layout() const {
return layout_;
}
std::vector<int> const& DeviceAllocation::stride() const {
return stride_;
}
/// Gets the extent vector
std::vector<int> const& DeviceAllocation::extent() const {
return extent_;
}
/// Gets the number of adjacent tensors in memory
int DeviceAllocation::batch_count() const {
return batch_count_;
}
/// Gets the stride (in units of elements) between items
int64_t DeviceAllocation::batch_stride() const {
return batch_stride_;
}
/// Gets the stride (in units of bytes) between items
int64_t DeviceAllocation::batch_stride_bytes() const {
return bytes(type_, batch_stride_);
}
size_t DeviceAllocation::capacity() const {
return capacity_;
}
size_t DeviceAllocation::bytes() const {
return bytes(type_, capacity_);
}
/// Copies from an equivalent-sized tensor in device memory
void DeviceAllocation::copy_from_device(void const* ptr) {
cudaError_t result =
cudaMemcpy(data(), ptr, bytes(), cudaMemcpyDeviceToDevice);
if (result != cudaSuccess) {
throw std::runtime_error("Failed device-to-device copy");
}
}
/// Copies from an equivalent-sized tensor in host memory
void DeviceAllocation::copy_from_host(void const* ptr) {
cudaError_t result =
cudaMemcpy(data(), ptr, bytes(), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
throw std::runtime_error("Failed device-to-device copy");
}
}
/// Copies to an equivalent-sized tensor in host memory
void DeviceAllocation::copy_to_host(void* ptr) {
cudaError_t result =
cudaMemcpy(ptr, data(), bytes(), cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
throw std::runtime_error("Failed device-to-device copy");
}
}
void DeviceAllocation::initialize_random_device(int seed, Distribution dist) {
if (!good()) {
throw std::runtime_error(
"Attempting to initialize invalid allocation.");
}
// Instantiate calls to CURAND here. This file takes a long time to compile
// for this reason.
switch (type_) {
case library::NumericTypeID::kF16:
cutlass::reference::device::BlockFillRandom<cutlass::half_t>(
reinterpret_cast<cutlass::half_t*>(pointer_), capacity_,
seed, dist);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::device::BlockFillRandom<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t*>(pointer_), capacity_,
seed, dist);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::device::BlockFillRandom<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t*>(pointer_), capacity_,
seed, dist);
break;
case library::NumericTypeID::kF32:
cutlass::reference::device::BlockFillRandom<float>(
reinterpret_cast<float*>(pointer_), capacity_, seed, dist);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::device::BlockFillRandom<complex<bfloat16_t>>(
reinterpret_cast<complex<bfloat16_t>*>(pointer_), capacity_,
seed, dist);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::device::BlockFillRandom<
cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t>*>(
pointer_),
capacity_, seed, dist);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::device::BlockFillRandom<
cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float>*>(pointer_),
capacity_, seed, dist);
break;
case library::NumericTypeID::kF64:
cutlass::reference::device::BlockFillRandom<double>(
reinterpret_cast<double*>(pointer_), capacity_, seed, dist);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::device::BlockFillRandom<complex<double>>(
reinterpret_cast<complex<double>*>(pointer_), capacity_,
seed, dist);
break;
case library::NumericTypeID::kS2:
cutlass::reference::device::BlockFillRandom<int2b_t>(
reinterpret_cast<int2b_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kS4:
cutlass::reference::device::BlockFillRandom<int4b_t>(
reinterpret_cast<int4b_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kS8:
cutlass::reference::device::BlockFillRandom<int8_t>(
reinterpret_cast<int8_t*>(pointer_), capacity_, seed, dist);
break;
case library::NumericTypeID::kS16:
cutlass::reference::device::BlockFillRandom<int16_t>(
reinterpret_cast<int16_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kS32:
cutlass::reference::device::BlockFillRandom<int32_t>(
reinterpret_cast<int32_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kS64:
cutlass::reference::device::BlockFillRandom<int64_t>(
reinterpret_cast<int64_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kB1:
cutlass::reference::device::BlockFillRandom<uint1b_t>(
reinterpret_cast<uint1b_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kU2:
cutlass::reference::device::BlockFillRandom<uint2b_t>(
reinterpret_cast<uint2b_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kU4:
cutlass::reference::device::BlockFillRandom<uint4b_t>(
reinterpret_cast<uint4b_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kU8:
cutlass::reference::device::BlockFillRandom<uint8_t>(
reinterpret_cast<uint8_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kU16:
cutlass::reference::device::BlockFillRandom<uint16_t>(
reinterpret_cast<uint16_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kU32:
cutlass::reference::device::BlockFillRandom<uint32_t>(
reinterpret_cast<uint32_t*>(pointer_), capacity_, seed,
dist);
break;
case library::NumericTypeID::kU64:
cutlass::reference::device::BlockFillRandom<uint64_t>(
reinterpret_cast<uint64_t*>(pointer_), capacity_, seed,
dist);
break;
default:
break;
}
}
void DeviceAllocation::initialize_random_host(int seed, Distribution dist) {
if (!good()) {
throw std::runtime_error(
"Attempting to initialize invalid allocation.");
}
std::vector<uint8_t> host_data(bytes());
switch (type_) {
case library::NumericTypeID::kF16:
cutlass::reference::host::BlockFillRandom<cutlass::half_t>(
reinterpret_cast<cutlass::half_t*>(host_data.data()),
capacity_, seed, dist);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::host::BlockFillRandom<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t*>(host_data.data()),
capacity_, seed, dist);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::host::BlockFillRandom<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t*>(host_data.data()),
capacity_, seed, dist);
break;
case library::NumericTypeID::kF32:
cutlass::reference::host::BlockFillRandom<float>(
reinterpret_cast<float*>(host_data.data()), capacity_, seed,
dist);
break;
case library::NumericTypeID::kCF16:
cutlass::reference::host::BlockFillRandom<
cutlass::complex<cutlass::half_t>>(
reinterpret_cast<cutlass::complex<cutlass::half_t>*>(
host_data.data()),
capacity_, seed, dist);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::host::BlockFillRandom<
cutlass::complex<cutlass::bfloat16_t>>(
reinterpret_cast<cutlass::complex<cutlass::bfloat16_t>*>(
host_data.data()),
capacity_, seed, dist);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::host::BlockFillRandom<
cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t>*>(
host_data.data()),
capacity_, seed, dist);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::host::BlockFillRandom<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float>*>(
host_data.data()),
capacity_, seed, dist);
break;
case library::NumericTypeID::kF64:
cutlass::reference::host::BlockFillRandom<double>(
reinterpret_cast<double*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::host::BlockFillRandom<cutlass::complex<double>>(
reinterpret_cast<cutlass::complex<double>*>(
host_data.data()),
capacity_, seed, dist);
break;
case library::NumericTypeID::kS2:
cutlass::reference::host::BlockFillRandom<int2b_t>(
reinterpret_cast<int2b_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kS4:
cutlass::reference::host::BlockFillRandom<int4b_t>(
reinterpret_cast<int4b_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kS8:
cutlass::reference::host::BlockFillRandom<int8_t>(
reinterpret_cast<int8_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFillRandom<int16_t>(
reinterpret_cast<int16_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFillRandom<int32_t>(
reinterpret_cast<int32_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kS64:
cutlass::reference::host::BlockFillRandom<int64_t>(
reinterpret_cast<int64_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kB1:
cutlass::reference::host::BlockFillRandom<uint1b_t>(
reinterpret_cast<uint1b_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kU2:
cutlass::reference::host::BlockFillRandom<uint2b_t>(
reinterpret_cast<uint2b_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kU4:
cutlass::reference::host::BlockFillRandom<uint4b_t>(
reinterpret_cast<uint4b_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kU8:
cutlass::reference::host::BlockFillRandom<uint8_t>(
reinterpret_cast<uint8_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kU16:
cutlass::reference::host::BlockFillRandom<uint16_t>(
reinterpret_cast<uint16_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kU32:
cutlass::reference::host::BlockFillRandom<uint32_t>(
reinterpret_cast<uint32_t*>(host_data.data()), capacity_,
seed, dist);
break;
case library::NumericTypeID::kU64:
cutlass::reference::host::BlockFillRandom<uint64_t>(
reinterpret_cast<uint64_t*>(host_data.data()), capacity_,
seed, dist);
break;
default:
break;
}
copy_from_host(host_data.data());
}
void DeviceAllocation::initialize_random_sparsemeta_device(int seed,
int MetaSizeInBits) {
if (!good()) {
throw std::runtime_error(
"Attempting to initialize invalid allocation.");
}
// Instantiate calls to CURAND here. This file takes a long time to compile
// for this reason.
switch (type_) {
case library::NumericTypeID::kU16:
cutlass::reference::device::BlockFillRandomSparseMeta<uint16_t>(
reinterpret_cast<uint16_t*>(pointer_), capacity_, seed,
MetaSizeInBits);
break;
case library::NumericTypeID::kU32:
cutlass::reference::device::BlockFillRandomSparseMeta<uint32_t>(
reinterpret_cast<uint32_t*>(pointer_), capacity_, seed,
MetaSizeInBits);
break;
default:
break;
}
}
void DeviceAllocation::initialize_random_sparsemeta_host(int seed,
int MetaSizeInBits) {
if (!good()) {
throw std::runtime_error(
"Attempting to initialize invalid allocation.");
}
std::vector<uint8_t> host_data(bytes());
switch (type_) {
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFillRandomSparseMeta<uint16_t>(
reinterpret_cast<uint16_t*>(host_data.data()), capacity_,
seed, MetaSizeInBits);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFillRandomSparseMeta<uint32_t>(
reinterpret_cast<uint32_t*>(host_data.data()), capacity_,
seed, MetaSizeInBits);
break;
default:
break;
}
copy_from_host(host_data.data());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns true if two blocks have exactly the same value
bool DeviceAllocation::block_compare_equal(library::NumericTypeID numeric_type,
void const* ptr_A, void const* ptr_B,
size_t capacity) {
switch (numeric_type) {
case library::NumericTypeID::kF16:
return reference::device::BlockCompareEqual<half_t>(
reinterpret_cast<half_t const*>(ptr_A),
reinterpret_cast<half_t const*>(ptr_B), capacity);
case library::NumericTypeID::kBF16:
return reference::device::BlockCompareEqual<bfloat16_t>(
reinterpret_cast<bfloat16_t const*>(ptr_A),
reinterpret_cast<bfloat16_t const*>(ptr_B), capacity);
case library::NumericTypeID::kTF32:
return reference::device::BlockCompareEqual<tfloat32_t>(
reinterpret_cast<tfloat32_t const*>(ptr_A),
reinterpret_cast<tfloat32_t const*>(ptr_B), capacity);
case library::NumericTypeID::kF32:
return reference::device::BlockCompareEqual<float>(
reinterpret_cast<float const*>(ptr_A),
reinterpret_cast<float const*>(ptr_B), capacity);
case library::NumericTypeID::kCF32:
return reference::device::BlockCompareEqual<
cutlass::complex<float>>(
reinterpret_cast<complex<float> const*>(ptr_A),
reinterpret_cast<complex<float> const*>(ptr_B), capacity);
case library::NumericTypeID::kCF16:
return reference::device::BlockCompareEqual<complex<half_t>>(
reinterpret_cast<complex<half_t> const*>(ptr_A),
reinterpret_cast<complex<half_t> const*>(ptr_B), capacity);
case library::NumericTypeID::kCBF16:
return reference::device::BlockCompareEqual<complex<bfloat16_t>>(
reinterpret_cast<complex<bfloat16_t> const*>(ptr_A),
reinterpret_cast<complex<bfloat16_t> const*>(ptr_B),
capacity);
case library::NumericTypeID::kCTF32:
return reference::device::BlockCompareEqual<complex<tfloat32_t>>(
reinterpret_cast<complex<tfloat32_t> const*>(ptr_A),
reinterpret_cast<complex<tfloat32_t> const*>(ptr_B),
capacity);
case library::NumericTypeID::kF64:
return reference::device::BlockCompareEqual<double>(
reinterpret_cast<double const*>(ptr_A),
reinterpret_cast<double const*>(ptr_B), capacity);
case library::NumericTypeID::kCF64:
return reference::device::BlockCompareEqual<complex<double>>(
reinterpret_cast<complex<double> const*>(ptr_A),
reinterpret_cast<complex<double> const*>(ptr_B), capacity);
case library::NumericTypeID::kS2:
return reference::device::BlockCompareEqual<int2b_t>(
reinterpret_cast<int2b_t const*>(ptr_A),
reinterpret_cast<int2b_t const*>(ptr_B), capacity);
case library::NumericTypeID::kS4:
return reference::device::BlockCompareEqual<int4b_t>(
reinterpret_cast<int4b_t const*>(ptr_A),
reinterpret_cast<int4b_t const*>(ptr_B), capacity);
case library::NumericTypeID::kS8:
return reference::device::BlockCompareEqual<int8_t>(
reinterpret_cast<int8_t const*>(ptr_A),
reinterpret_cast<int8_t const*>(ptr_B), capacity);
case library::NumericTypeID::kS16:
return reference::device::BlockCompareEqual<int16_t>(
reinterpret_cast<int16_t const*>(ptr_A),
reinterpret_cast<int16_t const*>(ptr_B), capacity);
case library::NumericTypeID::kS32:
return reference::device::BlockCompareEqual<int32_t>(
reinterpret_cast<int32_t const*>(ptr_A),
reinterpret_cast<int32_t const*>(ptr_B), capacity);
case library::NumericTypeID::kS64:
return reference::device::BlockCompareEqual<int64_t>(
reinterpret_cast<int64_t const*>(ptr_A),
reinterpret_cast<int64_t const*>(ptr_B), capacity);
case library::NumericTypeID::kB1:
return reference::device::BlockCompareEqual<uint1b_t>(
reinterpret_cast<uint1b_t const*>(ptr_A),
reinterpret_cast<uint1b_t const*>(ptr_B), capacity);
case library::NumericTypeID::kU2:
return reference::device::BlockCompareEqual<uint2b_t>(
reinterpret_cast<uint2b_t const*>(ptr_A),
reinterpret_cast<uint2b_t const*>(ptr_B), capacity);
case library::NumericTypeID::kU4:
return reference::device::BlockCompareEqual<uint4b_t>(
reinterpret_cast<uint4b_t const*>(ptr_A),
reinterpret_cast<uint4b_t const*>(ptr_B), capacity);
case library::NumericTypeID::kU8:
return reference::device::BlockCompareEqual<uint8_t>(
reinterpret_cast<uint8_t const*>(ptr_A),
reinterpret_cast<uint8_t const*>(ptr_B), capacity);
case library::NumericTypeID::kU16:
return reference::device::BlockCompareEqual<uint16_t>(
reinterpret_cast<uint16_t const*>(ptr_A),
reinterpret_cast<uint16_t const*>(ptr_B), capacity);
case library::NumericTypeID::kU32:
return reference::device::BlockCompareEqual<uint32_t>(
reinterpret_cast<uint32_t const*>(ptr_A),
reinterpret_cast<uint32_t const*>(ptr_B), capacity);
case library::NumericTypeID::kU64:
return reference::device::BlockCompareEqual<uint64_t>(
reinterpret_cast<uint64_t const*>(ptr_A),
reinterpret_cast<uint64_t const*>(ptr_B), capacity);
default:
throw std::runtime_error("Unsupported numeric type");
}
}
/// Returns true if two blocks have approximately the same value
bool DeviceAllocation::block_compare_relatively_equal(
library::NumericTypeID numeric_type, void const* ptr_A,
void const* ptr_B, size_t capacity, double epsilon,
double nonzero_floor) {
switch (numeric_type) {
case library::NumericTypeID::kF16:
return reference::device::BlockCompareRelativelyEqual<half_t>(
reinterpret_cast<half_t const*>(ptr_A),
reinterpret_cast<half_t const*>(ptr_B), capacity,
static_cast<half_t>(epsilon),
static_cast<half_t>(nonzero_floor));
case library::NumericTypeID::kBF16:
return reference::device::BlockCompareRelativelyEqual<bfloat16_t>(
reinterpret_cast<bfloat16_t const*>(ptr_A),
reinterpret_cast<bfloat16_t const*>(ptr_B), capacity,
static_cast<bfloat16_t>(epsilon),
static_cast<bfloat16_t>(nonzero_floor));
case library::NumericTypeID::kTF32:
return reference::device::BlockCompareRelativelyEqual<tfloat32_t>(
reinterpret_cast<tfloat32_t const*>(ptr_A),
reinterpret_cast<tfloat32_t const*>(ptr_B), capacity,
static_cast<tfloat32_t>(epsilon),
static_cast<tfloat32_t>(nonzero_floor));
case library::NumericTypeID::kF32:
return reference::device::BlockCompareRelativelyEqual<float>(
reinterpret_cast<float const*>(ptr_A),
reinterpret_cast<float const*>(ptr_B), capacity,
static_cast<float>(epsilon),
static_cast<float>(nonzero_floor));
case library::NumericTypeID::kF64:
return reference::device::BlockCompareRelativelyEqual<double>(
reinterpret_cast<double const*>(ptr_A),
reinterpret_cast<double const*>(ptr_B), capacity,
static_cast<double>(epsilon),
static_cast<double>(nonzero_floor));
case library::NumericTypeID::kS2:
return reference::device::BlockCompareRelativelyEqual<int2b_t>(
reinterpret_cast<int2b_t const*>(ptr_A),
reinterpret_cast<int2b_t const*>(ptr_B), capacity,
static_cast<int2b_t>(epsilon),
static_cast<int2b_t>(nonzero_floor));
case library::NumericTypeID::kS4:
return reference::device::BlockCompareRelativelyEqual<int4b_t>(
reinterpret_cast<int4b_t const*>(ptr_A),
reinterpret_cast<int4b_t const*>(ptr_B), capacity,
static_cast<int4b_t>(epsilon),
static_cast<int4b_t>(nonzero_floor));
case library::NumericTypeID::kS8:
return reference::device::BlockCompareRelativelyEqual<int8_t>(
reinterpret_cast<int8_t const*>(ptr_A),
reinterpret_cast<int8_t const*>(ptr_B), capacity,
static_cast<int8_t>(epsilon),
static_cast<int8_t>(nonzero_floor));
case library::NumericTypeID::kS16:
return reference::device::BlockCompareRelativelyEqual<int16_t>(
reinterpret_cast<int16_t const*>(ptr_A),
reinterpret_cast<int16_t const*>(ptr_B), capacity,
static_cast<int16_t>(epsilon),
static_cast<int16_t>(nonzero_floor));
case library::NumericTypeID::kS32:
return reference::device::BlockCompareRelativelyEqual<int32_t>(
reinterpret_cast<int32_t const*>(ptr_A),
reinterpret_cast<int32_t const*>(ptr_B), capacity,
static_cast<int32_t>(epsilon),
static_cast<int32_t>(nonzero_floor));
case library::NumericTypeID::kS64:
return reference::device::BlockCompareRelativelyEqual<int64_t>(
reinterpret_cast<int64_t const*>(ptr_A),
reinterpret_cast<int64_t const*>(ptr_B), capacity,
static_cast<int64_t>(epsilon),
static_cast<int64_t>(nonzero_floor));
case library::NumericTypeID::kB1:
return reference::device::BlockCompareRelativelyEqual<uint1b_t>(
reinterpret_cast<uint1b_t const*>(ptr_A),
reinterpret_cast<uint1b_t const*>(ptr_B), capacity,
static_cast<uint1b_t>(epsilon),
static_cast<uint1b_t>(nonzero_floor));
case library::NumericTypeID::kU2:
return reference::device::BlockCompareRelativelyEqual<uint2b_t>(
reinterpret_cast<uint2b_t const*>(ptr_A),
reinterpret_cast<uint2b_t const*>(ptr_B), capacity,
static_cast<uint2b_t>(epsilon),
static_cast<uint2b_t>(nonzero_floor));
case library::NumericTypeID::kU4:
return reference::device::BlockCompareRelativelyEqual<uint4b_t>(
reinterpret_cast<uint4b_t const*>(ptr_A),
reinterpret_cast<uint4b_t const*>(ptr_B), capacity,
static_cast<uint4b_t>(epsilon),
static_cast<uint4b_t>(nonzero_floor));
case library::NumericTypeID::kU8:
return reference::device::BlockCompareRelativelyEqual<uint8_t>(
reinterpret_cast<uint8_t const*>(ptr_A),
reinterpret_cast<uint8_t const*>(ptr_B), capacity,
static_cast<uint8_t>(epsilon),
static_cast<uint8_t>(nonzero_floor));
case library::NumericTypeID::kU16:
return reference::device::BlockCompareRelativelyEqual<uint16_t>(
reinterpret_cast<uint16_t const*>(ptr_A),
reinterpret_cast<uint16_t const*>(ptr_B), capacity,
static_cast<uint16_t>(epsilon),
static_cast<uint16_t>(nonzero_floor));
case library::NumericTypeID::kU32:
return reference::device::BlockCompareRelativelyEqual<uint32_t>(
reinterpret_cast<uint32_t const*>(ptr_A),
reinterpret_cast<uint32_t const*>(ptr_B), capacity,
static_cast<uint32_t>(epsilon),
static_cast<uint32_t>(nonzero_floor));
case library::NumericTypeID::kU64:
return reference::device::BlockCompareRelativelyEqual<uint64_t>(
reinterpret_cast<uint64_t const*>(ptr_A),
reinterpret_cast<uint64_t const*>(ptr_B), capacity,
static_cast<uint64_t>(epsilon),
static_cast<uint64_t>(nonzero_floor));
// No relatively equal comparison for complex numbers.
//
// As a simplification, we can require bitwise equality. This avoids
// false positives. (i.e. "pass" really means passing. "Fail" may not
// actually mean failure given appropriate epsilon.)
//
case library::NumericTypeID::kCF16:
return reference::device::BlockCompareEqual<
cutlass::complex<half_t>>(
reinterpret_cast<complex<half_t> const*>(ptr_A),
reinterpret_cast<complex<half_t> const*>(ptr_B), capacity);
case library::NumericTypeID::kCF32:
return reference::device::BlockCompareEqual<
cutlass::complex<float>>(
reinterpret_cast<complex<float> const*>(ptr_A),
reinterpret_cast<complex<float> const*>(ptr_B), capacity);
case library::NumericTypeID::kCF64:
return reference::device::BlockCompareEqual<
cutlass::complex<double>>(
reinterpret_cast<complex<double> const*>(ptr_A),
reinterpret_cast<complex<double> const*>(ptr_B), capacity);
default: {
throw std::runtime_error(std::string("Unsupported numeric type: ") +
to_string(numeric_type));
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord, int Rank>
struct vector_to_coord {
vector_to_coord(TensorCoord& coord, std::vector<int> const& vec) {
coord[Rank - 1] = vec.at(Rank - 1);
if (Rank > 1) {
vector_to_coord<TensorCoord, Rank - 1>(coord, vec);
}
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 1> {
vector_to_coord(TensorCoord& coord, std::vector<int> const& vec) {
coord[0] = vec.at(0);
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 0> {
vector_to_coord(TensorCoord& coord, std::vector<int> const& vec) {}
};
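// Usage sketch for the helpers above (illustrative values only): copying a
// rank-3 std::vector into a static Coord<3>. The recursion unrolls at compile
// time, assigning coord[2], then coord[1], then coord[0].
//
//   Coord<3> coord;
//   std::vector<int> extent_vec = {4, 8, 16};
//   vector_to_coord<Coord<3>, 3>(coord, extent_vec);   // coord becomes {4, 8, 16}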
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, typename Layout>
static void write_tensor_csv_static_tensor_view(std::ostream& out,
DeviceAllocation& allocation) {
Coord<Layout::kRank> extent;
Coord<Layout::kStrideRank> stride;
if (allocation.extent().size() != Layout::kRank) {
throw std::runtime_error("Allocation extent has invalid rank");
}
if (allocation.stride().size() != Layout::kStrideRank) {
throw std::runtime_error("Allocation stride has invalid rank");
}
vector_to_coord<Coord<Layout::kRank>, Layout::kRank>(extent,
allocation.extent());
vector_to_coord<Coord<Layout::kStrideRank>, Layout::kStrideRank>(
stride, allocation.stride());
Layout layout(stride);
HostTensor<Element, Layout> host_tensor(extent, layout, false);
if (host_tensor.capacity() != allocation.batch_stride()) {
throw std::runtime_error("Unexpected capacity to equal.");
}
host_tensor.copy_in_device_to_host(
static_cast<Element const*>(allocation.data()),
allocation.batch_stride());
TensorViewWrite(out, host_tensor.host_view());
out << "\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
static void write_tensor_csv_static_type(std::ostream& out,
DeviceAllocation& allocation) {
switch (allocation.layout()) {
case library::LayoutTypeID::kRowMajor:
write_tensor_csv_static_tensor_view<T, layout::RowMajor>(
out, allocation);
break;
case library::LayoutTypeID::kColumnMajor:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajor>(
out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK2:
write_tensor_csv_static_tensor_view<T,
layout::RowMajorInterleaved<2>>(
out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK2:
write_tensor_csv_static_tensor_view<
T, layout::ColumnMajorInterleaved<2>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK4:
write_tensor_csv_static_tensor_view<T,
layout::RowMajorInterleaved<4>>(
out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK4:
write_tensor_csv_static_tensor_view<
T, layout::ColumnMajorInterleaved<4>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK16:
write_tensor_csv_static_tensor_view<
T, layout::RowMajorInterleaved<16>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK16:
write_tensor_csv_static_tensor_view<
T, layout::ColumnMajorInterleaved<16>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK32:
write_tensor_csv_static_tensor_view<
T, layout::RowMajorInterleaved<32>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK32:
write_tensor_csv_static_tensor_view<
T, layout::ColumnMajorInterleaved<32>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK64:
write_tensor_csv_static_tensor_view<
T, layout::RowMajorInterleaved<64>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK64:
write_tensor_csv_static_tensor_view<
T, layout::ColumnMajorInterleaved<64>>(out, allocation);
break;
case library::LayoutTypeID::kTensorNHWC:
write_tensor_csv_static_tensor_view<T, layout::TensorNHWC>(
out, allocation);
break;
case library::LayoutTypeID::kTensorNDHWC:
write_tensor_csv_static_tensor_view<T, layout::TensorNDHWC>(
out, allocation);
break;
case library::LayoutTypeID::kTensorNC32HW32:
write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<32>>(
out, allocation);
break;
case library::LayoutTypeID::kTensorNC64HW64:
write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<64>>(
out, allocation);
break;
case library::LayoutTypeID::kTensorC32RSK32:
write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<32>>(
out, allocation);
break;
case library::LayoutTypeID::kTensorC64RSK64:
write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<64>>(
out, allocation);
break;
default:
throw std::runtime_error("Unhandled layout");
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Writes a tensor to csv
void DeviceAllocation::write_tensor_csv(std::ostream& out) {
switch (this->type()) {
case library::NumericTypeID::kF16:
write_tensor_csv_static_type<half_t>(out, *this);
break;
case library::NumericTypeID::kBF16:
write_tensor_csv_static_type<bfloat16_t>(out, *this);
break;
case library::NumericTypeID::kTF32:
write_tensor_csv_static_type<tfloat32_t>(out, *this);
break;
case library::NumericTypeID::kF32:
write_tensor_csv_static_type<float>(out, *this);
break;
case library::NumericTypeID::kF64:
write_tensor_csv_static_type<double>(out, *this);
break;
case library::NumericTypeID::kS2:
write_tensor_csv_static_type<int2b_t>(out, *this);
break;
case library::NumericTypeID::kS4:
write_tensor_csv_static_type<int4b_t>(out, *this);
break;
case library::NumericTypeID::kS8:
write_tensor_csv_static_type<int8_t>(out, *this);
break;
case library::NumericTypeID::kS16:
write_tensor_csv_static_type<int16_t>(out, *this);
break;
case library::NumericTypeID::kS32:
write_tensor_csv_static_type<int32_t>(out, *this);
break;
case library::NumericTypeID::kS64:
write_tensor_csv_static_type<int64_t>(out, *this);
break;
case library::NumericTypeID::kB1:
write_tensor_csv_static_type<uint1b_t>(out, *this);
break;
case library::NumericTypeID::kU2:
write_tensor_csv_static_type<uint2b_t>(out, *this);
break;
case library::NumericTypeID::kU4:
write_tensor_csv_static_type<uint4b_t>(out, *this);
break;
case library::NumericTypeID::kU8:
write_tensor_csv_static_type<uint8_t>(out, *this);
break;
case library::NumericTypeID::kU16:
write_tensor_csv_static_type<uint16_t>(out, *this);
break;
case library::NumericTypeID::kU32:
write_tensor_csv_static_type<uint32_t>(out, *this);
break;
case library::NumericTypeID::kU64:
write_tensor_csv_static_type<uint64_t>(out, *this);
break;
case library::NumericTypeID::kCF16:
write_tensor_csv_static_type<cutlass::complex<half_t>>(out, *this);
break;
case library::NumericTypeID::kCF32:
write_tensor_csv_static_type<cutlass::complex<float>>(out, *this);
break;
case library::NumericTypeID::kCF64:
write_tensor_csv_static_type<cutlass::complex<double>>(out, *this);
break;
default:
throw std::runtime_error("Unsupported numeric type");
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
|
afe4bb7fe216338638c2040be0c2a43c28b0afca.hip | // !!! This is a file automatically generated by hipify!!!
// ***********************************************************************
//
// Demo program for education in subject
// Computer Architectures and Parallel Systems
// Petr Olivka, dep. of Computer Science, FEI, VSB-TU Ostrava
// email:[email protected]
//
// Example of CUDA Technology Usage
// Multiplication of elements in float array
//
// ***********************************************************************
#include <hip/hip_runtime.h>
#include <stdio.h>
// Demo kernel for array elements multiplication.
// Every thread selects one element and multiplies it.
__global__ void kernel_mult( float *pole, int L, float Mult )
{
int l = blockDim.x * blockIdx.x + threadIdx.x;
    // if the grid is greater than the length of the array...
if ( l >= L ) return;
pole[ l ] *= Mult;
}
void run_mult( float *P, int Length, float Mult )
{
hipError_t cerr;
int threads = 128;
int blocks = ( Length + threads - 1 ) / threads;
// Memory allocation in GPU device
float *cudaP;
cerr = hipMalloc( &cudaP, Length * sizeof( float ) );
if ( cerr != hipSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, hipGetErrorString( cerr ) );
// Copy data from PC to GPU device
cerr = hipMemcpy( cudaP, P, Length * sizeof( float ), hipMemcpyHostToDevice );
if ( cerr != hipSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, hipGetErrorString( cerr ) );
// Grid creation
hipLaunchKernelGGL(( kernel_mult), dim3(blocks), dim3(threads) , 0, 0, cudaP, Length, Mult );
if ( ( cerr = hipGetLastError() ) != hipSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, hipGetErrorString( cerr ) );
// Copy data from GPU device to PC
cerr = hipMemcpy( P, cudaP, Length * sizeof( float ), hipMemcpyDeviceToHost );
if ( cerr != hipSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, hipGetErrorString( cerr ) );
// Free memory
hipFree( cudaP );
}
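// Minimal usage sketch (not part of the original demo; the length 8 and the
// multiplier 2.5f below are illustrative values): fill a small array, scale it
// on the GPU with run_mult, then print the result.
void example_run_mult_usage()
{
    const int N = 8;
    float data[ N ];
    for ( int i = 0; i < N; i++ ) data[ i ] = ( float ) i;
    run_mult( data, N, 2.5f );
    for ( int i = 0; i < N; i++ ) printf( "%0.1f ", data[ i ] );
    printf( "\n" );
}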
| afe4bb7fe216338638c2040be0c2a43c28b0afca.cu | // ***********************************************************************
//
// Demo program for education in subject
// Computer Architectures and Parallel Systems
// Petr Olivka, dep. of Computer Science, FEI, VSB-TU Ostrava
// email:[email protected]
//
// Example of CUDA Technology Usage
// Multiplication of elements in float array
//
// ***********************************************************************
#include <cuda_runtime.h>
#include <stdio.h>
// Demo kernel for array elements multiplication.
// Every thread selects one element and multiplies it.
__global__ void kernel_mult( float *pole, int L, float Mult )
{
int l = blockDim.x * blockIdx.x + threadIdx.x;
    // if the grid is greater than the length of the array...
if ( l >= L ) return;
pole[ l ] *= Mult;
}
void run_mult( float *P, int Length, float Mult )
{
cudaError_t cerr;
int threads = 128;
int blocks = ( Length + threads - 1 ) / threads;
// Memory allocation in GPU device
float *cudaP;
cerr = cudaMalloc( &cudaP, Length * sizeof( float ) );
if ( cerr != cudaSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( cerr ) );
// Copy data from PC to GPU device
cerr = cudaMemcpy( cudaP, P, Length * sizeof( float ), cudaMemcpyHostToDevice );
if ( cerr != cudaSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( cerr ) );
// Grid creation
kernel_mult<<< blocks, threads >>>( cudaP, Length, Mult );
if ( ( cerr = cudaGetLastError() ) != cudaSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( cerr ) );
// Copy data from GPU device to PC
cerr = cudaMemcpy( P, cudaP, Length * sizeof( float ), cudaMemcpyDeviceToHost );
if ( cerr != cudaSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( cerr ) );
// Free memory
cudaFree( cudaP );
}
|
893fe52765b66ac5208a67adb685317fd998a53a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernelA(float* r, float* x, float* y, float* z, int size)
{
for (int i = threadIdx.x; i < size; i += blockDim.x) {
r[i] = x[i] * y[i] + z[i];
}
} | 893fe52765b66ac5208a67adb685317fd998a53a.cu | #include "includes.h"
__global__ void kernelA(float* r, float* x, float* y, float* z, int size)
{
for (int i = threadIdx.x; i < size; i += blockDim.x) {
r[i] = x[i] * y[i] + z[i];
}
} |
24e62666aa8008e504894bb6a215ab69194f184c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
__global__ void VecAdd(int n, const float *A, const float *B, float* C) {
/********************************************************************
*
* Compute C = A + B
* where A is a (1 * n) vector
* where B is a (1 * n) vector
* where C is a (1 * n) vector
*
********************************************************************/
// INSERT KERNEL CODE HERE
int i=blockIdx.x * blockDim.x + threadIdx.x;
if(i<n)
{
C[i]=A[i]+B[i];
}
}
void basicVecAdd( float *A, float *B, float *C, int n)
{
// Initialize thread block and kernel grid dimensions ---------------------
const unsigned int BLOCK_SIZE = 256;
//INSERT CODE HERE
dim3 DimGrid(n/BLOCK_SIZE+1);
dim3 DimBlock(BLOCK_SIZE);
hipLaunchKernelGGL(( VecAdd), dim3(DimGrid),dim3(DimBlock), 0, 0, n,A,B,C);
}
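// Note on the launch configuration above: n/BLOCK_SIZE+1 always rounds up, but it
// launches one extra (fully idle) block whenever n is an exact multiple of
// BLOCK_SIZE; the i < n guard in the kernel keeps the result correct either way.
// A small illustrative helper (not used by the code above) showing the usual
// ceiling-division form:
static inline unsigned int exampleGridSize(unsigned int n, unsigned int blockSize)
{
    // ceil(n / blockSize) without floating point
    return (n + blockSize - 1) / blockSize;
}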
| 24e62666aa8008e504894bb6a215ab69194f184c.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
__global__ void VecAdd(int n, const float *A, const float *B, float* C) {
/********************************************************************
*
* Compute C = A + B
* where A is a (1 * n) vector
* where B is a (1 * n) vector
* where C is a (1 * n) vector
*
********************************************************************/
// INSERT KERNEL CODE HERE
int i=blockIdx.x * blockDim.x + threadIdx.x;
if(i<n)
{
C[i]=A[i]+B[i];
}
}
void basicVecAdd( float *A, float *B, float *C, int n)
{
// Initialize thread block and kernel grid dimensions ---------------------
const unsigned int BLOCK_SIZE = 256;
//INSERT CODE HERE
dim3 DimGrid(n/BLOCK_SIZE+1);
dim3 DimBlock(BLOCK_SIZE);
VecAdd<<<DimGrid,DimBlock>>>(n,A,B,C);
}
|
3a57eadb0b52a88b133ca9b1e34e248332c57d93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_utils.h"
namespace anakin {
namespace saber {
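// The two kernels below gather and scatter rows of a (seq_sum x lastdim) matrix
// according to a per-sequence index map (semantics inferred from the index
// arithmetic): trans_map2in copies input row map[seq] to output row seq, while
// trans_map2out copies input row seq to output row map[seq].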
template<typename Dtype>
__global__ void trans_map2in(Dtype* output, const Dtype* input, const int* map, int count,
int lastdim) {
CUDA_KERNEL_LE(tid, count) {
int seq = tid / lastdim;
output[tid] = input[map[seq] * lastdim + tid % lastdim];
// printf("in %d = %f\n",tid,output[tid]);
}
}
template<typename Dtype>
__global__ void trans_map2out(Dtype* output, const Dtype* input, const int* map, int count,
int lastdim) {
CUDA_KERNEL_LE(tid, count) {
int seq = tid / lastdim;
output[map[seq] * lastdim + tid % lastdim] = input[tid];
// printf("out %d = %f\n",map[seq]*lastdim + tid % lastdim,output[map[seq]*lastdim + tid % lastdim]);
}
}
template<typename Dtype>
void trans_map2out_cfunc(const Dtype* input, Dtype* output, int word_size, int seq_sum,
hipStream_t stream,
int* dev_map_vec) {
int count = seq_sum * word_size;
int block_dim = count;
int grid_dim = 1;
if (count > 1024) {
block_dim = 256;
grid_dim = (count + block_dim - 1) / block_dim;
}
trans_map2out << < grid_dim, block_dim, 0, stream >> > (output, input, dev_map_vec,
count, word_size);
// hipDeviceSynchronize();
}
template<typename Dtype>
void trans_map2in_cfunc(const Dtype* input, Dtype* output, int hidden_size, int seq_sum,
hipStream_t stream,
int* dev_map_vec) {
int count = seq_sum * hidden_size;
int block_dim = count;
int grid_dim = 1;
if (count > 1024) {
block_dim = 256;
grid_dim = (count + block_dim - 1) / block_dim;
}
trans_map2in << < grid_dim, block_dim, 0, stream >> > (output, input, dev_map_vec,
count, hidden_size);
}
template void trans_map2in_cfunc<float>(const float* input, float* output, int hidden_size, int seq_sum,
hipStream_t stream,
int* dev_map_vec);
template void trans_map2out_cfunc<float>(const float* input, float* output, int word_size, int seq_sum,
hipStream_t stream,
int* dev_map_vec);
template <typename Dtype>
__global__ void sub_tensor(const Dtype* in, Dtype* out, int h, int w, int stride_w) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= h * w) {
return;
}
int h_id = tid / w;
int w_id = tid % w;
out[w_id * h + h_id] = in[h_id * stride_w + w_id];
}
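// sub_tensor above copies an h x w block out of a larger row-major matrix whose
// row stride is stride_w, writing the block in column-major order (leading
// dimension h) into `out`, i.e. the block is transposed on the way out.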
template <typename Dtype>
void get_sub_tensor(const Dtype* in, Dtype* out, int h, int w, int stride_w, hipStream_t stream) {
int num_threads = h * w;
hipLaunchKernelGGL(( sub_tensor), dim3(CUDA_GET_BLOCKS(num_threads)), dim3(CUDA_NUM_THREADS), 0, stream, in, out, h, w, stride_w);
}
template void get_sub_tensor(const float* in, float* out, int h, int w, int stride_w, hipStream_t stream);
}
}
| 3a57eadb0b52a88b133ca9b1e34e248332c57d93.cu | #include "cuda_utils.h"
namespace anakin {
namespace saber {
template<typename Dtype>
__global__ void trans_map2in(Dtype* output, const Dtype* input, const int* map, int count,
int lastdim) {
CUDA_KERNEL_LE(tid, count) {
int seq = tid / lastdim;
output[tid] = input[map[seq] * lastdim + tid % lastdim];
// printf("in %d = %f\n",tid,output[tid]);
}
}
template<typename Dtype>
__global__ void trans_map2out(Dtype* output, const Dtype* input, const int* map, int count,
int lastdim) {
CUDA_KERNEL_LE(tid, count) {
int seq = tid / lastdim;
output[map[seq] * lastdim + tid % lastdim] = input[tid];
// printf("out %d = %f\n",map[seq]*lastdim + tid % lastdim,output[map[seq]*lastdim + tid % lastdim]);
}
}
template<typename Dtype>
void trans_map2out_cfunc(const Dtype* input, Dtype* output, int word_size, int seq_sum,
cudaStream_t stream,
int* dev_map_vec) {
int count = seq_sum * word_size;
int block_dim = count;
int grid_dim = 1;
if (count > 1024) {
block_dim = 256;
grid_dim = (count + block_dim - 1) / block_dim;
}
trans_map2out << < grid_dim, block_dim, 0, stream >> > (output, input, dev_map_vec,
count, word_size);
// cudaDeviceSynchronize();
}
template<typename Dtype>
void trans_map2in_cfunc(const Dtype* input, Dtype* output, int hidden_size, int seq_sum,
cudaStream_t stream,
int* dev_map_vec) {
int count = seq_sum * hidden_size;
int block_dim = count;
int grid_dim = 1;
if (count > 1024) {
block_dim = 256;
grid_dim = (count + block_dim - 1) / block_dim;
}
trans_map2in << < grid_dim, block_dim, 0, stream >> > (output, input, dev_map_vec,
count, hidden_size);
}
template void trans_map2in_cfunc<float>(const float* input, float* output, int hidden_size, int seq_sum,
cudaStream_t stream,
int* dev_map_vec);
template void trans_map2out_cfunc<float>(const float* input, float* output, int word_size, int seq_sum,
cudaStream_t stream,
int* dev_map_vec);
template <typename Dtype>
__global__ void sub_tensor(const Dtype* in, Dtype* out, int h, int w, int stride_w) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= h * w) {
return;
}
int h_id = tid / w;
int w_id = tid % w;
out[w_id * h + h_id] = in[h_id * stride_w + w_id];
}
template <typename Dtype>
void get_sub_tensor(const Dtype* in, Dtype* out, int h, int w, int stride_w, cudaStream_t stream) {
int num_threads = h * w;
sub_tensor<<<CUDA_GET_BLOCKS(num_threads), CUDA_NUM_THREADS, 0, stream>>>(in, out, h, w, stride_w);
}
template void get_sub_tensor(const float* in, float* out, int h, int w, int stride_w, cudaStream_t stream);
}
}
|
24f8ca7691fab67249c58fc23afd7ecdc0cc2d32.hip | // !!! This is a file automatically generated by hipify!!!
///
/// @file
///
/// @author Mirko Myllykoski ([email protected]), Umeå University
///
/// @internal LICENSE
///
/// Copyright (c) 2019-2020, Umeå Universitet
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions are met:
///
/// 1. Redistributions of source code must retain the above copyright notice,
/// this list of conditions and the following disclaimer.
///
/// 2. Redistributions in binary form must reproduce the above copyright notice,
/// this list of conditions and the following disclaimer in the documentation
/// and/or other materials provided with the distribution.
///
/// 3. Neither the name of the copyright holder nor the names of its
/// contributors may be used to endorse or promote products derived from this
/// software without specific prior written permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
/// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
/// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
/// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
/// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
/// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
/// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
/// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
/// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
///
#include <starneig_config.h>
#include <starneig/configuration.h>
#include "hip/hip_runtime.h"
#include "lapack.h"
#include "../common/common.h"
#include "../common/tiles.h"
#include "../common/math.h"
#include <math.h>
#include <starpu.h>
#include <starpu_cublas_v2.h>
static const double one = 1.0;
static const double zero = 0.0;
static __global__ void _init_local_q(int n, int ld, double *ptr)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int idy = blockIdx.y*blockDim.y + threadIdx.y;
for(int j = idy; j < n; j += gridDim.y*blockDim.y)
for(int i = idx; i < n; i += gridDim.x*blockDim.x)
ptr[j*ld+i] = i == j ? 1.0 : 0.0;
}
static void init_local_q(hipStream_t stream, int n, int ld, double *ptr)
{
dim3 threads(32,32);
dim3 blocks(MIN(5, divceil(n, threads.x)), MIN(5, divceil(n, threads.y)));
hipLaunchKernelGGL(( _init_local_q), dim3(blocks), dim3(threads), 0, stream, n, ld, ptr);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
}
static void left_gemm_update(hipStream_t stream, hipblasHandle_t handle,
int rbegin, int rend, int cbegin, int cend, int ldQ, int ldA, int ldT,
double const *Q, double *A, double *T) {
int m = rend-rbegin;
int n = cend-cbegin;
int k = rend-rbegin;
if (m == 0 || n == 0)
return;
hipError_t err = hipMemcpy2DAsync(
T, ldT*sizeof(double), A+cbegin*ldA+rbegin, ldA*sizeof(double),
(rend-rbegin)*sizeof(double), cend-cbegin, hipMemcpyDeviceToDevice,
stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
hipblasSetStream(handle, stream);
hipblasStatus_t cublas_err = hipblasDgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
m, n, k, &one, Q, ldQ, T, ldT, &zero, A+cbegin*ldA+rbegin, ldA);
if (cublas_err != HIPBLAS_STATUS_SUCCESS)
STARPU_CUBLAS_REPORT_ERROR(cublas_err);
}
static void right_gemm_update(hipStream_t stream, hipblasHandle_t handle,
int rbegin, int rend, int cbegin, int cend, int ldQ, int ldA, int ldT,
double const *Q, double *A, double *T) {
int m = rend-rbegin;
int n = cend-cbegin;
int k = cend-cbegin;
if (m == 0 || n == 0)
return;
double one = 1.0;
double zero = 0.0;
hipError_t err = hipMemcpy2DAsync(
T, ldT*sizeof(double), A+cbegin*ldA+rbegin, ldA*sizeof(double),
(rend-rbegin)*sizeof(double), cend-cbegin, hipMemcpyDeviceToDevice,
stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
hipblasSetStream(handle, stream);
hipblasStatus_t cublas_err = hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
m, n, k, &one, T, ldT, Q, ldQ, &zero, A+cbegin*ldA+rbegin, ldA);
if (cublas_err != HIPBLAS_STATUS_SUCCESS)
STARPU_CUBLAS_REPORT_ERROR(cublas_err);
}
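// Both helpers above use the same out-of-place pattern: the affected block of A
// is first copied into the scratch buffer T, and the product (Q^T * T for the
// left update, T * Q for the right update) is then written back over the block
// in A, so the GEMM call never aliases its input and output operands.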
static int reorder_window(hipStream_t stream, hipblasHandle_t handle,
int window_size, int threshold, int n, int ldQ, int ldZ, int ldA, int ldB,
int *select, double *Q, double *Z, double *A, double *B)
{
int ret = 0;
hipError_t err;
int *_select = NULL;
double *_lA = NULL; size_t ld_lA = 0;
double *_lB = NULL; size_t ld_lB = 0;
double *_lQ = NULL; size_t ld_lQ = 0;
double *_lZ = NULL; size_t ld_lZ = 0;
double *_work = NULL;
double *lQ = NULL; size_t ldlQ = 0;
double *lZ = NULL; size_t ldlZ = 0;
double *vT = NULL; size_t ldvT = 0;
double *hT = NULL; size_t ldhT = 0;
double *qT = NULL; size_t ldqT = 0;
int streams_created = 0;
hipStream_t left, right, right_q;
int begin = 0;
int end = 0;
// copy eigenvalue selection vector from device memory
err = hipHostMalloc(&_select, n*sizeof(int), hipHostMallocDefault);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
err = hipMemcpyAsync(
_select, select, n*sizeof(int), hipMemcpyDeviceToHost, stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
err = hipStreamSynchronize(stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
// The window may already contain "tainted" tiles but even in that
// situation the window can be partially reordered. To be able to do it we
// need to identify the last non-tainted selected tile:
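// (selection encoding: 0 = not selected, 1 = selected, 2 = tainted)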
int term = 0;
for (int i = 0; i < n; i++) {
if (_select[i] == 2) {
// make sure that tainted section is marked tainted
for (int j = i; j < n; j++)
_select[j] = 2;
break;
}
if (_select[i] == 1)
term = i+1;
}
// exit if nothing can be done
if (term < 2) {
ret = 1;
goto cleanup;
}
// allocate work space for dtgsen/dtrsen
if (B != NULL)
_work = (double *) malloc((7*n+16)*sizeof(double));
else
_work = (double *) malloc(3*n*sizeof(double));
// make sure that the window is big enough and call
// *_starneig_reorder_window directly if it is not
if (n < threshold) {
if (B != NULL) {
ld_lA = ld_lB = ld_lQ = ld_lZ = divceil(n, 8)*8;
err = hipHostMalloc(
&_lA, 4*n*ld_lA*sizeof(double), hipHostMallocDefault);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
_lB = _lA + n*ld_lA;
_lQ = _lB + n*ld_lB;
_lZ = _lQ + n*ld_lQ;
}
else {
ld_lA = ld_lQ = divceil(n, 8)*8;
err = hipHostMalloc(
&_lA, 2*n*ld_lA*sizeof(double), hipHostMallocDefault);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
_lQ = _lA + n*ld_lA;
}
// copy A matrix
err = hipMemcpy2DAsync(
_lA, ld_lA*sizeof(double), A, ldA*sizeof(double),
n*sizeof(double), n, hipMemcpyDeviceToHost, stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
// copy Q matrix
err = hipMemcpy2DAsync(
_lQ, ld_lQ*sizeof(double), Q, ldQ*sizeof(double),
n*sizeof(double), n, hipMemcpyDeviceToHost, stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
if (B != NULL) {
// copy B matrix
err = hipMemcpy2DAsync(
_lB, ld_lB*sizeof(double), B, ldB*sizeof(double),
n*sizeof(double), n, hipMemcpyDeviceToHost, stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
// copy Z matrix
err = hipMemcpy2DAsync(
_lZ, ld_lZ*sizeof(double), Z, ldZ*sizeof(double),
n*sizeof(double), n, hipMemcpyDeviceToHost, stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
}
err = hipStreamSynchronize(stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
// reorder
int m;
if (B != NULL)
ret = starneig_dtgsen(0, term, ld_lQ, ld_lZ, ld_lA, ld_lB, &m,
_select, _lQ, _lZ, _lA, _lB, _work);
else
ret = starneig_dtrsen(
0, term, ld_lQ, ld_lA, &m, _select, _lQ, _lA, _work);
// store A matrix
err = hipMemcpy2DAsync(
A, ldA*sizeof(double), _lA, ld_lA*sizeof(double),
n*sizeof(double), n, hipMemcpyHostToDevice, stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
// store Q matrix
err = hipMemcpy2DAsync(
Q, ldQ*sizeof(double), _lQ, ld_lQ*sizeof(double),
n*sizeof(double), n, hipMemcpyHostToDevice, stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
if (B != NULL) {
// store B matrix
err = hipMemcpy2DAsync(
B, ldB*sizeof(double), _lB, ld_lB*sizeof(double),
n*sizeof(double), n, hipMemcpyHostToDevice, stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
// store Z matrix
err = hipMemcpy2DAsync(
Z, ldZ*sizeof(double), _lZ, ld_lZ*sizeof(double),
n*sizeof(double), n, hipMemcpyHostToDevice, stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
}
// if an error occurred, mark the whole window tainted
if (ret != 0)
for (int i = 0; i < n; i++)
_select[i] = 2;
goto cleanup;
}
// allocate host workspace
if (B != NULL) {
ld_lA = ld_lB = ld_lQ = ld_lZ = divceil(window_size, 8)*8;
err = hipHostMalloc(
&_lA, 4*window_size*ld_lA*sizeof(double), hipHostMallocDefault);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
_lB = _lA + window_size*ld_lA;
_lQ = _lB + window_size*ld_lB;
_lZ = _lQ + window_size*ld_lQ;
}
else {
ld_lA = ld_lQ = divceil(window_size, 8)*8;
err = hipHostMalloc(
&_lA, 2*window_size*ld_lA*sizeof(double), hipHostMallocDefault);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
_lQ = _lA + window_size*ld_lA;
}
// device side local transformation matrices
if (B != NULL) {
err = hipMallocPitch(
&lQ, &ldlQ, window_size*sizeof(double), 2*window_size);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
ldlQ /= sizeof(double);
lZ = lQ + window_size*ldlQ;
ldlZ = ldlQ;
}
else {
err = hipMallocPitch(
&lQ, &ldlQ, window_size*sizeof(double), window_size);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
ldlQ /= sizeof(double);
lZ = lQ;
ldlZ = ldlQ;
}
// device side scratch buffers for GEMM kernels
err = hipMallocPitch(&hT, &ldhT, window_size*sizeof(double), n);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
ldhT /= sizeof(double);
err = hipMallocPitch(&vT, &ldvT, n*sizeof(double), 2*window_size);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
ldvT /= sizeof(double);
qT = vT + window_size*ldvT;
ldqT = ldvT;
// GEMM kernel streams
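// 'left' applies lQ^T from the left to the block of A (and B) to the right of
// the window, 'right' applies lZ from the right to the block above the window,
// and 'right_q' accumulates lQ/lZ into the global Q and Z, so these off-window
// updates can overlap with the reordering of the next window on the main stream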
hipStreamCreate(&left);
hipStreamCreate(&right);
hipStreamCreate(&right_q);
streams_created = 1;
// repeat until all chains have been processed
while (1) {
// place the window chain
int in_chain = 0;
for (int i = end; in_chain < window_size/2 && i < term; i++) {
if (_select[i]) {
in_chain++;
end = i+1;
}
}
// quit if the chain is empty
if (in_chain == 0)
goto cleanup;
// place the first window
int first = 1;
int wend = MIN(term, end+1);
int wbegin = MAX(begin, wend-window_size);
hipEvent_t left_ready;
hipEventCreate(&left_ready);
hipEventRecord(left_ready, stream);
hipEvent_t right_ready;
hipEventCreate(&right_ready);
hipEventRecord(right_ready, stream);
hipEvent_t right_q_ready;
hipEventCreate(&right_q_ready);
hipEventRecord(right_q_ready, stream);
// repeat until all windows in the current chain have been processed
int in_window = 0;
while(1) {
// calculate window size
int wsize = wend-wbegin;
// the main stream should wait until all right-hand side updates
// have finished
hipStreamWaitEvent(stream, right_ready, 0);
hipEventDestroy(right_ready);
// copy padded window from the matrix A
err = hipMemcpy2DAsync(_lA, ld_lA*sizeof(double),
A+(size_t)wbegin*ldA+wbegin, ldA*sizeof(double),
wsize*sizeof(double), wsize, hipMemcpyDeviceToHost, stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
err = hipStreamSynchronize(stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
// resize window if necessary
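// a zero sub-diagonal entry at the first/last position of the copied window
// means no 2-by-2 block straddles that edge, so the window can safely be
// shrunk by one row/column there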
double *__lA = _lA;
if (wbegin != begin && _lA[1] == 0.0) {
wbegin++;
__lA = _lA+ld_lA+1;
}
if (first && wend < term && _lA[(wsize-2)*ld_lA+wsize-1] == 0.0)
wend--;
wsize = wend-wbegin;
// copy window from the matrix B
if (B != NULL) {
err = hipMemcpy2DAsync(_lB, ld_lB*sizeof(double),
B+(size_t)wbegin*ldB+wbegin, ldB*sizeof(double),
wsize*sizeof(double), wsize, hipMemcpyDeviceToHost,
stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
}
// reorder the window
if (B != NULL) {
starneig_init_local_q(wsize, ld_lQ, _lQ);
starneig_init_local_q(wsize, ld_lZ, _lZ);
err = hipStreamSynchronize(stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
ret = starneig_dtgsen(0, wsize, ld_lQ, ld_lZ, ld_lA, ld_lB,
&in_window, _select+wbegin, _lQ, _lZ, __lA, _lB, _work);
}
else {
starneig_init_local_q(wsize, ld_lQ, _lQ);
ret = starneig_dtrsen(0, wsize, ld_lQ, ld_lA,
&in_window, _select+wbegin, _lQ, __lA, _work);
}
// store window
err = hipMemcpy2DAsync(
A+(size_t)wbegin*ldA+wbegin, ldA*sizeof(double),
__lA, ld_lA*sizeof(double),
wsize*sizeof(double), wsize, hipMemcpyHostToDevice, stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
if (B != NULL) {
err = hipMemcpy2DAsync(
B+(size_t)wbegin*ldB+wbegin, ldB*sizeof(double),
_lB, ld_lB*sizeof(double),
wsize*sizeof(double), wsize, hipMemcpyHostToDevice,
stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
}
// the main stream should wait until all left-hand side updates
// and Q/Z matrix updates have finished
hipStreamWaitEvent(stream, left_ready, 0);
hipEventDestroy(left_ready);
hipStreamWaitEvent(stream, right_q_ready, 0);
hipEventDestroy(right_q_ready);
// move transformation matrices to device memory
err = hipMemcpy2DAsync(
lQ, ldlQ*sizeof(double), _lQ, ld_lQ*sizeof(double),
wsize*sizeof(double), wsize, hipMemcpyHostToDevice, stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
if (B != NULL) {
err = hipMemcpy2DAsync(
lZ, ldlZ*sizeof(double), _lZ, ld_lZ*sizeof(double),
wsize*sizeof(double), wsize, hipMemcpyHostToDevice,
stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
}
hipEvent_t window_ready;
hipEventCreate(&window_ready);
hipEventRecord(window_ready, stream);
hipStreamWaitEvent(left, window_ready, 0);
hipStreamWaitEvent(right, window_ready, 0);
hipStreamWaitEvent(right_q, window_ready, 0);
hipEventDestroy(window_ready);
// apply the local transformation matrices lQ and lZ to Q and Z
if (Q != NULL)
right_gemm_update(right_q, handle,
0, MIN(term, end+1), wbegin, wend, ldlQ, ldQ, ldqT,
lQ, Q, qT);
if (Z != NULL)
right_gemm_update(right_q, handle,
0, MIN(term, end+1), wbegin, wend, ldlZ, ldZ, ldqT,
lZ, Z, qT);
// apply the local transformation matrices lQ and lZ to A
right_gemm_update(right, handle,
0, wbegin, wbegin, wend, ldlZ, ldA, ldvT, lZ, A, vT);
left_gemm_update(left, handle,
wbegin, wend, wend, n, ldlQ, ldA, ldhT, lQ, A, hT);
// apply the local transformation matrices lQ and lZ to B
if (B != NULL) {
right_gemm_update(right, handle,
0, wbegin, wbegin, wend, ldlZ, ldB, ldvT, lZ, B, vT);
left_gemm_update(left, handle,
wbegin, wend, wend, n, ldlQ, ldB, ldhT, lQ, B, hT);
}
// if an error occurred, mark the current window and everything
// below it tainted
if (ret != 0) {
for (int i = wbegin; i < n; i++)
_select[i] = 2;
goto cleanup;
}
hipEventCreate(&left_ready);
hipEventRecord(left_ready, left);
// quit if this was the topmost window in the chain
if (wbegin == begin)
break;
hipEventCreate(&right_ready);
hipEventRecord(right_ready, right);
hipEventCreate(&right_q_ready);
hipEventRecord(right_q_ready, right_q);
// place the next window such that it does not split any 2-by-2
// tiles
first = 0;
wend = MIN(term, wbegin + in_window);
wbegin = MAX(begin, wend-window_size);
}
// the main stream should wait until all left-hand side updates
// from the previous window chain have finished
hipStreamWaitEvent(stream, left_ready, 0);
hipEventDestroy(left_ready);
// advance downwards
begin += in_window;
}
cleanup:
err = hipMemcpyAsync(
select, _select, n*sizeof(int), hipMemcpyHostToDevice, stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
err = hipStreamSynchronize(stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
if (streams_created) {
err = hipStreamSynchronize(stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
err = hipStreamSynchronize(left);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
err = hipStreamSynchronize(right);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
err = hipStreamSynchronize(right_q);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
hipStreamDestroy(left);
hipStreamDestroy(right);
hipStreamDestroy(right_q);
hipblasSetStream(handle, stream);
}
hipHostFree(_select);
hipHostFree(_lA);
free(_work);
hipFree(lQ);
hipFree(vT);
hipFree(hT);
return ret;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
void starneig_cuda_reorder_window(void *buffers[], void *cl_arg)
{
struct packing_info packing_info_A, packing_info_B;
struct range_packing_info packing_info_selected;
int window_size, threshold, swaps;
starpu_codelet_unpack_args(cl_arg,
&packing_info_selected, &packing_info_A, &packing_info_B,
&window_size, &threshold, &swaps);
hipError_t err;
hipStream_t stream = starpu_cuda_get_local_stream();
hipblasHandle_t handle = starpu_cublas_get_local_handle();
hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_HOST);
hipblasSetStream(handle, stream);
int size = packing_info_A.rend - packing_info_A.rbegin;
int general = packing_info_B.handles != 0;
int k = 0;
// local matrix Q
struct starpu_matrix_interface *lQ_i =
(struct starpu_matrix_interface *)buffers[k++];
double *lQ_ptr = (double*) STARPU_MATRIX_GET_PTR(lQ_i);
int lQ_ld = STARPU_MATRIX_GET_LD(lQ_i);
init_local_q(stream, size, lQ_ld, lQ_ptr);
// local matrix Z
double *lZ_ptr = NULL;
int lZ_ld = 0;
if (general) {
struct starpu_matrix_interface *lZ_i =
(struct starpu_matrix_interface *)buffers[k++];
lZ_ptr = (double*) STARPU_MATRIX_GET_PTR(lZ_i);
lZ_ld = STARPU_MATRIX_GET_LD(lZ_i);
init_local_q(stream, size, lZ_ld, lZ_ptr);
}
// local matrix A
struct starpu_matrix_interface *lA_i =
(struct starpu_matrix_interface *)buffers[k++];
double *lA_ptr = (double*) STARPU_MATRIX_GET_PTR(lA_i);
int lA_ld = STARPU_MATRIX_GET_LD(lA_i);
// local matrix B
double *lB_ptr = NULL;
int lB_ld = 0;
if (general) {
struct starpu_matrix_interface *lB_i =
(struct starpu_matrix_interface *)buffers[k++];
lB_ptr = (double*) STARPU_MATRIX_GET_PTR(lB_i);
lB_ld = STARPU_MATRIX_GET_LD(lB_i);
}
// eigenvalue selection bitmap
int *selected;
err = hipMalloc(&selected, size*sizeof(int));
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
struct starpu_vector_interface **select_i =
(struct starpu_vector_interface **)buffers + k;
k += packing_info_selected.handles;
uintptr_t* selected_ds = starneig_cuda_prepare_join_range(
&packing_info_selected, (void **)select_i);
starneig_cuda_join_range(
stream, &packing_info_selected, selected_ds, selected, 0);
// corresponding tiles from the matrix A
struct starpu_matrix_interface **A_i =
(struct starpu_matrix_interface **)buffers + k;
k += packing_info_A.handles;
struct tile_addr *A_ds =
starneig_cuda_prepare_join_window(&packing_info_A, (void **)A_i);
starneig_cuda_join_diag_window(
stream, &packing_info_A, A_ds, lA_ld, lA_ptr, 0);
// corresponding tiles from the matrix B
struct tile_addr *B_ds = NULL;
if (general) {
struct starpu_matrix_interface **B_i =
(struct starpu_matrix_interface **)buffers + k;
k += packing_info_B.handles;
B_ds =
starneig_cuda_prepare_join_window(&packing_info_B, (void **)B_i);
starneig_cuda_join_diag_window(
stream, &packing_info_B, B_ds, lB_ld, lB_ptr, 0);
}
// reorder
reorder_window(stream, handle,
window_size, threshold, size, lQ_ld, lZ_ld, lA_ld, lB_ld,
selected, lQ_ptr, lZ_ptr, lA_ptr, lB_ptr);
// store result
starneig_cuda_join_range(
stream, &packing_info_selected, selected_ds, selected, 1);
starneig_cuda_join_diag_window(
stream, &packing_info_A, A_ds, lA_ld, lA_ptr, 1);
if (general)
starneig_cuda_join_diag_window(
stream, &packing_info_B, B_ds, lB_ld, lB_ptr, 1);
err = hipStreamSynchronize(stream);
if (err != hipSuccess)
STARPU_CUDA_REPORT_ERROR(err);
hipFree(selected);
}
| 24f8ca7691fab67249c58fc23afd7ecdc0cc2d32.cu | ///
/// @file
///
/// @author Mirko Myllykoski ([email protected]), Umeå University
///
/// @internal LICENSE
///
/// Copyright (c) 2019-2020, Umeå Universitet
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions are met:
///
/// 1. Redistributions of source code must retain the above copyright notice,
/// this list of conditions and the following disclaimer.
///
/// 2. Redistributions in binary form must reproduce the above copyright notice,
/// this list of conditions and the following disclaimer in the documentation
/// and/or other materials provided with the distribution.
///
/// 3. Neither the name of the copyright holder nor the names of its
/// contributors may be used to endorse or promote products derived from this
/// software without specific prior written permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
/// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
/// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
/// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
/// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
/// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
/// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
/// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
/// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
///
#include <starneig_config.h>
#include <starneig/configuration.h>
#include "cuda.h"
#include "lapack.h"
#include "../common/common.h"
#include "../common/tiles.h"
#include "../common/math.h"
#include <math.h>
#include <starpu.h>
#include <starpu_cublas_v2.h>
static const double one = 1.0;
static const double zero = 0.0;
static __global__ void _init_local_q(int n, int ld, double *ptr)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int idy = blockIdx.y*blockDim.y + threadIdx.y;
for(int j = idy; j < n; j += gridDim.y*blockDim.y)
for(int i = idx; i < n; i += gridDim.x*blockDim.x)
ptr[j*ld+i] = i == j ? 1.0 : 0.0;
}
static void init_local_q(cudaStream_t stream, int n, int ld, double *ptr)
{
dim3 threads(32,32);
dim3 blocks(MIN(5, divceil(n, threads.x)), MIN(5, divceil(n, threads.y)));
_init_local_q<<<blocks, threads, 0, stream>>>(n, ld, ptr);
cudaError err = cudaGetLastError();
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
}
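// The two helpers below apply a local orthogonal transformation to a block of
// a global matrix: left_gemm_update overwrites the block with Q^T * block and
// right_gemm_update overwrites it with block * Q, staging the original block
// in the scratch buffer T first.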
static void left_gemm_update(cudaStream_t stream, cublasHandle_t handle,
int rbegin, int rend, int cbegin, int cend, int ldQ, int ldA, int ldT,
double const *Q, double *A, double *T) {
int m = rend-rbegin;
int n = cend-cbegin;
int k = rend-rbegin;
if (m == 0 || n == 0)
return;
cudaError err = cudaMemcpy2DAsync(
T, ldT*sizeof(double), A+cbegin*ldA+rbegin, ldA*sizeof(double),
(rend-rbegin)*sizeof(double), cend-cbegin, cudaMemcpyDeviceToDevice,
stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
cublasSetStream(handle, stream);
cublasStatus_t cublas_err = cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N,
m, n, k, &one, Q, ldQ, T, ldT, &zero, A+cbegin*ldA+rbegin, ldA);
if (cublas_err != CUBLAS_STATUS_SUCCESS)
STARPU_CUBLAS_REPORT_ERROR(cublas_err);
}
static void right_gemm_update(cudaStream_t stream, cublasHandle_t handle,
int rbegin, int rend, int cbegin, int cend, int ldQ, int ldA, int ldT,
double const *Q, double *A, double *T) {
int m = rend-rbegin;
int n = cend-cbegin;
int k = cend-cbegin;
if (m == 0 || n == 0)
return;
double one = 1.0;
double zero = 0.0;
cudaError err = cudaMemcpy2DAsync(
T, ldT*sizeof(double), A+cbegin*ldA+rbegin, ldA*sizeof(double),
(rend-rbegin)*sizeof(double), cend-cbegin, cudaMemcpyDeviceToDevice,
stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
cublasSetStream(handle, stream);
cublasStatus_t cublas_err = cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
m, n, k, &one, T, ldT, Q, ldQ, &zero, A+cbegin*ldA+rbegin, ldA);
if (cublas_err != CUBLAS_STATUS_SUCCESS)
STARPU_CUBLAS_REPORT_ERROR(cublas_err);
}
static int reorder_window(cudaStream_t stream, cublasHandle_t handle,
int window_size, int threshold, int n, int ldQ, int ldZ, int ldA, int ldB,
int *select, double *Q, double *Z, double *A, double *B)
{
int ret = 0;
cudaError err;
int *_select = NULL;
double *_lA = NULL; size_t ld_lA = 0;
double *_lB = NULL; size_t ld_lB = 0;
double *_lQ = NULL; size_t ld_lQ = 0;
double *_lZ = NULL; size_t ld_lZ = 0;
double *_work = NULL;
double *lQ = NULL; size_t ldlQ = 0;
double *lZ = NULL; size_t ldlZ = 0;
double *vT = NULL; size_t ldvT = 0;
double *hT = NULL; size_t ldhT = 0;
double *qT = NULL; size_t ldqT = 0;
int streams_created = 0;
cudaStream_t left, right, right_q;
int begin = 0;
int end = 0;
// copy eigenvalue selection vector from device memory
err = cudaHostAlloc(&_select, n*sizeof(int), cudaHostAllocDefault);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
err = cudaMemcpyAsync(
_select, select, n*sizeof(int), cudaMemcpyDeviceToHost, stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
err = cudaStreamSynchronize(stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
// The window may already contain "tainted" tiles but even in that
// situation the window can be partially reordered. To be able to do it we
// need to identify the last non-tainted selected tile:
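// (selection encoding: 0 = not selected, 1 = selected, 2 = tainted)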
int term = 0;
for (int i = 0; i < n; i++) {
if (_select[i] == 2) {
// make sure that tainted section is marked tainted
for (int j = i; j < n; j++)
_select[j] = 2;
break;
}
if (_select[i] == 1)
term = i+1;
}
// exit if nothing can be done
if (term < 2) {
ret = 1;
goto cleanup;
}
// allocate work space for dtgsen/dtrsen
if (B != NULL)
_work = (double *) malloc((7*n+16)*sizeof(double));
else
_work = (double *) malloc(3*n*sizeof(double));
// make sure that the window is big enough and call
// *_starneig_reorder_window directly if it is not
if (n < threshold) {
if (B != NULL) {
ld_lA = ld_lB = ld_lQ = ld_lZ = divceil(n, 8)*8;
err = cudaHostAlloc(
&_lA, 4*n*ld_lA*sizeof(double), cudaHostAllocDefault);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
_lB = _lA + n*ld_lA;
_lQ = _lB + n*ld_lB;
_lZ = _lQ + n*ld_lQ;
}
else {
ld_lA = ld_lQ = divceil(n, 8)*8;
err = cudaHostAlloc(
&_lA, 2*n*ld_lA*sizeof(double), cudaHostAllocDefault);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
_lQ = _lA + n*ld_lA;
}
// copy A matrix
err = cudaMemcpy2DAsync(
_lA, ld_lA*sizeof(double), A, ldA*sizeof(double),
n*sizeof(double), n, cudaMemcpyDeviceToHost, stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
// copy Q matrix
err = cudaMemcpy2DAsync(
_lQ, ld_lQ*sizeof(double), Q, ldQ*sizeof(double),
n*sizeof(double), n, cudaMemcpyDeviceToHost, stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
if (B != NULL) {
// copy B matrix
err = cudaMemcpy2DAsync(
_lB, ld_lB*sizeof(double), B, ldB*sizeof(double),
n*sizeof(double), n, cudaMemcpyDeviceToHost, stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
// copy Z matrix
err = cudaMemcpy2DAsync(
_lZ, ld_lZ*sizeof(double), Z, ldZ*sizeof(double),
n*sizeof(double), n, cudaMemcpyDeviceToHost, stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
}
err = cudaStreamSynchronize(stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
// reorder
int m;
if (B != NULL)
ret = starneig_dtgsen(0, term, ld_lQ, ld_lZ, ld_lA, ld_lB, &m,
_select, _lQ, _lZ, _lA, _lB, _work);
else
ret = starneig_dtrsen(
0, term, ld_lQ, ld_lA, &m, _select, _lQ, _lA, _work);
// store A matrix
err = cudaMemcpy2DAsync(
A, ldA*sizeof(double), _lA, ld_lA*sizeof(double),
n*sizeof(double), n, cudaMemcpyHostToDevice, stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
// store Q matrix
err = cudaMemcpy2DAsync(
Q, ldQ*sizeof(double), _lQ, ld_lQ*sizeof(double),
n*sizeof(double), n, cudaMemcpyHostToDevice, stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
if (B != NULL) {
// store B matrix
err = cudaMemcpy2DAsync(
B, ldB*sizeof(double), _lB, ld_lB*sizeof(double),
n*sizeof(double), n, cudaMemcpyHostToDevice, stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
// store Z matrix
err = cudaMemcpy2DAsync(
Z, ldZ*sizeof(double), _lZ, ld_lZ*sizeof(double),
n*sizeof(double), n, cudaMemcpyHostToDevice, stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
}
// if an error occurred, mark the whole window tainted
if (ret != 0)
for (int i = 0; i < n; i++)
_select[i] = 2;
goto cleanup;
}
// allocate host workspace
if (B != NULL) {
ld_lA = ld_lB = ld_lQ = ld_lZ = divceil(window_size, 8)*8;
err = cudaHostAlloc(
&_lA, 4*window_size*ld_lA*sizeof(double), cudaHostAllocDefault);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
_lB = _lA + window_size*ld_lA;
_lQ = _lB + window_size*ld_lB;
_lZ = _lQ + window_size*ld_lQ;
}
else {
ld_lA = ld_lQ = divceil(window_size, 8)*8;
err = cudaHostAlloc(
&_lA, 2*window_size*ld_lA*sizeof(double), cudaHostAllocDefault);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
_lQ = _lA + window_size*ld_lA;
}
// device side local transformation matrices
if (B != NULL) {
err = cudaMallocPitch(
&lQ, &ldlQ, window_size*sizeof(double), 2*window_size);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
ldlQ /= sizeof(double);
lZ = lQ + window_size*ldlQ;
ldlZ = ldlQ;
}
else {
err = cudaMallocPitch(
&lQ, &ldlQ, window_size*sizeof(double), window_size);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
ldlQ /= sizeof(double);
lZ = lQ;
ldlZ = ldlQ;
}
// device side scratch buffers for GEMM kernels
err = cudaMallocPitch(&hT, &ldhT, window_size*sizeof(double), n);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
ldhT /= sizeof(double);
err = cudaMallocPitch(&vT, &ldvT, n*sizeof(double), 2*window_size);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
ldvT /= sizeof(double);
qT = vT + window_size*ldvT;
ldqT = ldvT;
// GEMM kernel streams
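// 'left' applies lQ^T from the left to the block of A (and B) to the right of
// the window, 'right' applies lZ from the right to the block above the window,
// and 'right_q' accumulates lQ/lZ into the global Q and Z, so these off-window
// updates can overlap with the reordering of the next window on the main stream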
cudaStreamCreate(&left);
cudaStreamCreate(&right);
cudaStreamCreate(&right_q);
streams_created = 1;
// repeat until all chains have been processed
while (1) {
// place the window chain
int in_chain = 0;
for (int i = end; in_chain < window_size/2 && i < term; i++) {
if (_select[i]) {
in_chain++;
end = i+1;
}
}
// quit if the chain is empty
if (in_chain == 0)
goto cleanup;
// place the first window
int first = 1;
int wend = MIN(term, end+1);
int wbegin = MAX(begin, wend-window_size);
cudaEvent_t left_ready;
cudaEventCreate(&left_ready);
cudaEventRecord(left_ready, stream);
cudaEvent_t right_ready;
cudaEventCreate(&right_ready);
cudaEventRecord(right_ready, stream);
cudaEvent_t right_q_ready;
cudaEventCreate(&right_q_ready);
cudaEventRecord(right_q_ready, stream);
// repeat until all windows in the current chain have been processed
int in_window = 0;
while(1) {
// calculate window size
int wsize = wend-wbegin;
// the main stream should wait until all right-hand side updates
// have finished
cudaStreamWaitEvent(stream, right_ready, 0);
cudaEventDestroy(right_ready);
// copy padded window from the matrix A
err = cudaMemcpy2DAsync(_lA, ld_lA*sizeof(double),
A+(size_t)wbegin*ldA+wbegin, ldA*sizeof(double),
wsize*sizeof(double), wsize, cudaMemcpyDeviceToHost, stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
err = cudaStreamSynchronize(stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
// resize window if necessary
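// a zero sub-diagonal entry at the first/last position of the copied window
// means no 2-by-2 block straddles that edge, so the window can safely be
// shrunk by one row/column there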
double *__lA = _lA;
if (wbegin != begin && _lA[1] == 0.0) {
wbegin++;
__lA = _lA+ld_lA+1;
}
if (first && wend < term && _lA[(wsize-2)*ld_lA+wsize-1] == 0.0)
wend--;
wsize = wend-wbegin;
// copy window from the matrix B
if (B != NULL) {
err = cudaMemcpy2DAsync(_lB, ld_lB*sizeof(double),
B+(size_t)wbegin*ldB+wbegin, ldB*sizeof(double),
wsize*sizeof(double), wsize, cudaMemcpyDeviceToHost,
stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
}
// reorder the window
if (B != NULL) {
starneig_init_local_q(wsize, ld_lQ, _lQ);
starneig_init_local_q(wsize, ld_lZ, _lZ);
err = cudaStreamSynchronize(stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
ret = starneig_dtgsen(0, wsize, ld_lQ, ld_lZ, ld_lA, ld_lB,
&in_window, _select+wbegin, _lQ, _lZ, __lA, _lB, _work);
}
else {
starneig_init_local_q(wsize, ld_lQ, _lQ);
ret = starneig_dtrsen(0, wsize, ld_lQ, ld_lA,
&in_window, _select+wbegin, _lQ, __lA, _work);
}
// store window
err = cudaMemcpy2DAsync(
A+(size_t)wbegin*ldA+wbegin, ldA*sizeof(double),
__lA, ld_lA*sizeof(double),
wsize*sizeof(double), wsize, cudaMemcpyHostToDevice, stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
if (B != NULL) {
err = cudaMemcpy2DAsync(
B+(size_t)wbegin*ldB+wbegin, ldB*sizeof(double),
_lB, ld_lB*sizeof(double),
wsize*sizeof(double), wsize, cudaMemcpyHostToDevice,
stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
}
// the main stream should wait until all left-hand side updates
// and Q/Z matrix updates have finished
cudaStreamWaitEvent(stream, left_ready, 0);
cudaEventDestroy(left_ready);
cudaStreamWaitEvent(stream, right_q_ready, 0);
cudaEventDestroy(right_q_ready);
// move transformation matrices to device memory
err = cudaMemcpy2DAsync(
lQ, ldlQ*sizeof(double), _lQ, ld_lQ*sizeof(double),
wsize*sizeof(double), wsize, cudaMemcpyHostToDevice, stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
if (B != NULL) {
err = cudaMemcpy2DAsync(
lZ, ldlZ*sizeof(double), _lZ, ld_lZ*sizeof(double),
wsize*sizeof(double), wsize, cudaMemcpyHostToDevice,
stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
}
cudaEvent_t window_ready;
cudaEventCreate(&window_ready);
cudaEventRecord(window_ready, stream);
cudaStreamWaitEvent(left, window_ready, 0);
cudaStreamWaitEvent(right, window_ready, 0);
cudaStreamWaitEvent(right_q, window_ready, 0);
cudaEventDestroy(window_ready);
// apply the local transformation matrices lQ and lZ to Q and Z
if (Q != NULL)
right_gemm_update(right_q, handle,
0, MIN(term, end+1), wbegin, wend, ldlQ, ldQ, ldqT,
lQ, Q, qT);
if (Z != NULL)
right_gemm_update(right_q, handle,
0, MIN(term, end+1), wbegin, wend, ldlZ, ldZ, ldqT,
lZ, Z, qT);
// apply the local transformation matrices lQ and lZ to A
right_gemm_update(right, handle,
0, wbegin, wbegin, wend, ldlZ, ldA, ldvT, lZ, A, vT);
left_gemm_update(left, handle,
wbegin, wend, wend, n, ldlQ, ldA, ldhT, lQ, A, hT);
// apply the local transformation matrices lQ and lZ to B
if (B != NULL) {
right_gemm_update(right, handle,
0, wbegin, wbegin, wend, ldlZ, ldB, ldvT, lZ, B, vT);
left_gemm_update(left, handle,
wbegin, wend, wend, n, ldlQ, ldB, ldhT, lQ, B, hT);
}
// if an error occurred, mark the current window and everything
// below it tainted
if (ret != 0) {
for (int i = wbegin; i < n; i++)
_select[i] = 2;
goto cleanup;
}
cudaEventCreate(&left_ready);
cudaEventRecord(left_ready, left);
// quit if this was the topmost window in the chain
if (wbegin == begin)
break;
cudaEventCreate(&right_ready);
cudaEventRecord(right_ready, right);
cudaEventCreate(&right_q_ready);
cudaEventRecord(right_q_ready, right_q);
// place the next window such that it does not split any 2-by-2
// tiles
first = 0;
wend = MIN(term, wbegin + in_window);
wbegin = MAX(begin, wend-window_size);
}
// the main stream should wait until all left-hand side updates
// from the previous window chain have finished
cudaStreamWaitEvent(stream, left_ready, 0);
cudaEventDestroy(left_ready);
// advance downwards
begin += in_window;
}
cleanup:
err = cudaMemcpyAsync(
select, _select, n*sizeof(int), cudaMemcpyHostToDevice, stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
err = cudaStreamSynchronize(stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
if (streams_created) {
err = cudaStreamSynchronize(stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
err = cudaStreamSynchronize(left);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
err = cudaStreamSynchronize(right);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
err = cudaStreamSynchronize(right_q);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
cudaStreamDestroy(left);
cudaStreamDestroy(right);
cudaStreamDestroy(right_q);
cublasSetStream(handle, stream);
}
cudaFreeHost(_select);
cudaFreeHost(_lA);
free(_work);
cudaFree(lQ);
cudaFree(vT);
cudaFree(hT);
return ret;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
void starneig_cuda_reorder_window(void *buffers[], void *cl_arg)
{
struct packing_info packing_info_A, packing_info_B;
struct range_packing_info packing_info_selected;
int window_size, threshold, swaps;
starpu_codelet_unpack_args(cl_arg,
&packing_info_selected, &packing_info_A, &packing_info_B,
&window_size, &threshold, &swaps);
cudaError err;
cudaStream_t stream = starpu_cuda_get_local_stream();
cublasHandle_t handle = starpu_cublas_get_local_handle();
cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST);
cublasSetStream(handle, stream);
int size = packing_info_A.rend - packing_info_A.rbegin;
int general = packing_info_B.handles != 0;
int k = 0;
// local matrix Q
struct starpu_matrix_interface *lQ_i =
(struct starpu_matrix_interface *)buffers[k++];
double *lQ_ptr = (double*) STARPU_MATRIX_GET_PTR(lQ_i);
int lQ_ld = STARPU_MATRIX_GET_LD(lQ_i);
init_local_q(stream, size, lQ_ld, lQ_ptr);
// local matrix Z
double *lZ_ptr = NULL;
int lZ_ld = 0;
if (general) {
struct starpu_matrix_interface *lZ_i =
(struct starpu_matrix_interface *)buffers[k++];
lZ_ptr = (double*) STARPU_MATRIX_GET_PTR(lZ_i);
lZ_ld = STARPU_MATRIX_GET_LD(lZ_i);
init_local_q(stream, size, lZ_ld, lZ_ptr);
}
// local matrix A
struct starpu_matrix_interface *lA_i =
(struct starpu_matrix_interface *)buffers[k++];
double *lA_ptr = (double*) STARPU_MATRIX_GET_PTR(lA_i);
int lA_ld = STARPU_MATRIX_GET_LD(lA_i);
// local matrix B
double *lB_ptr = NULL;
int lB_ld = 0;
if (general) {
struct starpu_matrix_interface *lB_i =
(struct starpu_matrix_interface *)buffers[k++];
lB_ptr = (double*) STARPU_MATRIX_GET_PTR(lB_i);
lB_ld = STARPU_MATRIX_GET_LD(lB_i);
}
// eigenvalue selection bitmap
int *selected;
err = cudaMalloc(&selected, size*sizeof(int));
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
struct starpu_vector_interface **select_i =
(struct starpu_vector_interface **)buffers + k;
k += packing_info_selected.handles;
uintptr_t* selected_ds = starneig_cuda_prepare_join_range(
&packing_info_selected, (void **)select_i);
starneig_cuda_join_range(
stream, &packing_info_selected, selected_ds, selected, 0);
// corresponding tiles from the matrix A
struct starpu_matrix_interface **A_i =
(struct starpu_matrix_interface **)buffers + k;
k += packing_info_A.handles;
struct tile_addr *A_ds =
starneig_cuda_prepare_join_window(&packing_info_A, (void **)A_i);
starneig_cuda_join_diag_window(
stream, &packing_info_A, A_ds, lA_ld, lA_ptr, 0);
// corresponding tiles from the matrix B
struct tile_addr *B_ds = NULL;
if (general) {
struct starpu_matrix_interface **B_i =
(struct starpu_matrix_interface **)buffers + k;
k += packing_info_B.handles;
B_ds =
starneig_cuda_prepare_join_window(&packing_info_B, (void **)B_i);
starneig_cuda_join_diag_window(
stream, &packing_info_B, B_ds, lB_ld, lB_ptr, 0);
}
// reorder
reorder_window(stream, handle,
window_size, threshold, size, lQ_ld, lZ_ld, lA_ld, lB_ld,
selected, lQ_ptr, lZ_ptr, lA_ptr, lB_ptr);
// store result
starneig_cuda_join_range(
stream, &packing_info_selected, selected_ds, selected, 1);
starneig_cuda_join_diag_window(
stream, &packing_info_A, A_ds, lA_ld, lA_ptr, 1);
if (general)
starneig_cuda_join_diag_window(
stream, &packing_info_B, B_ds, lB_ld, lB_ptr, 1);
err = cudaStreamSynchronize(stream);
if (err != cudaSuccess)
STARPU_CUDA_REPORT_ERROR(err);
cudaFree(selected);
}
|
4ca715b549fb5784fe9be5f9d70305bbc34ec63f.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
#include <algorithm>
#include <random>
#include <chrono>
#include <sstream>
#include <exception>
#include <string>
#include <cstddef>
#include <hip/hip_runtime.h>
#include "../helpers.hpp"
/************************** SPMV IMPLEMENTATIONS BEGIN **********************/
namespace cpu_impl {
template <class T>
void spmv_csr(int M, int N, const int* RESTRICT row_ptr, const int* RESTRICT col_idxs, const T* RESTRICT values, const T* RESTRICT x, T* RESTRICT output)
{
for (int i = 0; i < M; i++)
{
// row i
auto row_start = row_ptr[i];
auto row_end = row_ptr[i + 1];
T accumulator = 0;
for (int idx = row_start; idx < row_end; idx++)
{
auto c = col_idxs[idx];
auto v = values[idx];
accumulator += v * x[c];
}
output[i] = accumulator;
}
}
}
namespace gpu_impl {
template <class T> __device__ T load_streaming(T* x) { return *x; }
template <> inline __device__ int load_streaming(int* x) {
int value;
asm("ld.global.cs.u32 %0, [%1];" : "=r"(value) : "l"(x));
return value;
}
template <> inline __device__ float load_streaming(float* x) {
float value;
asm("ld.global.cs.f32 %0, [%1];" : "=f"(value) : "l"(x));
return value;
}
template <> inline __device__ double load_streaming(double* x) {
double value;
asm("ld.global.cs.f64 %0, [%1];" : "=d"(value) : "l"(x));
return value;
}
template <class T>
__global__
void spmv_csr(int M, int N, const int* CUDA_RESTRICT row_ptr, const int* CUDA_RESTRICT col_idxs, const T* CUDA_RESTRICT values, const T* CUDA_RESTRICT x, T* CUDA_RESTRICT output)
{
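// one warp per row: the lanes stride across the row's nonzeros and the
// per-lane partial sums are combined with a warp-level reduction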
constexpr int WARP_SIZE = 32;
int threadId = threadIdx.x + blockIdx.x * blockDim.x;
int warpId = threadId / WARP_SIZE;
int laneId = threadId % WARP_SIZE;
int num_warps = blockDim.x * gridDim.x / WARP_SIZE;
#pragma unroll 1
for (int i = warpId; i < M; i += num_warps)
{
// row i
auto row_start = row_ptr[i];
auto row_end = row_ptr[i + 1];
T local_accumulator = 0;
#pragma unroll 4
for (int idx = row_start + laneId; idx < row_end; idx += WARP_SIZE)
{
auto c = col_idxs[idx];
auto v = values[idx];
local_accumulator += v * x[c];
}
auto total = warpReduceSum(local_accumulator);
if (laneId == 0)
output[i] = total;
}
}
}
/************************** SPMV IMPLEMENTATIONS END **********************/
int main ()
{
using T = double;
constexpr int M = 4096, N = 4096;
constexpr T THRESHOLD = 1e-2;
constexpr float SPARSITY = 0.9;
// CSR faster for random sparse matrices
std::vector<int> row_ptr, col_idxs;
std::vector<T> values;
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<float> dist(-1, 1);
row_ptr.push_back(0);
for (int i = 0; i < M; i++)
{
for (int j = 0; j < N; j++)
{
auto prob = dist(gen);
if (std::abs(prob) > SPARSITY)
{
col_idxs.push_back(j);
values.push_back(dist(gen));
}
}
row_ptr.push_back(values.size());
}
std::vector<T> x(N), output_cpu(M);
random_fill(std::begin(x), std::end(x));
int* d_row_ptr, *d_col_idxs;
T *d_values;
CHECK_CUDA(hipMalloc(&d_row_ptr, row_ptr.size() * sizeof(int)));
CHECK_CUDA(hipMalloc(&d_col_idxs, col_idxs.size() * sizeof(int)));
CHECK_CUDA(hipMalloc(&d_values, values.size() * sizeof(T)));
T* d_x;
CHECK_CUDA(hipMalloc(&d_x, x.size() * sizeof(T)));
T* d_output;
CHECK_CUDA(hipMalloc(&d_output, output_cpu.size() * sizeof(T)));
std::cout << "CPU Evaluation:\n";
{
auto cpu_time = benchmark([&] {
cpu_impl::spmv_csr(M, N, row_ptr.data(), col_idxs.data(), values.data(), x.data(), output_cpu.data());
});
std::cout << "\tRunning time: " << to_milliseconds(cpu_time).count() << "ms\n";
}
std::cout << std::endl;
std::vector<T> output_gpu(output_cpu.size());
std::cout << "GPU Evaluation:\n";
{
auto cpu_time = benchmark([&] {
CHECK_CUDA(hipMemcpy(d_row_ptr, row_ptr.data(), row_ptr.size() * sizeof(int), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(d_col_idxs, col_idxs.data(), col_idxs.size() * sizeof(int), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(d_values, values.data(), values.size() * sizeof(T), hipMemcpyHostToDevice));
CHECK_CUDA(hipMemcpy(d_x, x.data(), x.size() * sizeof(T), hipMemcpyHostToDevice));
int grid_size = 0, block_size = 0;
CHECK_CUDA(hipOccupancyMaxPotentialBlockSize(&grid_size, &block_size, gpu_impl::spmv_csr<T>, 0));
hipLaunchKernelGGL(( gpu_impl::spmv_csr), dim3(grid_size), dim3(block_size), 0, 0, M, N, d_row_ptr, d_col_idxs, d_values, d_x, d_output);
CHECK_CUDA(hipMemcpy(output_gpu.data(), d_output, output_gpu.size() * sizeof(T), hipMemcpyDeviceToHost));
});
std::cout << "\tRunning time (incl. memory copy): " << to_milliseconds(cpu_time).count() << "ms\n" << std::endl;
}
auto pr = check_result(std::begin(output_cpu), std::end(output_cpu), std::begin(output_gpu), THRESHOLD);
print_result(std::begin(output_cpu), std::end(output_cpu), std::begin(output_gpu), THRESHOLD);
CHECK_CUDA(hipFree(d_row_ptr));
CHECK_CUDA(hipFree(d_col_idxs));
CHECK_CUDA(hipFree(d_values));
CHECK_CUDA(hipFree(d_x));
CHECK_CUDA(hipFree(d_output));
return 0;
} | 4ca715b549fb5784fe9be5f9d70305bbc34ec63f.cu | #include <iostream>
#include <vector>
#include <algorithm>
#include <random>
#include <chrono>
#include <sstream>
#include <exception>
#include <string>
#include <cstddef>
#include <cuda_runtime.h>
#include "../helpers.hpp"
/************************** SPMV IMPLEMENTATIONS BEGIN **********************/
namespace cpu_impl {
template <class T>
void spmv_csr(int M, int N, const int* RESTRICT row_ptr, const int* RESTRICT col_idxs, const T* RESTRICT values, const T* RESTRICT x, T* RESTRICT output)
{
for (int i = 0; i < M; i++)
{
// row i
auto row_start = row_ptr[i];
auto row_end = row_ptr[i + 1];
T accumulator = 0;
for (int idx = row_start; idx < row_end; idx++)
{
auto c = col_idxs[idx];
auto v = values[idx];
accumulator += v * x[c];
}
output[i] = accumulator;
}
}
}
namespace gpu_impl {
template <class T> __device__ T load_streaming(T* x) { return *x; }
template <> inline __device__ int load_streaming(int* x) {
int value;
asm("ld.global.cs.u32 %0, [%1];" : "=r"(value) : "l"(x));
return value;
}
template <> inline __device__ float load_streaming(float* x) {
float value;
asm("ld.global.cs.f32 %0, [%1];" : "=f"(value) : "l"(x));
return value;
}
template <> inline __device__ double load_streaming(double* x) {
double value;
asm("ld.global.cs.f64 %0, [%1];" : "=d"(value) : "l"(x));
return value;
}
template <class T>
__global__
void spmv_csr(int M, int N, const int* CUDA_RESTRICT row_ptr, const int* CUDA_RESTRICT col_idxs, const T* CUDA_RESTRICT values, const T* CUDA_RESTRICT x, T* CUDA_RESTRICT output)
{
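// one warp per row: the lanes stride across the row's nonzeros and the
// per-lane partial sums are combined with a warp-level reduction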
constexpr int WARP_SIZE = 32;
int threadId = threadIdx.x + blockIdx.x * blockDim.x;
int warpId = threadId / WARP_SIZE;
int laneId = threadId % WARP_SIZE;
int num_warps = blockDim.x * gridDim.x / WARP_SIZE;
#pragma unroll 1
for (int i = warpId; i < M; i += num_warps)
{
// row i
auto row_start = row_ptr[i];
auto row_end = row_ptr[i + 1];
T local_accumulator = 0;
#pragma unroll 4
for (int idx = row_start + laneId; idx < row_end; idx += WARP_SIZE)
{
auto c = col_idxs[idx];
auto v = values[idx];
local_accumulator += v * x[c];
}
auto total = warpReduceSum(local_accumulator);
if (laneId == 0)
output[i] = total;
}
}
}
/************************** SPMV IMPLEMENTATIONS END **********************/
int main ()
{
using T = double;
constexpr int M = 4096, N = 4096;
constexpr T THRESHOLD = 1e-2;
constexpr float SPARSITY = 0.9;
// CSR faster for random sparse matrices
std::vector<int> row_ptr, col_idxs;
std::vector<T> values;
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<float> dist(-1, 1);
row_ptr.push_back(0);
for (int i = 0; i < M; i++)
{
for (int j = 0; j < N; j++)
{
auto prob = dist(gen);
if (std::abs(prob) > SPARSITY)
{
col_idxs.push_back(j);
values.push_back(dist(gen));
}
}
row_ptr.push_back(values.size());
}
std::vector<T> x(N), output_cpu(M);
random_fill(std::begin(x), std::end(x));
int* d_row_ptr, *d_col_idxs;
T *d_values;
CHECK_CUDA(cudaMalloc(&d_row_ptr, row_ptr.size() * sizeof(int)));
CHECK_CUDA(cudaMalloc(&d_col_idxs, col_idxs.size() * sizeof(int)));
CHECK_CUDA(cudaMalloc(&d_values, values.size() * sizeof(T)));
T* d_x;
CHECK_CUDA(cudaMalloc(&d_x, x.size() * sizeof(T)));
T* d_output;
CHECK_CUDA(cudaMalloc(&d_output, output_cpu.size() * sizeof(T)));
std::cout << "CPU Evaluation:\n";
{
auto cpu_time = benchmark([&] {
cpu_impl::spmv_csr(M, N, row_ptr.data(), col_idxs.data(), values.data(), x.data(), output_cpu.data());
});
std::cout << "\tRunning time: " << to_milliseconds(cpu_time).count() << "ms\n";
}
std::cout << std::endl;
std::vector<T> output_gpu(output_cpu.size());
std::cout << "GPU Evaluation:\n";
{
auto cpu_time = benchmark([&] {
CHECK_CUDA(cudaMemcpy(d_row_ptr, row_ptr.data(), row_ptr.size() * sizeof(int), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(d_col_idxs, col_idxs.data(), col_idxs.size() * sizeof(int), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(d_values, values.data(), values.size() * sizeof(T), cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(d_x, x.data(), x.size() * sizeof(T), cudaMemcpyHostToDevice));
int grid_size = 0, block_size = 0;
CHECK_CUDA(cudaOccupancyMaxPotentialBlockSize(&grid_size, &block_size, gpu_impl::spmv_csr<T>, 0));
gpu_impl::spmv_csr<<<grid_size, block_size>>>(M, N, d_row_ptr, d_col_idxs, d_values, d_x, d_output);
CHECK_CUDA(cudaMemcpy(output_gpu.data(), d_output, output_gpu.size() * sizeof(T), cudaMemcpyDeviceToHost));
});
std::cout << "\tRunning time (incl. memory copy): " << to_milliseconds(cpu_time).count() << "ms\n" << std::endl;
}
auto pr = check_result(std::begin(output_cpu), std::end(output_cpu), std::begin(output_gpu), THRESHOLD);
print_result(std::begin(output_cpu), std::end(output_cpu), std::begin(output_gpu), THRESHOLD);
CHECK_CUDA(cudaFree(d_row_ptr));
CHECK_CUDA(cudaFree(d_col_idxs));
CHECK_CUDA(cudaFree(d_values));
CHECK_CUDA(cudaFree(d_x));
CHECK_CUDA(cudaFree(d_output));
return 0;
} |
330841ef60b57f2ef69de0b296026dbf9eaa1f3f.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
double seconds();
int cpuReduce(int *N, int const size);
__global__ void warmup(int *I, int *O, unsigned int *N);
__global__ void gpuReduceRecursive(int *I, int *O, unsigned int n);
__global__ void gpuReduceRecursiveL(int *I, int *O, unsigned int n);
__global__ void gpuReduceInterleaved(int *I, int *O, unsigned int n);
__global__ void gpuReduceInterleavedUnrolling2(int *I, int *O, unsigned int n);
int main(int argc, char **argv) {
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
CHECK(hipSetDevice(dev));
printf("%s starting... reduction\n", argv[0]);
printf("Using device %d: %s\n", dev, deviceProp.name);
// initialization
int size = 1 << 14;
printf("With array size %d\n", size);
// execution configuration
int blockSize = 512;
if (argc > 1) blockSize = atoi(argv[1]);
dim3 block(blockSize, 1);
dim3 grid((size + block.x - 1) / block.x, 1);
printf("grid(%d,1), block(%d,1)\n", grid.x, block.x);
// allocate host memory
size_t bytes = size * sizeof(int);
int *ipt = (int*)malloc(bytes);
int *opt = (int*)malloc(grid.x * sizeof(int));
int *tmp = (int*)malloc(bytes);
// allocate device memory
int *d_I, *d_O;
CHECK(hipMalloc((int**)&d_I, bytes));
CHECK(hipMalloc((int**)&d_O, grid.x * sizeof(int)));
// initialize host array
for (int i = 0; i < size; i++)
ipt[i] = (int)(rand() & 0xFF);
memcpy(tmp, ipt, bytes);
double iStart, iElaps;
int cpuSum, gpuSum;
// ---------- CPU reduce ---------- //
iStart = seconds();
cpuSum = cpuReduce(tmp, size);
iElaps = seconds() - iStart;
printf("CPU: %lfs\n", iElaps);
// ---------- KERNEL 1: Original Reduce ---------- //
CHECK(hipMemcpy(d_I, ipt, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( gpuReduceRecursive), dim3(grid), dim3(block), 0, 0, d_I, d_O, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("GPU(KERNEL 1): %lfs\n", iElaps);
CHECK(hipMemcpy(opt, d_O, grid.x * sizeof(int), hipMemcpyDeviceToHost));
gpuSum = 0;
for(int i = 0; i < grid.x; i++)
gpuSum += opt[i];
if (gpuSum != cpuSum)
printf("Kernel 1 does not match.\nCPU: %d GPU %d\n", cpuSum, gpuSum);
// ---------- KERNEL 2: Original Reduce L ---------- //
CHECK(hipMemcpy(d_I, ipt, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( gpuReduceRecursiveL), dim3(grid), dim3(block), 0, 0, d_I, d_O, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("GPU(KERNEL 2): %lfs\n", iElaps);
CHECK(hipMemcpy(opt, d_O, grid.x * sizeof(int), hipMemcpyDeviceToHost));
gpuSum = 0;
for(int i = 0; i < grid.x; i++)
gpuSum += opt[i];
if (gpuSum != cpuSum)
printf("Kernel 2 does not match.\nCPU: %d GPU %d\n", cpuSum, gpuSum);
// ---------- KERNEL 3: Interleaved Reduce ---------- //
CHECK(hipMemcpy(d_I, ipt, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( gpuReduceInterleaved), dim3(grid), dim3(block), 0, 0, d_I, d_O, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("GPU(KERNEL 3): %lfs\n", iElaps);
CHECK(hipMemcpy(opt, d_O, grid.x * sizeof(int), hipMemcpyDeviceToHost));
gpuSum = 0;
for(int i = 0; i < grid.x; i++)
gpuSum += opt[i];
if (gpuSum != cpuSum)
printf("Kernel 3 does not match.\nCPU: %d GPU %d\n", cpuSum, gpuSum);
// ---------- KERNEL 4: Interleaved Reduce, Unrolling 2 ---------- //
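// each block now reduces two adjacent input blocks, so only grid.x / 2
// blocks are launched and only grid.x / 2 partial sums are read back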
CHECK(hipMemcpy(d_I, ipt, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( gpuReduceInterleavedUnrolling2), dim3(grid.x / 2), dim3(block), 0, 0, d_I, d_O, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("GPU(KERNEL 4): %lfs\n", iElaps);
CHECK(hipMemcpy(opt, d_O, grid.x * sizeof(int) / 2, hipMemcpyDeviceToHost));
gpuSum = 0;
for(int i = 0; i < grid.x / 2; i++)
gpuSum += opt[i];
if (gpuSum != cpuSum)
printf("Kernel 4 does not match.\nCPU: %d GPU %d\n", cpuSum, gpuSum);
// ---------- KERNEL 5: TODO ---------- //
// TODO: copy host data to device
// TODO: reduce on device
// TODO: copy device result to host
// TODO: print gpu time
// TODO: compute gpu sum
// TODO: check gpu sum
free(ipt);
free(opt);
free(tmp);
CHECK(hipFree(d_I));
CHECK(hipFree(d_O));
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
int cpuReduce(int *N, const int size) {
if (size == 1) return N[0];
int stride = size / 2;
for(int i = 0; i < stride; i++)
N[i] += N[i + stride];
return cpuReduce(N, stride);
}
/*
EXAMPLE:
Loop 1:
stride :<_>_ _ _ _ _ _ _
block :|_|_|_|_|_|_|_|_|
thread : 0 1 2 3 4 5 6 7
| / | / | / | /
|/ |/ |/ |/
inactive: | 1 | 3 | 5 | 7 ===> (tid % (2 * 1) != 0)
active : 0 2 4 6 ===> (tid % (2 * 1) == 0)
Loop 2:
stride :<_ _>_ _ _ _ _ _
block :|_|_|_|_|_|_|_|_|
thread : 0 2 4 6
| / | /
| / | /
inactive: |/ 2 |/ 6 ===> (tid % (2 * 2) != 0)
active : 0 4 ===> (tid % (2 * 2) == 0)
Loop 3:
stride :<_ _ _ _>_ _ _ _
block :|_|_|_|_|_|_|_|_|
thread : 0 4
| /
| /
| /
|/
inactive: | 4 ===> (tid % (2 * 4) != 0)
active : 0 ===> (tid % (2 * 4) == 0)
ANALYZE:
Only about half of the threads are active even in the first step, and the active fraction halves on every step.
*/
__global__ void gpuReduceRecursive(int *I, int *O, unsigned int n) {
unsigned int tid = threadIdx.x;
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= n) return;
int *N = I + blockIdx.x * blockDim.x; // N: begin address of global memory in block
for (int stride = 1; stride < blockDim.x; stride *= 2) {
if ((tid % (2 * stride)) == 0)
N[tid] += N[tid + stride];
__syncthreads();
}
if (tid == 0) O[blockIdx.x] = N[0];
}
/*
EXAMPLE
_ _ _ _ _ _ _ _
block :|_|_|_|_|_|_|_|_|
Loop1 : 0 | 1 | 2 | 3 | 4...7(inactive)
|/ |/ |/ |/
Loop2 : 0 | 1 | 2...7(inactive)
| / | /
| / | /
|/ |/
Loop3 : 0_______| 1...7(inactive)
|
0
ANALYZE
A contiguous group of threads stays active while the rest become inactive.
*/
__global__ void gpuReduceRecursiveL(int *I, int *O, unsigned int n) {
unsigned int tid = threadIdx.x;
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= n) return;
int *N = I + blockIdx.x * blockDim.x; // N: begin address of global memory in block
for (int stride = 1; stride < blockDim.x; stride *= 2) {
int index = 2 * stride * tid;
if (index < blockDim.x)
N[index] += N[index + stride];
__syncthreads();
}
if (tid == 0) O[blockIdx.x] = N[0];
}
/*
EXAMPLE
_ _ _ _ _ _ _ _
block :|_|_|_|_|_|_|_|_|
Loop1 : 0 1 2 3(4 5 6 7)
| | | |/ / / /
| | |/|/ / /
| |/|/|/ /
|/|/|/|/
Loop2 : 0 1(2 3)
| |/ /
|/|/
Loop3 : 0(1)
|/
0
*: threads in () are inactive.
ANALYZE
More efficient memory access: active threads touch consecutive elements, so global loads and stores coalesce.
*/
__global__ void gpuReduceInterleaved(int *I, int *O, unsigned int n) {
unsigned int tid = threadIdx.x;
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= n) return;
int *N = I + blockIdx.x * blockDim.x; // N: begin address of global memory in block
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride)
N[tid] += N[tid + stride];
__syncthreads();
}
if (tid == 0) O[blockIdx.x] = N[0];
}
/*
EXAMPLE:
_ _ _ _ _ _ _ _
block :|_|_|_|_|_|_|_|_|
0 1 2 3 4 5 6 7
unroll : | | | |/ / / /
| | |/|/ / /
| |/|/|/ /
|/|/|/|/
0 1 2 3
gpuReduceInterleaved();
*/
__global__ void gpuReduceInterleavedUnrolling2(int *I, int *O, unsigned int n){
unsigned int tid = threadIdx.x;
unsigned int idx = threadIdx.x + 2 * blockIdx.x * blockDim.x;
if (idx + blockDim.x < n) I[idx] += I[idx + blockDim.x]; // unroll
__syncthreads();
int *N = I + 2 * blockIdx.x * blockDim.x;
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) N[tid] += N[tid + stride];
__syncthreads();
}
if (tid == 0) O[blockIdx.x] = N[0];
}
| 330841ef60b57f2ef69de0b296026dbf9eaa1f3f.cu | #include "common.h"
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
double seconds();
int cpuReduce(int *N, int const size);
__global__ void warmup(int *I, int *O, unsigned int *N);
__global__ void gpuReduceRecursive(int *I, int *O, unsigned int n);
__global__ void gpuReduceRecursiveL(int *I, int *O, unsigned int n);
__global__ void gpuReduceInterleaved(int *I, int *O, unsigned int n);
__global__ void gpuReduceInterleavedUnrolling2(int *I, int *O, unsigned int n);
int main(int argc, char **argv) {
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
CHECK(cudaSetDevice(dev));
printf("%s starting... reduction\n", argv[0]);
printf("Using device %d: %s\n", dev, deviceProp.name);
// initialization
int size = 1 << 14;
printf("With array size %d\n", size);
// execution configuration
int blockSize = 512;
if (argc > 1) blockSize = atoi(argv[1]);
dim3 block(blockSize, 1);
dim3 grid((size + block.x - 1) / block.x, 1);
printf("grid(%d,1), block(%d,1)\n", grid.x, block.x);
// allocate host memory
size_t bytes = size * sizeof(int);
int *ipt = (int*)malloc(bytes);
int *opt = (int*)malloc(grid.x * sizeof(int));
int *tmp = (int*)malloc(bytes);
// allocate device memory
int *d_I, *d_O;
CHECK(cudaMalloc((int**)&d_I, bytes));
CHECK(cudaMalloc((int**)&d_O, grid.x * sizeof(int)));
// initialize host array
for (int i = 0; i < size; i++)
ipt[i] = (int)(rand() & 0xFF);
memcpy(tmp, ipt, bytes);
double iStart, iElaps;
int cpuSum, gpuSum;
// ---------- CPU reduce ---------- //
iStart = seconds();
cpuSum = cpuReduce(tmp, size);
iElaps = seconds() - iStart;
printf("CPU: %lfs\n", iElaps);
// ---------- KERNEL 1: Original Reduce ---------- //
CHECK(cudaMemcpy(d_I, ipt, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
gpuReduceRecursive<<<grid, block>>>(d_I, d_O, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("GPU(KERNEL 1): %lfs\n", iElaps);
CHECK(cudaMemcpy(opt, d_O, grid.x * sizeof(int), cudaMemcpyDeviceToHost));
gpuSum = 0;
for(int i = 0; i < grid.x; i++)
gpuSum += opt[i];
if (gpuSum != cpuSum)
printf("Kernel 1 does not match.\nCPU: %d GPU %d\n", cpuSum, gpuSum);
// ---------- KERNEL 2: Original Reduce L ---------- //
CHECK(cudaMemcpy(d_I, ipt, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
gpuReduceRecursiveL<<<grid, block>>>(d_I, d_O, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("GPU(KERNEL 2): %lfs\n", iElaps);
CHECK(cudaMemcpy(opt, d_O, grid.x * sizeof(int), cudaMemcpyDeviceToHost));
gpuSum = 0;
for(int i = 0; i < grid.x; i++)
gpuSum += opt[i];
if (gpuSum != cpuSum)
printf("Kernel 2 does not match.\nCPU: %d GPU %d\n", cpuSum, gpuSum);
// ---------- KERNEL 3: Interleaved Reduce ---------- //
CHECK(cudaMemcpy(d_I, ipt, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
gpuReduceInterleaved<<<grid, block>>>(d_I, d_O, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("GPU(KERNEL 3): %lfs\n", iElaps);
CHECK(cudaMemcpy(opt, d_O, grid.x * sizeof(int), cudaMemcpyDeviceToHost));
gpuSum = 0;
for(int i = 0; i < grid.x; i++)
gpuSum += opt[i];
if (gpuSum != cpuSum)
printf("Kernel 3 does not match.\nCPU: %d GPU %d\n", cpuSum, gpuSum);
// ---------- KERNEL 4: Interleaved Reduce, Unrolling 2 ---------- //
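// each block now reduces two adjacent input blocks, so only grid.x / 2
// blocks are launched and only grid.x / 2 partial sums are read back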
CHECK(cudaMemcpy(d_I, ipt, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
gpuReduceInterleavedUnrolling2<<<grid.x / 2, block>>>(d_I, d_O, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("GPU(KERNEL 4): %lfs\n", iElaps);
CHECK(cudaMemcpy(opt, d_O, grid.x * sizeof(int) / 2, cudaMemcpyDeviceToHost));
gpuSum = 0;
for(int i = 0; i < grid.x / 2; i++)
gpuSum += opt[i];
if (gpuSum != cpuSum)
printf("Kernel 4 does not match.\nCPU: %d GPU %d\n", cpuSum, gpuSum);
// ---------- KERNEL 5: TODO ---------- //
// TODO: copy host data to device
// TODO: reduce on device
// TODO: copy device result to host
// TODO: print gpu time
// TODO: compute gpu sum
// TODO: check gpu sum
free(ipt);
free(opt);
free(tmp);
CHECK(cudaFree(d_I));
CHECK(cudaFree(d_O));
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
int cpuReduce(int *N, const int size) {
if (size == 1) return N[0];
int stride = size / 2;
for(int i = 0; i < stride; i++)
N[i] += N[i + stride];
return cpuReduce(N, stride);
}
/*
EXAMPLE:
Loop 1:
stride :<_>_ _ _ _ _ _ _
block :|_|_|_|_|_|_|_|_|
thread : 0 1 2 3 4 5 6 7
| / | / | / | /
|/ |/ |/ |/
inactive: | 1 | 3 | 5 | 7 ===> (tid % (2 * 1) != 0)
active : 0 2 4 6 ===> (tid % (2 * 1) == 0)
Loop 2:
stride :<_ _>_ _ _ _ _ _
block :|_|_|_|_|_|_|_|_|
thread : 0 2 4 6
| / | /
| / | /
inactive: |/ 2 |/ 6 ===> (tid % (2 * 2) != 0)
active : 0 4 ===> (tid % (2 * 2) == 0)
Loop 3:
stride :<_ _ _ _>_ _ _ _
block :|_|_|_|_|_|_|_|_|
thread : 0 4
| /
| /
| /
|/
inactive: | 4 ===> (tid % (2 * 4) != 0)
active : 0 ===> (tid % (2 * 4) == 0)
ANALYZE:
Only about half of the threads do useful work in the first pass, and because the
active threads (tid % (2 * stride) == 0) are scattered across every warp, all
warps stay resident and diverge heavily.
*/
__global__ void gpuReduceRecursive(int *I, int *O, unsigned int n) {
unsigned int tid = threadIdx.x;
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= n) return;
int *N = I + blockIdx.x * blockDim.x; // N: begin address of global memory in block
for (int stride = 1; stride < blockDim.x; stride *= 2) {
if ((tid % (2 * stride)) == 0)
N[tid] += N[tid + stride];
__syncthreads();
}
if (tid == 0) O[blockIdx.x] = N[0];
}
/*
EXAMPLE
_ _ _ _ _ _ _ _
block :|_|_|_|_|_|_|_|_|
Loop1 : 0 | 1 | 2 | 3 | 4...7(inactive)
|/ |/ |/ |/
Loop2 : 0 | 1 | 2...7(inactive)
| / | /
| / | /
|/ |/
Loop3 : 0_______| 1...7(inactive)
|
0
ANALYZE
The active threads are packed into consecutive low thread IDs (index = 2 * stride * tid),
so whole warps go idle together and divergence is much lower than in gpuReduceRecursive.
*/
__global__ void gpuReduceRecursiveL(int *I, int *O, unsigned int n) {
unsigned int tid = threadIdx.x;
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= n) return;
int *N = I + blockIdx.x * blockDim.x; // N: begin address of global memory in block
for (int stride = 1; stride < blockDim.x; stride *= 2) {
int index = 2 * stride * tid;
if (index < blockDim.x)
N[index] += N[index + stride];
__syncthreads();
}
if (tid == 0) O[blockIdx.x] = N[0];
}
/*
EXAMPLE
_ _ _ _ _ _ _ _
block :|_|_|_|_|_|_|_|_|
Loop1 : 0 1 2 3(4 5 6 7)
| | | |/ / / /
| | |/|/ / /
| |/|/|/ /
|/|/|/|/
Loop2 : 0 1(2 3)
| |/ /
|/|/
Loop3 : 0(1)
|/
0
*: threads in () are inactive.
ANALYZE
Active threads work on a contiguous front portion of the block's data, so memory
accesses are coalesced and divergence only appears once stride drops below the warp size.
*/
__global__ void gpuReduceInterleaved(int *I, int *O, unsigned int n) {
unsigned int tid = threadIdx.x;
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= n) return;
int *N = I + blockIdx.x * blockDim.x; // N: begin address of global memory in block
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride)
N[tid] += N[tid + stride];
__syncthreads();
}
if (tid == 0) O[blockIdx.x] = N[0];
}
/*
EXAMPLE:
_ _ _ _ _ _ _ _
block :|_|_|_|_|_|_|_|_|
0 1 2 3 4 5 6 7
unroll : | | | |/ / / /
| | |/|/ / /
| |/|/|/ /
|/|/|/|/
0 1 2 3
then reduce the remaining blockDim.x values exactly as in gpuReduceInterleaved()
*/
__global__ void gpuReduceInterleavedUnrolling2(int *I, int *O, unsigned int n){
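// Unrolling factor 2: each block first folds in the data block that sits
// blockDim.x elements ahead (one global add per thread), then reduces its
// remaining blockDim.x values exactly like gpuReduceInterleaved. This is
// why the host launches only grid.x / 2 blocks for this kernel.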
unsigned int tid = threadIdx.x;
unsigned int idx = threadIdx.x + 2 * blockIdx.x * blockDim.x;
if (idx + blockDim.x < n) I[idx] += I[idx + blockDim.x]; // unroll
__syncthreads();
int *N = I + 2 * blockIdx.x * blockDim.x;
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) N[tid] += N[tid + stride];
__syncthreads();
}
if (tid == 0) O[blockIdx.x] = N[0];
}
|
740d8343f92725199360251827f33c9cedae51a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void ComputeRobustnessMask( const float3* __restrict__ rawImgRef, const float3* __restrict__ rawImgMoved, float4* __restrict__ robustnessMask, hipTextureObject_t texUV, int imgWidth, int imgHeight, int imgPitch, int maskPitch, float alpha, float beta, float thresholdM)
{
int pxX = blockIdx.x * blockDim.x + threadIdx.x;
int pxY = blockIdx.y * blockDim.y + threadIdx.y;
extern __shared__ float3 pixelsRef[];
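// pixelsRef lives in dynamic shared memory: the kernel launch must supply at
// least 3 * 3 * blockDim.x * blockDim.y * sizeof(float3) bytes (9 float3
// entries per thread, indexed via sharedOffset below).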
int sharedOffset = 3 * 3 * (threadIdx.y * blockDim.x + threadIdx.x);
if (pxX >= imgWidth - 1|| pxY >= imgHeight - 1 || pxX < 1 || pxY < 1)
return;
float3 meanRef = make_float3(0, 0, 0);
float3 meanMoved = make_float3(0, 0, 0);
float3 stdRef = make_float3(0, 0, 0);
float3 stdMoved = make_float3(0, 0, 0);
float3 dist = make_float3(0, 0, 0);
float3 sigma = make_float3(0, 0, 0);
float2 shiftf = tex2D<float2>(texUV, ((float)pxX + 0.5f) / (float)imgWidth, ((float)pxY + 0.5f) / (float)imgHeight);
float2 maxShift = shiftf;
float2 minShift = shiftf;
for (int y = -2; y <= 2; y++)
{
for (int x = -2; x <= 2; x++)
{
float2 s = tex2D<float2>(texUV, ((float)pxX + x + 0.5f) / (float)imgWidth, ((float)pxY + y + 0.5f) / (float)imgHeight);
maxShift.x = fmaxf(s.x, shiftf.x);
maxShift.y = fmaxf(s.y, shiftf.y);
minShift.x = fminf(s.x, shiftf.x);
minShift.y = fminf(s.y, shiftf.y);
}
}
int2 shift;
//half resolution image:
shift.x = roundf(shiftf.x * 0.5f);
shift.y = roundf(shiftf.y * 0.5f);
for (int y = -1; y <= 1; y++)
{
for (int x = -1; x <= 1; x++)
{
float3 p = *(((float3*)((char*)rawImgRef + imgPitch * (pxY + y))) + pxX + x);
pixelsRef[sharedOffset + (y + 1) * 3 + (x + 1)] = p;
meanRef.x += p.x;
meanRef.y += p.y;
meanRef.z += p.z;
int ppy = min(max(pxY + shift.y + y, 0), imgHeight - 1);
int ppx = min(max(pxX + shift.x + x, 0), imgWidth - 1);
p = *(((float3*)((char*)rawImgMoved + imgPitch * (ppy))) + ppx);
meanMoved.x += p.x;
meanMoved.y += p.y;
meanMoved.z += p.z;
}
}
meanRef.x /= 9.0f;
meanRef.y /= 9.0f;
meanRef.z /= 9.0f;
meanMoved.x /= 9.0f;
meanMoved.y /= 9.0f;
meanMoved.z /= 9.0f;
float meandist = fabs(meanRef.x - meanMoved.x) + fabs(meanRef.y - meanMoved.y) + fabs(meanRef.z - meanMoved.z);
meandist /= 3.0f;
maxShift.x *= 0.5f * meandist;
maxShift.y *= 0.5f * meandist;
minShift.x *= 0.5f * meandist;
minShift.y *= 0.5f * meandist;
float M = sqrtf((maxShift.x - minShift.x) * (maxShift.x - minShift.x) + (maxShift.y - minShift.y) * (maxShift.y - minShift.y));
for (int y = -1; y <= 1; y++)
{
for (int x = -1; x <= 1; x++)
{
int p = sharedOffset + (y + 1) * 3 + (x + 1);
stdRef.x += (pixelsRef[p].x - meanRef.x) * (pixelsRef[p].x - meanRef.x);
stdRef.y += (pixelsRef[p].y - meanRef.y) * (pixelsRef[p].y - meanRef.y);
stdRef.z += (pixelsRef[p].z - meanRef.z) * (pixelsRef[p].z - meanRef.z);
}
}
stdRef.x = sqrtf(stdRef.x / 9.0f);
stdRef.y = sqrtf(stdRef.y / 9.0f);
stdRef.z = sqrtf(stdRef.z / 9.0f);
float3 sigmaMD;
sigmaMD.x = sqrtf(alpha * meanRef.x + beta);
sigmaMD.y = sqrtf(alpha * meanRef.y + beta) / sqrtf(2.0f); //we have two green pixels averaged --> divide by sqrtf(2);
sigmaMD.z = sqrtf(alpha * meanRef.z + beta);
dist.x = fabs(meanRef.x - meanMoved.x);
dist.y = fabs(meanRef.y - meanMoved.y);
dist.z = fabs(meanRef.z - meanMoved.z);
sigma.x = fmaxf(sigmaMD.x, stdRef.x);
sigma.y = fmaxf(sigmaMD.y, stdRef.y);
sigma.z = fmaxf(sigmaMD.z, stdRef.z);
dist.x = dist.x * (stdRef.x * stdRef.x / (stdRef.x * stdRef.x + sigmaMD.x * sigmaMD.x));
dist.y = dist.y * (stdRef.y * stdRef.y / (stdRef.y * stdRef.y + sigmaMD.y * sigmaMD.y));
dist.z = dist.z * (stdRef.z * stdRef.z / (stdRef.z * stdRef.z + sigmaMD.z * sigmaMD.z));/**/
float4 mask;
float s = 1.5f;
if (M > thresholdM)
s = 0;
const float t = 0.12f;
mask.x = fmaxf(fminf(s * exp(-dist.x * dist.x / (sigma.x * sigma.x)) - t, 1.0f), 0.0f);
mask.y = fmaxf(fminf(s * exp(-dist.y * dist.y / (sigma.y * sigma.y)) - t, 1.0f), 0.0f);
mask.z = fmaxf(fminf(s * exp(-dist.z * dist.z / (sigma.z * sigma.z)) - t, 1.0f), 0.0f);
mask.w = M;
*(((float4*)((char*)robustnessMask + maskPitch * pxY)) + pxX) = mask;
} | 740d8343f92725199360251827f33c9cedae51a0.cu | #include "includes.h"
__global__ void ComputeRobustnessMask( const float3* __restrict__ rawImgRef, const float3* __restrict__ rawImgMoved, float4* __restrict__ robustnessMask, cudaTextureObject_t texUV, int imgWidth, int imgHeight, int imgPitch, int maskPitch, float alpha, float beta, float thresholdM)
{
int pxX = blockIdx.x * blockDim.x + threadIdx.x;
int pxY = blockIdx.y * blockDim.y + threadIdx.y;
extern __shared__ float3 pixelsRef[];
int sharedOffset = 3 * 3 * (threadIdx.y * blockDim.x + threadIdx.x);
if (pxX >= imgWidth - 1|| pxY >= imgHeight - 1 || pxX < 1 || pxY < 1)
return;
float3 meanRef = make_float3(0, 0, 0);
float3 meanMoved = make_float3(0, 0, 0);
float3 stdRef = make_float3(0, 0, 0);
float3 stdMoved = make_float3(0, 0, 0);
float3 dist = make_float3(0, 0, 0);
float3 sigma = make_float3(0, 0, 0);
float2 shiftf = tex2D<float2>(texUV, ((float)pxX + 0.5f) / (float)imgWidth, ((float)pxY + 0.5f) / (float)imgHeight);
float2 maxShift = shiftf;
float2 minShift = shiftf;
for (int y = -2; y <= 2; y++)
{
for (int x = -2; x <= 2; x++)
{
float2 s = tex2D<float2>(texUV, ((float)pxX + x + 0.5f) / (float)imgWidth, ((float)pxY + y + 0.5f) / (float)imgHeight);
maxShift.x = fmaxf(s.x, shiftf.x);
maxShift.y = fmaxf(s.y, shiftf.y);
minShift.x = fminf(s.x, shiftf.x);
minShift.y = fminf(s.y, shiftf.y);
}
}
int2 shift;
//half resolution image:
shift.x = roundf(shiftf.x * 0.5f);
shift.y = roundf(shiftf.y * 0.5f);
for (int y = -1; y <= 1; y++)
{
for (int x = -1; x <= 1; x++)
{
float3 p = *(((float3*)((char*)rawImgRef + imgPitch * (pxY + y))) + pxX + x);
pixelsRef[sharedOffset + (y + 1) * 3 + (x + 1)] = p;
meanRef.x += p.x;
meanRef.y += p.y;
meanRef.z += p.z;
int ppy = min(max(pxY + shift.y + y, 0), imgHeight - 1);
int ppx = min(max(pxX + shift.x + x, 0), imgWidth - 1);
p = *(((float3*)((char*)rawImgMoved + imgPitch * (ppy))) + ppx);
meanMoved.x += p.x;
meanMoved.y += p.y;
meanMoved.z += p.z;
}
}
meanRef.x /= 9.0f;
meanRef.y /= 9.0f;
meanRef.z /= 9.0f;
meanMoved.x /= 9.0f;
meanMoved.y /= 9.0f;
meanMoved.z /= 9.0f;
float meandist = fabs(meanRef.x - meanMoved.x) + fabs(meanRef.y - meanMoved.y) + fabs(meanRef.z - meanMoved.z);
meandist /= 3.0f;
maxShift.x *= 0.5f * meandist;
maxShift.y *= 0.5f * meandist;
minShift.x *= 0.5f * meandist;
minShift.y *= 0.5f * meandist;
float M = sqrtf((maxShift.x - minShift.x) * (maxShift.x - minShift.x) + (maxShift.y - minShift.y) * (maxShift.y - minShift.y));
for (int y = -1; y <= 1; y++)
{
for (int x = -1; x <= 1; x++)
{
int p = sharedOffset + (y + 1) * 3 + (x + 1);
stdRef.x += (pixelsRef[p].x - meanRef.x) * (pixelsRef[p].x - meanRef.x);
stdRef.y += (pixelsRef[p].y - meanRef.y) * (pixelsRef[p].y - meanRef.y);
stdRef.z += (pixelsRef[p].z - meanRef.z) * (pixelsRef[p].z - meanRef.z);
}
}
stdRef.x = sqrtf(stdRef.x / 9.0f);
stdRef.y = sqrtf(stdRef.y / 9.0f);
stdRef.z = sqrtf(stdRef.z / 9.0f);
float3 sigmaMD;
sigmaMD.x = sqrtf(alpha * meanRef.x + beta);
sigmaMD.y = sqrtf(alpha * meanRef.y + beta) / sqrtf(2.0f); //we have two green pixels averaged --> divide by sqrtf(2);
sigmaMD.z = sqrtf(alpha * meanRef.z + beta);
dist.x = fabs(meanRef.x - meanMoved.x);
dist.y = fabs(meanRef.y - meanMoved.y);
dist.z = fabs(meanRef.z - meanMoved.z);
sigma.x = fmaxf(sigmaMD.x, stdRef.x);
sigma.y = fmaxf(sigmaMD.y, stdRef.y);
sigma.z = fmaxf(sigmaMD.z, stdRef.z);
dist.x = dist.x * (stdRef.x * stdRef.x / (stdRef.x * stdRef.x + sigmaMD.x * sigmaMD.x));
dist.y = dist.y * (stdRef.y * stdRef.y / (stdRef.y * stdRef.y + sigmaMD.y * sigmaMD.y));
dist.z = dist.z * (stdRef.z * stdRef.z / (stdRef.z * stdRef.z + sigmaMD.z * sigmaMD.z));/**/
float4 mask;
float s = 1.5f;
if (M > thresholdM)
s = 0;
const float t = 0.12f;
mask.x = fmaxf(fminf(s * exp(-dist.x * dist.x / (sigma.x * sigma.x)) - t, 1.0f), 0.0f);
mask.y = fmaxf(fminf(s * exp(-dist.y * dist.y / (sigma.y * sigma.y)) - t, 1.0f), 0.0f);
mask.z = fmaxf(fminf(s * exp(-dist.z * dist.z / (sigma.z * sigma.z)) - t, 1.0f), 0.0f);
mask.w = M;
*(((float4*)((char*)robustnessMask + maskPitch * pxY)) + pxX) = mask;
} |
d39ed1b578aaa0435a8643a703e5adc9aaa01e6a.hip | // !!! This is a file automatically generated by hipify!!!
// memset
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "mqx.h"
#include "test.h"
void init_rand_data(void *data, size_t size)
{
size_t i;
for (i = 0; i < size; i += sizeof(int)) {
if (i + sizeof(int) <= size)
*((int *)((char *)data + i)) = rand();
else
break;
}
if (i < size) {
while (i < size) {
*((char *)((char *)data + i)) = rand() % 256;
++i;
}
}
}
int cmp_data(void *data1, void *data2, size_t size)
{
unsigned char *c1 = (unsigned char *)data1;
unsigned char *c2 = (unsigned char *)data2;
size_t i;
for (i = 0; i < size; i++) {
if (*(c1 + i) < *(c2 + i)) {
MQX_TPRINT("Diff: i = %lu, c1 = %d, c2 = %d",
i, *(c1 + i), *(c2 + i));
return -1;
}
else if (*(c1 + i) > *(c2 + i)) {
MQX_TPRINT("Diff: i = %lu, c1 = %d, c2 = %d",
i, *(c1 + i), *(c2 + i));
return 1;
}
}
return 0;
}
int do_test_memset(size_t size, size_t memset_off, size_t memset_len)
{
void *dptr, *ptr, *ptr2;
int memset_value = 11;
int ret = 0;
// Mallocs
ptr = malloc(size);
if (!ptr) {
MQX_TPRINT("malloc failed for ptr");
return -1;
}
ptr2 = malloc(size);
if (!ptr2) {
MQX_TPRINT("malloc failed for ptr2");
free(ptr);
return -1;
}
if (hipMalloc(&dptr, size) != hipSuccess) {
MQX_TPRINT("hipMalloc failed for dptr");
free(ptr2);
free(ptr);
return -1;
}
// Initialize source buffer and the device memory region
init_rand_data(ptr, size);
if (hipMemcpy(dptr, ptr, size, hipMemcpyHostToDevice) != hipSuccess) {
MQX_TPRINT("hipMemcpy HtoD failed");
ret = -1;
goto finish;
}
//MQX_TPRINT("Device memory region initialized");
if (hipMemset((char *)dptr + memset_off, memset_value, memset_len)
!= hipSuccess) {
MQX_TPRINT("hipMemset failed");
ret = -1;
goto finish;
}
//MQX_TPRINT("hipMemset succeeded");
if (hipMemcpy(ptr2, dptr, size, hipMemcpyDeviceToHost) != hipSuccess) {
MQX_TPRINT("hipMemcpy DtoH failed");
ret = -1;
goto finish;
}
//MQX_TPRINT("hipMemcpyDeviceToHost succeeded");
memset((char *)ptr + memset_off, memset_value, memset_len);
if (cmp_data(ptr2, ptr, size) != 0) {
MQX_TPRINT("Memset test of size(%lu) off(%lu) len(%lu): "
"verification failed", size, memset_off, memset_len);
ret = -1;
}
else
MQX_TPRINT("Memset test of size(%lu) off(%lu) len(%lu): "
"verification passed", size, memset_off, memset_len);
finish:
if (hipFree(dptr) != hipSuccess) {
MQX_TPRINT("hipFree for dptr failed");
}
free(ptr);
free(ptr2);
return ret;
}
int test_memset()
{
bool test_failed = false;
size_t size = 4096;
srand(time(NULL));
while (size < 1024 * 1024 * 10) {
// Partial memset
test_failed |= do_test_memset(size, size / 4, size / 2) < 0;
// Complete memset
test_failed |= do_test_memset(size, 0, size) < 0;
size *= 2;
}
return test_failed ? -1 : 0;
}
| d39ed1b578aaa0435a8643a703e5adc9aaa01e6a.cu | // memset
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "mqx.h"
#include "test.h"
void init_rand_data(void *data, size_t size)
{
size_t i;
for (i = 0; i < size; i += sizeof(int)) {
if (i + sizeof(int) <= size)
*((int *)((char *)data + i)) = rand();
else
break;
}
if (i < size) {
while (i < size) {
*((char *)((char *)data + i)) = rand() % 256;
++i;
}
}
}
int cmp_data(void *data1, void *data2, size_t size)
{
unsigned char *c1 = (unsigned char *)data1;
unsigned char *c2 = (unsigned char *)data2;
size_t i;
for (i = 0; i < size; i++) {
if (*(c1 + i) < *(c2 + i)) {
MQX_TPRINT("Diff: i = %lu, c1 = %d, c2 = %d",
i, *(c1 + i), *(c2 + i));
return -1;
}
else if (*(c1 + i) > *(c2 + i)) {
MQX_TPRINT("Diff: i = %lu, c1 = %d, c2 = %d",
i, *(c1 + i), *(c2 + i));
return 1;
}
}
return 0;
}
int do_test_memset(size_t size, size_t memset_off, size_t memset_len)
{
void *dptr, *ptr, *ptr2;
int memset_value = 11;
int ret = 0;
// Mallocs
ptr = malloc(size);
if (!ptr) {
MQX_TPRINT("malloc failed for ptr");
return -1;
}
ptr2 = malloc(size);
if (!ptr2) {
MQX_TPRINT("malloc failed for ptr2");
free(ptr);
return -1;
}
if (cudaMalloc(&dptr, size) != cudaSuccess) {
MQX_TPRINT("cudaMalloc failed for dptr");
free(ptr2);
free(ptr);
return -1;
}
// Initialize source buffer and the device memory region
init_rand_data(ptr, size);
if (cudaMemcpy(dptr, ptr, size, cudaMemcpyHostToDevice) != cudaSuccess) {
MQX_TPRINT("cudaMemcpy HtoD failed");
ret = -1;
goto finish;
}
//MQX_TPRINT("Device memory region initialized");
if (cudaMemset((char *)dptr + memset_off, memset_value, memset_len)
!= cudaSuccess) {
MQX_TPRINT("cudaMemset failed");
ret = -1;
goto finish;
}
//MQX_TPRINT("cudaMemset succeeded");
if (cudaMemcpy(ptr2, dptr, size, cudaMemcpyDeviceToHost) != cudaSuccess) {
MQX_TPRINT("cudaMemcpy DtoH failed");
ret = -1;
goto finish;
}
//MQX_TPRINT("cudaMemcpyDeviceToHost succeeded");
memset((char *)ptr + memset_off, memset_value, memset_len);
if (cmp_data(ptr2, ptr, size) != 0) {
MQX_TPRINT("Memset test of size(%lu) off(%lu) len(%lu): "
"verification failed", size, memset_off, memset_len);
ret = -1;
}
else
MQX_TPRINT("Memset test of size(%lu) off(%lu) len(%lu): "
"verification passed", size, memset_off, memset_len);
finish:
if (cudaFree(dptr) != cudaSuccess) {
MQX_TPRINT("cudaFree for dptr failed");
}
free(ptr);
free(ptr2);
return ret;
}
int test_memset()
{
bool test_failed = false;
size_t size = 4096;
srand(time(NULL));
while (size < 1024 * 1024 * 10) {
// Partial memset
test_failed |= do_test_memset(size, size / 4, size / 2) < 0;
// Complete memset
test_failed |= do_test_memset(size, 0, size) < 0;
size *= 2;
}
return test_failed ? -1 : 0;
}
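/* Hypothetical standalone driver (NOT part of the original file): test.h is
 * assumed to declare test_memset(); in the real suite a shared test runner
 * presumably calls it, but for a one-off build a main like this would do. */
int main(void)
{
    if (test_memset() != 0) {
        printf("memset test FAILED\n");
        return 1;
    }
    printf("memset test passed\n");
    return 0;
}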
|
70153e028ec9b3d5ca745ae008137bf9ecb4843e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels.h"
/* This is the kernel function that performs the sum reduction.
 * This one uses the most basic approach (called "interleaved indexing").
* Each thread adds a pair of elements right next to each other.
* The stride in the code below is the distance between the elements that each thread adds.
* This distance doubles on each iteration.
* Note that the number of threads required also halves on each iteration.
*
* Notes on kernel args: the arguments passed in for the arrays must be *device buffers* (not host buffers)!
* n is an integer that is passed in from the host when the kernel is launched.
 * No cudaMemcpy is required for this (kernel arguments are passed by value and copied automatically).
*/
__global__ void reduce(float *input, float *output, unsigned int n)
{
// Determine this thread's various ids
unsigned int block_size = blockDim.x;
unsigned int thread_id = threadIdx.x;
unsigned int block_id = blockIdx.x;
// The size of the chunk of data this thread's block is working on.
unsigned int chunk_size = block_size * 2;
// Calculate the index that this block's chunk of values starts at.
// Each thread adds 2 values, so each block adds a total of block_size * 2 values.
unsigned int block_start = block_id * chunk_size;
// Perform the reduction using "interleaved indexing" (each thread adds a pair of
// elements right next to each other).
// "stride" is the distance between the elements that each thread adds.
// This distance doubles on each iteration.
// The number of threads required halves on each iteration.
unsigned int left; // holds index of left operand
unsigned int right; // holds index of right operand
unsigned int threads = block_size; // number of active threads (on current iteration)
for (unsigned int stride = 1; stride < chunk_size; stride *= 2, threads /= 2)
{
// There's a distance of stride between each pair of left and right operand indices,
// so there's a distance of stride * 2 between consecutive left indices
left = block_start + thread_id * (stride * 2);
right = left + stride;
if (thread_id < threads // read: "If this thread should be
// active on this iteration of the reduction."
&& right < n) // If we're the last block, we may be running more threads
// than we need - this condition makes sure they don't interfere.
{
input[left] += input[right];
}
// Each block may be running multiple warps. These warps may not all be in
// sync. The call below syncs the warps in the block at the end of each iteration
// so that the results are written to memory before the next iteration begins.
__syncthreads();
}
// Once the loop is done, the partial sum for this block will be in the leftmost index
// of this block's chunk. The code below causes each block's thread 0 to write that
// partial result to the output buffer at position "block_id". After the code
// below completes, the output buffer will contain exactly <number of blocks>
// consecutive partial results.
if (!thread_id)
{
output[block_id] = input[block_start];
}
}
| 70153e028ec9b3d5ca745ae008137bf9ecb4843e.cu | #include "kernels.h"
/* This is the kernel function that performs the sum reduction.
 * This one uses the most basic approach (called "interleaved indexing").
* Each thread adds a pair of elements right next to each other.
* The stride in the code below is the distance between the elements that each thread adds.
* This distance doubles on each iteration.
* Note that the number of threads required also halves on each iteration.
*
* Notes on kernel args: the arguments passed in for the arrays must be *device buffers* (not host buffers)!
* n is an integer that is passed in from the host when the kernel is launched.
 * No cudaMemcpy is required for this (kernel arguments are passed by value and copied automatically).
*/
__global__ void reduce(float *input, float *output, unsigned int n)
{
// Determine this thread's various ids
unsigned int block_size = blockDim.x;
unsigned int thread_id = threadIdx.x;
unsigned int block_id = blockIdx.x;
// The size of the chunk of data this thread's block is working on.
unsigned int chunk_size = block_size * 2;
// Calculate the index that this block's chunk of values starts at.
// Each thread adds 2 values, so each block adds a total of block_size * 2 values.
unsigned int block_start = block_id * chunk_size;
// Perform the reduction using "interleaved indexing" (each thread adds a pair of
// elements right next to each other).
// "stride" is the distance between the elements that each thread adds.
// This distance doubles on each iteration.
// The number of threads required halves on each iteration.
unsigned int left; // holds index of left operand
unsigned int right; // holds index of right operand
unsigned int threads = block_size; // number of active threads (on current iteration)
for (unsigned int stride = 1; stride < chunk_size; stride *= 2, threads /= 2)
{
// There's a distance of stride between each pair of left and right operand indices,
// so there's a distance of stride * 2 between consecutive left indices
left = block_start + thread_id * (stride * 2);
right = left + stride;
if (thread_id < threads // read: "If this thread should be
// active on this iteration of the reduction."
&& right < n) // If we're the last block, we may be running more threads
// than we need - this condition makes sure they don't interfere.
{
input[left] += input[right];
}
// Each block may be running multiple warps. These warps may not all be in
// sync. The call below syncs the warps in the block at the end of each iteration
// so that the results are written to memory before the next iteration begins.
__syncthreads();
}
// Once the loop is done, the partial sum for this block will be in the leftmost index
// of this block's chunk. The code below causes each block's thread 0 to write that
// partial result to the output buffer at position "block_id". After the code
// below completes, the output buffer will contain exactly <number of blocks>
// consecutive partial results.
if (!thread_id)
{
output[block_id] = input[block_start];
}
}
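/* ---------------------------------------------------------------------------
 * Hypothetical host-side driver (NOT part of the original file): a minimal
 * sketch of how the reduce kernel above could be launched, assuming error
 * checking is omitted for brevity. Each launch leaves one partial sum per
 * block in the output buffer, so the host adds those partial sums up;
 * alternatively the kernel could be re-launched on the partial sums until a
 * single value remains.
 * ------------------------------------------------------------------------- */
static float reduce_on_device_sketch(const float *h_input, unsigned int n)
{
    const unsigned int block_size = 256;                      // threads per block
    const unsigned int chunk_size = block_size * 2;           // values summed per block
    const unsigned int num_blocks = (n + chunk_size - 1) / chunk_size;

    float *d_input, *d_output;
    cudaMalloc(&d_input,  n * sizeof(float));
    cudaMalloc(&d_output, num_blocks * sizeof(float));
    cudaMemcpy(d_input, h_input, n * sizeof(float), cudaMemcpyHostToDevice);

    // one partial sum per block ends up in d_output[0 .. num_blocks-1]
    reduce<<<num_blocks, block_size>>>(d_input, d_output, n);

    float *h_partial = new float[num_blocks];
    cudaMemcpy(h_partial, d_output, num_blocks * sizeof(float), cudaMemcpyDeviceToHost);

    float total = 0.0f;                                       // finish the sum on the host
    for (unsigned int i = 0; i < num_blocks; ++i)
        total += h_partial[i];

    delete[] h_partial;
    cudaFree(d_input);
    cudaFree(d_output);
    return total;
}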
|
3201c9b20f3d963a8acf28b9b31eae139bed2896.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/activation/interp_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
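// Nearest-neighbour resampling: output pixel (h, w) on the height_out x width_out
// grid reads input pixel (floor(h * height_in / height_out), floor(w * width_in / width_out)).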
static __global__ void interp_foward_kernel(int IND,int channels,int height_in,int width_in,int height_out,int width_out,const float * id, float *od)
{
CUDA_KERNEL_LOOP(ind, IND)
{
int w=ind % width_out;
int h=ind / width_out % height_out;
int c=ind / width_out / height_out % channels;
int n=ind / width_out / height_out / channels;
int h_in = floor(float(h)/float(height_out)*float(height_in));
int w_in = floor(float(w)/float(width_out)*float(width_in));
int ind_in = ((n*channels+c)*height_in + h_in)*width_in + w_in;
od[ind] = id[ind_in];
}
}
static __global__ void interp_backward_0_kernel(int IND,int channels,int height_in,int width_in,int height_out,int width_out,const float * id, float *od)
{
CUDA_KERNEL_LOOP(ind, IND)
{
int w=ind % width_out;
int h=ind / width_out % height_out;
int c=ind / width_out / height_out % channels;
int n=ind / width_out / height_out / channels;
int h_in = floor(float(h)/float(height_out)*float(height_in));
int w_in = floor(float(w)/float(width_out)*float(width_in));
int ind_in = ((n*channels+c)*height_in + h_in)*width_in + w_in;
od[ind_in] = id[ind];
}
}
static __global__ void interp_backward_1_kernel(int IND,int channels,int height_out,int width_out,int height_in,int width_in,const float * od, float *id)
{
CUDA_KERNEL_LOOP(ind, IND)
{
int w=ind % width_in;
int h=ind / width_in % height_in;
int c=ind / width_in / height_in % channels;
int n=ind / width_in / height_in / channels;
int h_begin = ceil(float(h)/float(height_in)*float(height_out));
int h_end = ceil(float(h+1)/float(height_in)*float(height_out));
int w_begin = ceil(float(w)/float(width_in)*float(width_out));
int w_end = ceil(float(w+1)/float(width_in)*float(width_out));
float sum = 0;
for(int h_out=h_begin;h_out<h_end;h_out++)
for(int w_out=w_begin;w_out<w_end;w_out++)
{
int ind_out = ((n*channels+c)*height_out + h_out)*width_out + w_out;
sum += od[ind_out];
}
id[ind] = sum;
}
}
void InterpLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
hipLaunchKernelGGL(( interp_foward_kernel), dim3(CAFFE_GET_BLOCKS(top[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->count(),top[0]->channels(),bottom[0]->height(),bottom[0]->width(),top[0]->height(),top[0]->width(),
bottom[0]->gpu_data(),top[0]->mutable_gpu_data());
}
void InterpLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom)
{
if (bottom[0]->height() > top[0]->height())
{
caffe_gpu_set(bottom[0]->count(),float(0),bottom[0]->mutable_gpu_diff());
hipLaunchKernelGGL(( interp_backward_0_kernel), dim3(CAFFE_GET_BLOCKS(top[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->count(),top[0]->channels(),bottom[0]->height(),bottom[0]->width(),top[0]->height(),top[0]->width(),
top[0]->gpu_diff(),bottom[0]->mutable_gpu_diff());
}
else
{
hipLaunchKernelGGL(( interp_backward_1_kernel), dim3(CAFFE_GET_BLOCKS(bottom[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom[0]->count(),bottom[0]->channels(),top[0]->height(),top[0]->width(),bottom[0]->height(),bottom[0]->width(),
top[0]->gpu_diff(),bottom[0]->mutable_gpu_diff());
}
}
void InterpLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
hipLaunchKernelGGL(( interp_foward_kernel), dim3(CAFFE_GET_BLOCKS(top[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->count(),top[0]->channels(),bottom[0]->height(),bottom[0]->width(),top[0]->height(),top[0]->width(),
bottom[0]->gpu_sec_diff(),top[0]->mutable_gpu_sec_diff());
}
} // namespace caffe
| 3201c9b20f3d963a8acf28b9b31eae139bed2896.cu | #include <vector>
#include "caffe/layers/activation/interp_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
static __global__ void interp_foward_kernel(int IND,int channels,int height_in,int width_in,int height_out,int width_out,const float * id, float *od)
{
CUDA_KERNEL_LOOP(ind, IND)
{
int w=ind % width_out;
int h=ind / width_out % height_out;
int c=ind / width_out / height_out % channels;
int n=ind / width_out / height_out / channels;
int h_in = floor(float(h)/float(height_out)*float(height_in));
int w_in = floor(float(w)/float(width_out)*float(width_in));
int ind_in = ((n*channels+c)*height_in + h_in)*width_in + w_in;
od[ind] = id[ind_in];
}
}
static __global__ void interp_backward_0_kernel(int IND,int channels,int height_in,int width_in,int height_out,int width_out,const float * id, float *od)
{
CUDA_KERNEL_LOOP(ind, IND)
{
int w=ind % width_out;
int h=ind / width_out % height_out;
int c=ind / width_out / height_out % channels;
int n=ind / width_out / height_out / channels;
int h_in = floor(float(h)/float(height_out)*float(height_in));
int w_in = floor(float(w)/float(width_out)*float(width_in));
int ind_in = ((n*channels+c)*height_in + h_in)*width_in + w_in;
od[ind_in] = id[ind];
}
}
static __global__ void interp_backward_1_kernel(int IND,int channels,int height_out,int width_out,int height_in,int width_in,const float * od, float *id)
{
CUDA_KERNEL_LOOP(ind, IND)
{
int w=ind % width_in;
int h=ind / width_in % height_in;
int c=ind / width_in / height_in % channels;
int n=ind / width_in / height_in / channels;
int h_begin = ceil(float(h)/float(height_in)*float(height_out));
int h_end = ceil(float(h+1)/float(height_in)*float(height_out));
int w_begin = ceil(float(w)/float(width_in)*float(width_out));
int w_end = ceil(float(w+1)/float(width_in)*float(width_out));
float sum = 0;
for(int h_out=h_begin;h_out<h_end;h_out++)
for(int w_out=w_begin;w_out<w_end;w_out++)
{
int ind_out = ((n*channels+c)*height_out + h_out)*width_out + w_out;
sum += od[ind_out];
}
id[ind] = sum;
}
}
void InterpLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
interp_foward_kernel<<<CAFFE_GET_BLOCKS(top[0]->count()), CAFFE_CUDA_NUM_THREADS>>>
(top[0]->count(),top[0]->channels(),bottom[0]->height(),bottom[0]->width(),top[0]->height(),top[0]->width(),
bottom[0]->gpu_data(),top[0]->mutable_gpu_data());
}
void InterpLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom)
{
if (bottom[0]->height() > top[0]->height())
{
caffe_gpu_set(bottom[0]->count(),float(0),bottom[0]->mutable_gpu_diff());
interp_backward_0_kernel<<<CAFFE_GET_BLOCKS(top[0]->count()), CAFFE_CUDA_NUM_THREADS>>>
(top[0]->count(),top[0]->channels(),bottom[0]->height(),bottom[0]->width(),top[0]->height(),top[0]->width(),
top[0]->gpu_diff(),bottom[0]->mutable_gpu_diff());
}
else
{
interp_backward_1_kernel<<<CAFFE_GET_BLOCKS(bottom[0]->count()), CAFFE_CUDA_NUM_THREADS>>>
(bottom[0]->count(),bottom[0]->channels(),top[0]->height(),top[0]->width(),bottom[0]->height(),bottom[0]->width(),
top[0]->gpu_diff(),bottom[0]->mutable_gpu_diff());
}
}
void InterpLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
interp_foward_kernel<<<CAFFE_GET_BLOCKS(top[0]->count()), CAFFE_CUDA_NUM_THREADS>>>
(top[0]->count(),top[0]->channels(),bottom[0]->height(),bottom[0]->width(),top[0]->height(),top[0]->width(),
bottom[0]->gpu_sec_diff(),top[0]->mutable_gpu_sec_diff());
}
} // namespace caffe
|
1256f279ba9aaeb51d8db878b4bfb25fc9c43655.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <random>
#include <wmma_extension/mma_simt.hpp>
#include "utils.hpp"
namespace {
template <class T>
constexpr double error_threshold = 0.0;
template <>
constexpr double error_threshold<half > = 1e-3;
template <>
constexpr double error_threshold<float> = 1e-6;
template <>
constexpr double error_threshold<double> = 1e-15;
template <class T>
struct smem_t {using type = float;};
template <>
struct smem_t<double> {using type = double;};
} // noname namespace
template <unsigned N, class T, class A_Layout, class B_Layout, class MEM_T>
__global__ void mma_kernel_abcd(MEM_T* const d_ptr, const MEM_T* const a_ptr, const MEM_T* const b_ptr, const MEM_T* const c_ptr, const nvcuda::wmma::layout_t cd_layout) {
constexpr unsigned LD = N;
__shared__ MEM_T smem[N * LD];
mtk::test_utils::fill_zero(smem, N * LD);
mtk::wmma::mma_simt::fragment<nvcuda::wmma::matrix_a , N, N, N, T, A_Layout> frag_a;
mtk::wmma::mma_simt::fragment<nvcuda::wmma::matrix_b , N, N, N, T, B_Layout> frag_b;
mtk::wmma::mma_simt::fragment<nvcuda::wmma::accumulator, N, N, N, T, void > frag_c, frag_d;
// Load A
mtk::test_utils::copy_matrix(smem, LD, a_ptr, N, N, N);
mtk::wmma::mma_simt::load_matrix_sync(frag_a, smem, LD);
// Load B
mtk::test_utils::copy_matrix(smem, LD, b_ptr, N, N, N);
mtk::wmma::mma_simt::load_matrix_sync(frag_b, smem, LD);
// Load C
mtk::test_utils::copy_matrix(smem, LD, c_ptr, N, N, N);
mtk::wmma::mma_simt::load_matrix_sync(frag_c, smem, LD, cd_layout);
// Fill D
mtk::wmma::mma_simt::fill_fragment(frag_d, 0.0f);
// mma
mtk::wmma::mma_simt::mma_sync(frag_d, frag_a, frag_b, frag_c);
// Store D
mtk::wmma::mma_simt::store_matrix_sync(smem, frag_d, LD, cd_layout);
mtk::test_utils::copy_matrix(d_ptr, N, smem, LD, N, N);
// Test for fill_zero
mtk::wmma::mma_simt::fill_zero(frag_d);
}
template <unsigned N, class T, class A_Layout, class B_Layout>
void test_mma(const nvcuda::wmma::layout_t cd_layout) {
using mem_t = typename smem_t<T>::type;
mem_t *hA, *hB, *hC, *hD;
hipHostMalloc(&hA, N * N * sizeof(mem_t));
hipHostMalloc(&hB, N * N * sizeof(mem_t));
hipHostMalloc(&hC, N * N * sizeof(mem_t));
hipHostMalloc(&hD, N * N * sizeof(mem_t));
std::mt19937 mt(std::random_device{}());
std::uniform_real_distribution<mem_t> dist(-1.0f, 1.0f);
for (unsigned i = 0; i < N * N; i++) {
hA[i] = dist(mt);
hB[i] = dist(mt);
hC[i] = dist(mt);
}
hipDeviceSynchronize();
hipLaunchKernelGGL(( mma_kernel_abcd<N, T, A_Layout, B_Layout, mem_t>), dim3(1), dim3(mtk::test_utils::warp_size), 0, 0, hD, hA, hB, hC, cd_layout);
const auto stat = hipDeviceSynchronize();
if (stat != hipSuccess) {
std::printf("[error] %s\n", hipGetErrorString(stat));
}
double max_error = 0.;
for (unsigned m = 0; m < N; m++) {
for (unsigned n = 0; n < N; n++) {
double cor_d = 0.;
for (unsigned k = 0; k < N; k++) {
const auto a_mem_index = std::is_same<A_Layout, nvcuda::wmma::col_major>::value ? (k * N + m) : (m * N + k);
const auto b_mem_index = std::is_same<B_Layout, nvcuda::wmma::col_major>::value ? (k + n * N) : (n + k * N);
cor_d += static_cast<double>(hA[a_mem_index]) * static_cast<double>(hB[b_mem_index]);
}
const auto c_mem_index = (cd_layout == nvcuda::wmma::mem_col_major) ? (m + n * N) : (n + m * N);
cor_d += hC[c_mem_index];
max_error = ::max(max_error, std::abs(cor_d - hD[c_mem_index]));
}
}
std::printf(
"[Type:%6s, N:%3u, A_Layout:%10s, B_Layout:%10s, C_Layout:%10s, FragShape<%2d,%2d,%2d>] max_error: %e (%6s)\n",
mtk::test_utils::to_string<T>().c_str(),
N,
mtk::test_utils::to_string<A_Layout>().c_str(),
mtk::test_utils::to_string<B_Layout>().c_str(),
(cd_layout == nvcuda::wmma::mem_col_major) ? mtk::test_utils::to_string<nvcuda::wmma::col_major>().c_str() : mtk::test_utils::to_string<nvcuda::wmma::row_major>().c_str(),
N, N, N,
max_error,
(max_error < (error_threshold<T> * N) ? "PASSED" : "FAILED")
);
hipHostFree(hA);
hipHostFree(hB);
hipHostFree(hC);
hipHostFree(hD);
}
int main() {
// mma_simt tests: half, float and double fragments, all A/B/C layout combinations
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, half , nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, half , nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, half , nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, half , nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, half , nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, half , nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, half , nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, half , nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, float , nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, float , nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, float , nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, float , nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, float , nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, float , nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, float , nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, float , nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, float , nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, float , nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, float , nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, float , nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, float , nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, float , nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, float , nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, double, nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, double, nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, double, nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, double, nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, double, nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, double, nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, double, nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, double, nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, double, nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, double, nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, double, nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, double, nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, double, nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, double, nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, double, nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
}
| 1256f279ba9aaeb51d8db878b4bfb25fc9c43655.cu | #include <iostream>
#include <random>
#include <wmma_extension/mma_simt.hpp>
#include "utils.hpp"
namespace {
template <class T>
constexpr double error_threshold = 0.0;
template <>
constexpr double error_threshold<half > = 1e-3;
template <>
constexpr double error_threshold<float> = 1e-6;
template <>
constexpr double error_threshold<double> = 1e-15;
template <class T>
struct smem_t {using type = float;};
template <>
struct smem_t<double> {using type = double;};
} // noname namespace
template <unsigned N, class T, class A_Layout, class B_Layout, class MEM_T>
__global__ void mma_kernel_abcd(MEM_T* const d_ptr, const MEM_T* const a_ptr, const MEM_T* const b_ptr, const MEM_T* const c_ptr, const nvcuda::wmma::layout_t cd_layout) {
constexpr unsigned LD = N;
__shared__ MEM_T smem[N * LD];
mtk::test_utils::fill_zero(smem, N * LD);
mtk::wmma::mma_simt::fragment<nvcuda::wmma::matrix_a , N, N, N, T, A_Layout> frag_a;
mtk::wmma::mma_simt::fragment<nvcuda::wmma::matrix_b , N, N, N, T, B_Layout> frag_b;
mtk::wmma::mma_simt::fragment<nvcuda::wmma::accumulator, N, N, N, T, void > frag_c, frag_d;
// Load A
mtk::test_utils::copy_matrix(smem, LD, a_ptr, N, N, N);
mtk::wmma::mma_simt::load_matrix_sync(frag_a, smem, LD);
// Load B
mtk::test_utils::copy_matrix(smem, LD, b_ptr, N, N, N);
mtk::wmma::mma_simt::load_matrix_sync(frag_b, smem, LD);
// Load C
mtk::test_utils::copy_matrix(smem, LD, c_ptr, N, N, N);
mtk::wmma::mma_simt::load_matrix_sync(frag_c, smem, LD, cd_layout);
// Fill D
mtk::wmma::mma_simt::fill_fragment(frag_d, 0.0f);
// mma
mtk::wmma::mma_simt::mma_sync(frag_d, frag_a, frag_b, frag_c);
// Store D
mtk::wmma::mma_simt::store_matrix_sync(smem, frag_d, LD, cd_layout);
mtk::test_utils::copy_matrix(d_ptr, N, smem, LD, N, N);
// Test for fill_zero
mtk::wmma::mma_simt::fill_zero(frag_d);
}
template <unsigned N, class T, class A_Layout, class B_Layout>
void test_mma(const nvcuda::wmma::layout_t cd_layout) {
using mem_t = typename smem_t<T>::type;
mem_t *hA, *hB, *hC, *hD;
cudaMallocHost(&hA, N * N * sizeof(mem_t));
cudaMallocHost(&hB, N * N * sizeof(mem_t));
cudaMallocHost(&hC, N * N * sizeof(mem_t));
cudaMallocHost(&hD, N * N * sizeof(mem_t));
std::mt19937 mt(std::random_device{}());
std::uniform_real_distribution<mem_t> dist(-1.0f, 1.0f);
for (unsigned i = 0; i < N * N; i++) {
hA[i] = dist(mt);
hB[i] = dist(mt);
hC[i] = dist(mt);
}
cudaDeviceSynchronize();
mma_kernel_abcd<N, T, A_Layout, B_Layout, mem_t><<<1, mtk::test_utils::warp_size>>>(hD, hA, hB, hC, cd_layout);
const auto stat = cudaDeviceSynchronize();
if (stat != cudaSuccess) {
std::printf("[error] %s\n", cudaGetErrorString(stat));
}
double max_error = 0.;
for (unsigned m = 0; m < N; m++) {
for (unsigned n = 0; n < N; n++) {
double cor_d = 0.;
for (unsigned k = 0; k < N; k++) {
const auto a_mem_index = std::is_same<A_Layout, nvcuda::wmma::col_major>::value ? (k * N + m) : (m * N + k);
const auto b_mem_index = std::is_same<B_Layout, nvcuda::wmma::col_major>::value ? (k + n * N) : (n + k * N);
cor_d += static_cast<double>(hA[a_mem_index]) * static_cast<double>(hB[b_mem_index]);
}
const auto c_mem_index = (cd_layout == nvcuda::wmma::mem_col_major) ? (m + n * N) : (n + m * N);
cor_d += hC[c_mem_index];
max_error = std::max(max_error, std::abs(cor_d - hD[c_mem_index]));
}
}
std::printf(
"[Type:%6s, N:%3u, A_Layout:%10s, B_Layout:%10s, C_Layout:%10s, FragShape<%2d,%2d,%2d>] max_error: %e (%6s)\n",
mtk::test_utils::to_string<T>().c_str(),
N,
mtk::test_utils::to_string<A_Layout>().c_str(),
mtk::test_utils::to_string<B_Layout>().c_str(),
(cd_layout == nvcuda::wmma::mem_col_major) ? mtk::test_utils::to_string<nvcuda::wmma::col_major>().c_str() : mtk::test_utils::to_string<nvcuda::wmma::row_major>().c_str(),
N, N, N,
max_error,
(max_error < (error_threshold<T> * N) ? "PASSED" : "FAILED")
);
cudaFreeHost(hA);
cudaFreeHost(hB);
cudaFreeHost(hC);
cudaFreeHost(hD);
}
int main() {
// mma_simt tests: half, float and double fragments, all A/B/C layout combinations
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, half , nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, half , nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, half , nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, half , nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, half , nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, half , nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, half , nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, half , nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, half , nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, float , nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, float , nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, float , nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, float , nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, float , nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, float , nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, float , nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, float , nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, float , nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, float , nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, float , nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, float , nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, float , nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, float , nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, float , nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, double, nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, double, nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, double, nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_col_major);
test_mma<16, double, nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, double, nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, double, nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, double, nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_col_major);
test_mma<16, double, nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, double, nvcuda::wmma::col_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, double, nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, double, nvcuda::wmma::row_major, nvcuda::wmma::col_major>(nvcuda::wmma::mem_row_major);
test_mma<16, double, nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, double, nvcuda::wmma::col_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, double, nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
test_mma<16, double, nvcuda::wmma::row_major, nvcuda::wmma::row_major>(nvcuda::wmma::mem_row_major);
}
|
c23ce112d6e2e0c5e93809b932077aa647b43576.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <iostream>
#include <string>
#include <opencv2/opencv.hpp>
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
// remember to delete[] the allocated *imagePtr when done (it is allocated with new[])
void loadImageRGBA(const std::string &filename,
uchar4 **imagePtr,
size_t *numRows, size_t *numCols)
{
cv::Mat image = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR);
if (image.empty()) {
std::cerr << "Couldn't open file: " << filename << std::endl;
exit(1);
}
if (image.channels() != 3) {
std::cerr << "Image must be color!" << std::endl;
exit(1);
}
if (!image.isContinuous()) {
std::cerr << "Image isn't continuous!" << std::endl;
exit(1);
}
cv::Mat imageRGBA;
cv::cvtColor(image, imageRGBA, CV_BGR2RGBA);
*imagePtr = new uchar4[image.rows * image.cols];
unsigned char *cvPtr = imageRGBA.ptr<unsigned char>(0);
for (size_t i = 0; i < image.rows * image.cols; ++i) {
(*imagePtr)[i].x = cvPtr[4 * i + 0];
(*imagePtr)[i].y = cvPtr[4 * i + 1];
(*imagePtr)[i].z = cvPtr[4 * i + 2];
(*imagePtr)[i].w = cvPtr[4 * i + 3];
}
*numRows = image.rows;
*numCols = image.cols;
}
void saveImageRGBA(const uchar4* const image,
const size_t numRows, const size_t numCols,
const std::string &output_file)
{
int sizes[2];
sizes[0] = numRows;
sizes[1] = numCols;
cv::Mat imageRGBA(2, sizes, CV_8UC4, (void *)image);
cv::Mat imageOutputBGR;
cv::cvtColor(imageRGBA, imageOutputBGR, CV_RGBA2BGR);
//output the image
cv::imwrite(output_file.c_str(), imageOutputBGR);
}
__global__
void computeMask(const uchar4* const sourceImg,
bool* const mask,
const size_t size)
{
const int tId = blockIdx.x*blockDim.x + threadIdx.x;
if (tId >= size) return;
unsigned int pixSum = sourceImg[tId].x + sourceImg[tId].y + sourceImg[tId].z;
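// any pixel that is not pure white (255, 255, 255) is treated as part of the source region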
if (pixSum < 3*255)
{
mask[tId] = true;
}
else
{
mask[tId] = false;
}
}
__global__
void computeBorderPixAndInteriorPix(const bool* const mask,
bool* const interiorPixels,
bool* const borderPixels,
const size_t numRows,
const size_t numCols)
{
const int tId = blockIdx.x*blockDim.x + threadIdx.x;
if (tId >= numRows*numCols) return;
if (!mask[tId])
{
interiorPixels[tId] = false;
borderPixels[tId] = false;
return;
}
if (mask[tId - numCols] && mask[tId + numCols]
&& mask[tId-1] && mask[tId+1])
{
interiorPixels[tId] = true;
borderPixels[tId] = false;
}
else
{
interiorPixels[tId] = false;
borderPixels[tId] = true;
}
}
__global__
void computeG(const uchar4* const sourceImg,
const bool* const interiorPixels,
float3* const g,
const size_t numRows,
const size_t numCols)
{
const int tId = blockIdx.x*blockDim.x + threadIdx.x;
if (tId >= numRows*numCols) return;
if (!interiorPixels[tId]) return;
float sumX = 4.f*sourceImg[tId].x;
float sumY = 4.f*sourceImg[tId].y;
float sumZ = 4.f*sourceImg[tId].z;
sumX -= (float)sourceImg[tId-numCols].x + (float)sourceImg[tId+numCols].x
+ (float)sourceImg[tId-1].x + (float)sourceImg[tId+1].x;
sumY -= (float)sourceImg[tId-numCols].y + (float)sourceImg[tId+numCols].y
+ (float)sourceImg[tId-1].y + (float)sourceImg[tId+1].y;
sumZ -= (float)sourceImg[tId-numCols].z + (float)sourceImg[tId+numCols].z
+ (float)sourceImg[tId-1].z + (float)sourceImg[tId+1].z;
g[tId].x = sumX;
g[tId].y = sumY;
g[tId].z = sumZ;
}
__global__
void copySourceImgToBlendedVals(const uchar4* const sourceImg,
float3* const blendedVals1,
float3* const blendedVals2,
const size_t size)
{
const int tId = blockIdx.x*blockDim.x + threadIdx.x;
if (tId >= size) return;
blendedVals1[tId].x = (float)sourceImg[tId].x;
blendedVals1[tId].y = (float)sourceImg[tId].y;
blendedVals1[tId].z = (float)sourceImg[tId].z;
blendedVals2[tId].x = (float)sourceImg[tId].x;
blendedVals2[tId].y = (float)sourceImg[tId].y;
blendedVals2[tId].z = (float)sourceImg[tId].z;
}
__global__
void computeIteration(const uchar4* const destImg,
const bool* const interiorPixels,
const bool* const borderPixels,
const float3* const blendedVals1,
const float3* const g,
float3* const blendedVals2,
const size_t numRows,
const size_t numCols)
{
const int tId = blockIdx.x*blockDim.x + threadIdx.x;
if (tId >= numRows*numCols) return;
if (!interiorPixels[tId]) return;
float blendedSumX = 0.f;
float blendedSumY = 0.f;
float blendedSumZ = 0.f;
float borderSumX = 0.f;
float borderSumY = 0.f;
float borderSumZ = 0.f;
if (interiorPixels[tId-1])
{
blendedSumX += blendedVals1[tId-1].x;
blendedSumY += blendedVals1[tId-1].y;
blendedSumZ += blendedVals1[tId-1].z;
}
else
{
borderSumX += destImg[tId-1].x;
borderSumY += destImg[tId-1].y;
borderSumZ += destImg[tId-1].z;
}
if (interiorPixels[tId+1])
{
blendedSumX += blendedVals1[tId+1].x;
blendedSumY += blendedVals1[tId+1].y;
blendedSumZ += blendedVals1[tId+1].z;
}
else
{
borderSumX += destImg[tId+1].x;
borderSumY += destImg[tId+1].y;
borderSumZ += destImg[tId+1].z;
}
if (interiorPixels[tId-numCols])
{
blendedSumX += blendedVals1[tId-numCols].x;
blendedSumY += blendedVals1[tId-numCols].y;
blendedSumZ += blendedVals1[tId-numCols].z;
}
else
{
borderSumX += destImg[tId-numCols].x;
borderSumY += destImg[tId-numCols].y;
borderSumZ += destImg[tId-numCols].z;
}
if (interiorPixels[tId+numCols])
{
blendedSumX += blendedVals1[tId+numCols].x;
blendedSumY += blendedVals1[tId+numCols].y;
blendedSumZ += blendedVals1[tId+numCols].z;
}
else
{
borderSumX += destImg[tId+numCols].x;
borderSumY += destImg[tId+numCols].y;
borderSumZ += destImg[tId+numCols].z;
}
float next_valX = (blendedSumX+borderSumX+g[tId].x)/4.f;
float next_valY = (blendedSumY+borderSumY+g[tId].y)/4.f;
float next_valZ = (blendedSumZ+borderSumZ+g[tId].z)/4.f;
blendedVals2[tId].x = min(255.f, max(0.f, next_valX));
blendedVals2[tId].y = min(255.f, max(0.f, next_valY));
blendedVals2[tId].z = min(255.f, max(0.f, next_valZ));
}
__global__
void copyBlendedValsToOutput(const float3* const blendedVals,
const bool* const interiorPixels,
uchar4* const blendedImg,
const size_t size)
{
const int tId = blockIdx.x*blockDim.x + threadIdx.x;
if (tId >= size) return;
if (!interiorPixels[tId]) return;
blendedImg[tId].x = (unsigned char)blendedVals[tId].x;
blendedImg[tId].y = (unsigned char)blendedVals[tId].y;
blendedImg[tId].z = (unsigned char)blendedVals[tId].z;
}
void debugPrint(uchar4* data, size_t rows, size_t cols)
{
std::ofstream outFile;
outFile.open("log", std::ios::trunc);
for (int i=0; i<rows; i++)
{
for (int j=0; j<cols; j++)
{
outFile << "(" << (int)data[i*cols+j].x << ", "
<< (int)data[i*cols+j].y << ", "
<< (int)data[i*cols+j].z << ", "
<< (int)data[i*cols+j].w << ") ";
}
outFile << std::endl;
}
outFile.close();
}
void your_blend(const uchar4* const h_sourceImg, //IN
const size_t numRowsSource, const size_t numColsSource,
const uchar4* const h_destImg, //IN
uchar4* const h_blendedImg) //OUT
{
const size_t imgSize = numRowsSource*numColsSource;
unsigned int blockSize = 1024;
unsigned int gridSize = (imgSize+blockSize-1) / blockSize;
bool *d_mask;
uchar4 *d_sourceImg;
uchar4 *d_destImg;
uchar4 *d_blendedImg;
bool *d_interiorPixels;
bool *d_borderPixels;
float3 *d_g;
float3 *d_blendedVals1;
float3 *d_blendedVals2;
// device mem allocs
checkCudaErrors(hipMalloc(&d_mask, sizeof(bool)*imgSize));
checkCudaErrors(hipMalloc(&d_sourceImg, sizeof(uchar4)*imgSize));
checkCudaErrors(hipMalloc(&d_destImg, sizeof(uchar4)*imgSize));
checkCudaErrors(hipMalloc(&d_blendedImg, sizeof(uchar4)*imgSize));
checkCudaErrors(hipMalloc(&d_interiorPixels, sizeof(bool)*imgSize));
checkCudaErrors(hipMalloc(&d_borderPixels, sizeof(bool)*imgSize));
checkCudaErrors(hipMalloc(&d_g, sizeof(float3)*imgSize));
checkCudaErrors(hipMalloc(&d_blendedVals1, sizeof(float3)*imgSize));
checkCudaErrors(hipMalloc(&d_blendedVals2, sizeof(float3)*imgSize));
// memcpy to device
checkCudaErrors(hipMemcpy(d_sourceImg, h_sourceImg, sizeof(uchar4)*imgSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_destImg, h_destImg, sizeof(uchar4)*imgSize, hipMemcpyHostToDevice));
hipDeviceSynchronize();
// data preprocess
hipLaunchKernelGGL(( computeMask), dim3(gridSize), dim3(blockSize), 0, 0, d_sourceImg, d_mask, imgSize);
hipDeviceSynchronize();
hipLaunchKernelGGL(( computeBorderPixAndInteriorPix), dim3(gridSize), dim3(blockSize), 0, 0, d_mask, d_interiorPixels,
d_borderPixels, numRowsSource, numColsSource);
hipDeviceSynchronize();
hipLaunchKernelGGL(( computeG), dim3(gridSize), dim3(blockSize), 0, 0, d_sourceImg, d_interiorPixels, d_g,
numRowsSource, numColsSource);
hipLaunchKernelGGL(( copySourceImgToBlendedVals), dim3(gridSize), dim3(blockSize), 0, 0, d_sourceImg, d_blendedVals1, d_blendedVals2, imgSize);
hipDeviceSynchronize();
// start iterations
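// Jacobi iterations: computeIteration replaces every interior pixel with
// (sum of its 4 neighbours' current estimates, or destination-image values on
// the border, plus g) / 4, where g = 4*source - sum of the source's 4
// neighbours (computed in computeG above). blendedVals1/blendedVals2 are
// ping-ponged with std::swap after every step.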
for (int i=0; i<8000; i++)
{
hipLaunchKernelGGL(( computeIteration), dim3(gridSize), dim3(blockSize), 0, 0, d_destImg, d_interiorPixels, d_borderPixels,
d_blendedVals1, d_g, d_blendedVals2, numRowsSource, numColsSource);
hipDeviceSynchronize();
std::swap(d_blendedVals1, d_blendedVals2); // output goes to d_blendedVals1
}
// copy destImg into the output (blended) image
checkCudaErrors(hipMemcpy(d_blendedImg, d_destImg, sizeof(uchar4)*imgSize, hipMemcpyDeviceToDevice));
hipDeviceSynchronize();
hipLaunchKernelGGL(( copyBlendedValsToOutput), dim3(gridSize), dim3(blockSize), 0, 0, d_blendedVals1, d_interiorPixels, d_blendedImg, imgSize);
hipDeviceSynchronize();
checkCudaErrors(hipMemcpy(h_blendedImg, d_blendedImg, sizeof(uchar4)*imgSize, hipMemcpyDeviceToHost));
hipDeviceSynchronize();
// free device memory
checkCudaErrors(hipFree(d_mask));
checkCudaErrors(hipFree(d_sourceImg));
checkCudaErrors(hipFree(d_destImg));
checkCudaErrors(hipFree(d_blendedImg));
checkCudaErrors(hipFree(d_interiorPixels));
checkCudaErrors(hipFree(d_borderPixels));
checkCudaErrors(hipFree(d_g));
checkCudaErrors(hipFree(d_blendedVals1));
checkCudaErrors(hipFree(d_blendedVals2));
}
void poissonBlendingSample()
{
std::string input_source_file("datadropbox/input_source.png");
std::string input_dest_file("datadropbox/input_dest.png");
std::string output_file("datadropbox/output.png");
uchar4 *h_sourceImg, *h_destImg, *h_blendedImg;
size_t numRowsSource, numColsSource, numRowsDest, numColsDest;
loadImageRGBA(input_source_file, &h_sourceImg, &numRowsSource, &numColsSource);
loadImageRGBA(input_dest_file, &h_destImg, &numRowsDest, &numColsDest);
//debugPrint(h_sourceImg, numRowsSource, numColsSource);
assert(numRowsSource == numRowsDest);
assert(numColsSource == numColsDest);
h_blendedImg = new uchar4[numRowsSource*numColsSource];
printf("Poisson Blending sample starts ... \n");
your_blend(h_sourceImg, numRowsSource, numColsSource, h_destImg, h_blendedImg);
printf("DONE!\n");
saveImageRGBA(h_blendedImg, numRowsDest, numColsDest, output_file);
delete[] h_sourceImg;
delete[] h_destImg;
delete[] h_blendedImg;
}
| c23ce112d6e2e0c5e93809b932077aa647b43576.cu | #include <stdlib.h>
#include <iostream>
#include <string>
#include <opencv2/opencv.hpp>
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
// remember to delete the allocated imagePtr in the end
void loadImageRGBA(const std::string &filename,
uchar4 **imagePtr,
size_t *numRows, size_t *numCols)
{
cv::Mat image = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR);
if (image.empty()) {
std::cerr << "Couldn't open file: " << filename << std::endl;
exit(1);
}
if (image.channels() != 3) {
std::cerr << "Image must be color!" << std::endl;
exit(1);
}
if (!image.isContinuous()) {
std::cerr << "Image isn't continuous!" << std::endl;
exit(1);
}
cv::Mat imageRGBA;
cv::cvtColor(image, imageRGBA, CV_BGR2RGBA);
*imagePtr = new uchar4[image.rows * image.cols];
unsigned char *cvPtr = imageRGBA.ptr<unsigned char>(0);
for (size_t i = 0; i < image.rows * image.cols; ++i) {
(*imagePtr)[i].x = cvPtr[4 * i + 0];
(*imagePtr)[i].y = cvPtr[4 * i + 1];
(*imagePtr)[i].z = cvPtr[4 * i + 2];
(*imagePtr)[i].w = cvPtr[4 * i + 3];
}
*numRows = image.rows;
*numCols = image.cols;
}
void saveImageRGBA(const uchar4* const image,
const size_t numRows, const size_t numCols,
const std::string &output_file)
{
int sizes[2];
sizes[0] = numRows;
sizes[1] = numCols;
cv::Mat imageRGBA(2, sizes, CV_8UC4, (void *)image);
cv::Mat imageOutputBGR;
cv::cvtColor(imageRGBA, imageOutputBGR, CV_RGBA2BGR);
//output the image
cv::imwrite(output_file.c_str(), imageOutputBGR);
}
__global__
void computeMask(const uchar4* const sourceImg,
bool* const mask,
const size_t size)
{
const int tId = blockIdx.x*blockDim.x + threadIdx.x;
if (tId >= size) return;
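// a pixel belongs to the mask when it is not pure white
// (the sum of its RGB channels is below 3*255)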
unsigned int pixSum = sourceImg[tId].x + sourceImg[tId].y + sourceImg[tId].z;
if (pixSum < 3*255)
{
mask[tId] = true;
}
else
{
mask[tId] = false;
}
}
__global__
void computeBorderPixAndInteriorPix(const bool* const mask,
bool* const interiorPixels,
bool* const borderPixels,
const size_t numRows,
const size_t numCols)
{
const int tId = blockIdx.x*blockDim.x + threadIdx.x;
if (tId >= numRows*numCols) return;
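// interior: masked pixel whose four neighbours are all masked;
// border: masked pixel with at least one unmasked neighbour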
if (!mask[tId])
{
interiorPixels[tId] = false;
borderPixels[tId] = false;
return;
}
if (mask[tId - numCols] && mask[tId + numCols]
&& mask[tId-1] && mask[tId+1])
{
interiorPixels[tId] = true;
borderPixels[tId] = false;
}
else
{
interiorPixels[tId] = false;
borderPixels[tId] = true;
}
}
__global__
void computeG(const uchar4* const sourceImg,
const bool* const interiorPixels,
float3* const g,
const size_t numRows,
const size_t numCols)
{
const int tId = blockIdx.x*blockDim.x + threadIdx.x;
if (tId >= numRows*numCols) return;
if (!interiorPixels[tId]) return;
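// g holds 4 * source pixel minus the sum of its four neighbours,
// i.e. the discrete Laplacian of the source (the Poisson guidance term)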
float sumX = 4.f*sourceImg[tId].x;
float sumY = 4.f*sourceImg[tId].y;
float sumZ = 4.f*sourceImg[tId].z;
sumX -= (float)sourceImg[tId-numCols].x + (float)sourceImg[tId+numCols].x
+ (float)sourceImg[tId-1].x + (float)sourceImg[tId+1].x;
sumY -= (float)sourceImg[tId-numCols].y + (float)sourceImg[tId+numCols].y
+ (float)sourceImg[tId-1].y + (float)sourceImg[tId+1].y;
sumZ -= (float)sourceImg[tId-numCols].z + (float)sourceImg[tId+numCols].z
+ (float)sourceImg[tId-1].z + (float)sourceImg[tId+1].z;
g[tId].x = sumX;
g[tId].y = sumY;
g[tId].z = sumZ;
}
__global__
void copySourceImgToBlendedVals(const uchar4* const sourceImg,
float3* const blendedVals1,
float3* const blendedVals2,
const size_t size)
{
const int tId = blockIdx.x*blockDim.x + threadIdx.x;
if (tId >= size) return;
blendedVals1[tId].x = (float)sourceImg[tId].x;
blendedVals1[tId].y = (float)sourceImg[tId].y;
blendedVals1[tId].z = (float)sourceImg[tId].z;
blendedVals2[tId].x = (float)sourceImg[tId].x;
blendedVals2[tId].y = (float)sourceImg[tId].y;
blendedVals2[tId].z = (float)sourceImg[tId].z;
}
__global__
void computeIteration(const uchar4* const destImg,
const bool* const interiorPixels,
const bool* const borderPixels,
const float3* const blendedVals1,
const float3* const g,
float3* const blendedVals2,
const size_t numRows,
const size_t numCols)
{
const int tId = blockIdx.x*blockDim.x + threadIdx.x;
if (tId >= numRows*numCols) return;
if (!interiorPixels[tId]) return;
float blendedSumX = 0.f;
float blendedSumY = 0.f;
float blendedSumZ = 0.f;
float borderSumX = 0.f;
float borderSumY = 0.f;
float borderSumZ = 0.f;
if (interiorPixels[tId-1])
{
blendedSumX += blendedVals1[tId-1].x;
blendedSumY += blendedVals1[tId-1].y;
blendedSumZ += blendedVals1[tId-1].z;
}
else
{
borderSumX += destImg[tId-1].x;
borderSumY += destImg[tId-1].y;
borderSumZ += destImg[tId-1].z;
}
if (interiorPixels[tId+1])
{
blendedSumX += blendedVals1[tId+1].x;
blendedSumY += blendedVals1[tId+1].y;
blendedSumZ += blendedVals1[tId+1].z;
}
else
{
borderSumX += destImg[tId+1].x;
borderSumY += destImg[tId+1].y;
borderSumZ += destImg[tId+1].z;
}
if (interiorPixels[tId-numCols])
{
blendedSumX += blendedVals1[tId-numCols].x;
blendedSumY += blendedVals1[tId-numCols].y;
blendedSumZ += blendedVals1[tId-numCols].z;
}
else
{
borderSumX += destImg[tId-numCols].x;
borderSumY += destImg[tId-numCols].y;
borderSumZ += destImg[tId-numCols].z;
}
if (interiorPixels[tId+numCols])
{
blendedSumX += blendedVals1[tId+numCols].x;
blendedSumY += blendedVals1[tId+numCols].y;
blendedSumZ += blendedVals1[tId+numCols].z;
}
else
{
borderSumX += destImg[tId+numCols].x;
borderSumY += destImg[tId+numCols].y;
borderSumZ += destImg[tId+numCols].z;
}
float next_valX = (blendedSumX+borderSumX+g[tId].x)/4.f;
float next_valY = (blendedSumY+borderSumY+g[tId].y)/4.f;
float next_valZ = (blendedSumZ+borderSumZ+g[tId].z)/4.f;
blendedVals2[tId].x = min(255.f, max(0.f, next_valX));
blendedVals2[tId].y = min(255.f, max(0.f, next_valY));
blendedVals2[tId].z = min(255.f, max(0.f, next_valZ));
}
__global__
void copyBlendedValsToOutput(const float3* const blendedVals,
const bool* const interiorPixels,
uchar4* const blendedImg,
const size_t size)
{
const int tId = blockIdx.x*blockDim.x + threadIdx.x;
if (tId >= size) return;
if (!interiorPixels[tId]) return;
blendedImg[tId].x = (unsigned char)blendedVals[tId].x;
blendedImg[tId].y = (unsigned char)blendedVals[tId].y;
blendedImg[tId].z = (unsigned char)blendedVals[tId].z;
}
void debugPrint(uchar4* data, size_t rows, size_t cols)
{
std::ofstream outFile;
outFile.open("log", std::ios::trunc);
for (int i=0; i<rows; i++)
{
for (int j=0; j<cols; j++)
{
outFile << "(" << (int)data[i*cols+j].x << ", "
<< (int)data[i*cols+j].y << ", "
<< (int)data[i*cols+j].z << ", "
<< (int)data[i*cols+j].w << ") ";
}
outFile << std::endl;
}
outFile.close();
}
void your_blend(const uchar4* const h_sourceImg, //IN
const size_t numRowsSource, const size_t numColsSource,
const uchar4* const h_destImg, //IN
uchar4* const h_blendedImg) //OUT
{
const size_t imgSize = numRowsSource*numColsSource;
unsigned int blockSize = 1024;
unsigned int gridSize = (imgSize+blockSize-1) / blockSize;
bool *d_mask;
uchar4 *d_sourceImg;
uchar4 *d_destImg;
uchar4 *d_blendedImg;
bool *d_interiorPixels;
bool *d_borderPixels;
float3 *d_g;
float3 *d_blendedVals1;
float3 *d_blendedVals2;
// device mem allocs
checkCudaErrors(cudaMalloc(&d_mask, sizeof(bool)*imgSize));
checkCudaErrors(cudaMalloc(&d_sourceImg, sizeof(uchar4)*imgSize));
checkCudaErrors(cudaMalloc(&d_destImg, sizeof(uchar4)*imgSize));
checkCudaErrors(cudaMalloc(&d_blendedImg, sizeof(uchar4)*imgSize));
checkCudaErrors(cudaMalloc(&d_interiorPixels, sizeof(bool)*imgSize));
checkCudaErrors(cudaMalloc(&d_borderPixels, sizeof(bool)*imgSize));
checkCudaErrors(cudaMalloc(&d_g, sizeof(float3)*imgSize));
checkCudaErrors(cudaMalloc(&d_blendedVals1, sizeof(float3)*imgSize));
checkCudaErrors(cudaMalloc(&d_blendedVals2, sizeof(float3)*imgSize));
// memcpy to device
checkCudaErrors(cudaMemcpy(d_sourceImg, h_sourceImg, sizeof(uchar4)*imgSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_destImg, h_destImg, sizeof(uchar4)*imgSize, cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
// data preprocess
computeMask<<<gridSize, blockSize>>>(d_sourceImg, d_mask, imgSize);
cudaDeviceSynchronize();
computeBorderPixAndInteriorPix<<<gridSize, blockSize>>>(d_mask, d_interiorPixels,
d_borderPixels, numRowsSource, numColsSource);
cudaDeviceSynchronize();
computeG<<<gridSize, blockSize>>>(d_sourceImg, d_interiorPixels, d_g,
numRowsSource, numColsSource);
copySourceImgToBlendedVals<<<gridSize, blockSize>>>(d_sourceImg, d_blendedVals1, d_blendedVals2, imgSize);
cudaDeviceSynchronize();
// start iterations
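// Jacobi iterations for the discrete Poisson equation: every interior pixel is
// replaced by (sum of its interior neighbours' current values + sum of the
// destination image at non-interior neighbours + g) / 4, then the buffers swap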
for (int i=0; i<8000; i++)
{
computeIteration<<<gridSize, blockSize>>>(d_destImg, d_interiorPixels, d_borderPixels,
d_blendedVals1, d_g, d_blendedVals2, numRowsSource, numColsSource);
cudaDeviceSynchronize();
std::swap(d_blendedVals1, d_blendedVals2); // output goes to d_blendedVals1
}
// copy destImg to the output image
checkCudaErrors(cudaMemcpy(d_blendedImg, d_destImg, sizeof(uchar4)*imgSize, cudaMemcpyDeviceToDevice));
cudaDeviceSynchronize();
copyBlendedValsToOutput<<<gridSize, blockSize>>>(d_blendedVals1, d_interiorPixels, d_blendedImg, imgSize);
cudaDeviceSynchronize();
checkCudaErrors(cudaMemcpy(h_blendedImg, d_blendedImg, sizeof(uchar4)*imgSize, cudaMemcpyDeviceToHost));
cudaDeviceSynchronize();
// free device memory
checkCudaErrors(cudaFree(d_mask));
checkCudaErrors(cudaFree(d_sourceImg));
checkCudaErrors(cudaFree(d_destImg));
checkCudaErrors(cudaFree(d_blendedImg));
checkCudaErrors(cudaFree(d_interiorPixels));
checkCudaErrors(cudaFree(d_borderPixels));
checkCudaErrors(cudaFree(d_g));
checkCudaErrors(cudaFree(d_blendedVals1));
checkCudaErrors(cudaFree(d_blendedVals2));
}
void poissonBlendingSample()
{
std::string input_source_file("datadropbox/input_source.png");
std::string input_dest_file("datadropbox/input_dest.png");
std::string output_file("datadropbox/output.png");
uchar4 *h_sourceImg, *h_destImg, *h_blendedImg;
size_t numRowsSource, numColsSource, numRowsDest, numColsDest;
loadImageRGBA(input_source_file, &h_sourceImg, &numRowsSource, &numColsSource);
loadImageRGBA(input_dest_file, &h_destImg, &numRowsDest, &numColsDest);
//debugPrint(h_sourceImg, numRowsSource, numColsSource);
assert(numRowsSource == numRowsDest);
assert(numColsSource == numColsDest);
h_blendedImg = new uchar4[numRowsSource*numColsSource];
printf("Poisson Blending sample starts ... \n");
your_blend(h_sourceImg, numRowsSource, numColsSource, h_destImg, h_blendedImg);
printf("DONE!\n");
saveImageRGBA(h_blendedImg, numRowsDest, numColsDest, output_file);
delete[] h_sourceImg;
delete[] h_destImg;
delete[] h_blendedImg;
}
|
20163e1d9cb2c571e1ccf06230c6a72a5abf735b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
typedef struct{
int* indices;
float* points;
int* neighbor;
float* k_simplices;
} alpha_complex;
__device__ float calc_sigma(int* indices, float* points)
//circle radius of triangle
{
float d[3];
float s = 0;
for (int i = 0; i<3; i++){
float p1 = points[indices[i]*2] - points[indices[(i+1)%3]*2];
float p2 = points[indices[i]*2+1]-points[indices[(i+1)%3]*2+1];
d[i] = sqrtf(p1*p1+p2*p2);
s += d[i];}
s = s/2;
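// Heron's formula: area = sqrt(s*(s-a)*(s-b)*(s-c)) with s the semi-perimeter,
// and the circumradius of the triangle is a*b*c / (4*area)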
float area = sqrtf(s*(s-d[0])*(s-d[1])*(s-d[2]));
float circle_r = d[0]*d[1]*d[2]/(4.0*area);
return circle_r;
}
__global__ void create_simplices(alpha_complex* complex){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int indices[3];
int indices2[3];
float *points = complex->points;
float *k_sim = complex->k_simplices;
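// k_simplices layout: 3 edges per triangle, 5 floats per edge:
// [vertex a, vertex b, edge length, smaller incident circumradius (or edge length),
// larger incident circumradius (99999.0 marks a hull edge)]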
for (int i = 0; i<3; i++){
indices[i] = complex->indices[idx*3+i];
}
for (int i = 0; i<3; i++){
k_sim[idx*15 + i*5 + 0] = (float)complex->indices[idx*3+i];
k_sim[idx*15 + i*5 + 1] = (float)complex->indices[idx*3+(i+1)%3];
float p1 = points[indices[i]*2] - points[indices[(i+1)%3]*2];
float p2 = points[indices[i]*2+1] - points[indices[(i+1)%3]*2+1];
float sigma = sqrtf(p1*p1+p2*p2);
k_sim[idx*15 + i*5 +2] = sigma;
if(complex->neighbor[idx*3+(i+2)%3] == -1)
//only calc one radius if no neighbor
{
float dist1 = calc_sigma(indices, points);
k_sim[idx*15 + i*5 + 3] = fminf(dist1,sigma);
k_sim[idx*15 + i*5 + 4] = 99999.0;
}
else
//calc radius of nearest neighbor triangles and line distance
{
//todo: set neighbor to -1 to avoid double analysis
for(int j = 0;j<3;j++){
indices2[j] = complex->indices[complex->neighbor[idx*3+(i+2)%3]*3+j];
//weird indexing from scipy delaunay
}
float dist1 = calc_sigma(indices, points);
float dist2 = calc_sigma(indices2, points);
if (fminf(dist1, dist2)<1){
k_sim[idx*15 + i*5 + 3] = sigma;
}
else{
k_sim[idx*15 + i*5 + 3] = fminf(dist1, dist2);
}
k_sim[idx*15 + i*5 + 4] = fmaxf(dist1, dist2);
}
}
} | 20163e1d9cb2c571e1ccf06230c6a72a5abf735b.cu | typedef struct{
int* indices;
float* points;
int* neighbor;
float* k_simplices;
} alpha_complex;
__device__ float calc_sigma(int* indices, float* points)
//circle radius of triangle
{
float d[3];
float s = 0;
for (int i = 0; i<3; i++){
float p1 = points[indices[i]*2] - points[indices[(i+1)%3]*2];
float p2 = points[indices[i]*2+1]-points[indices[(i+1)%3]*2+1];
d[i] = sqrtf(p1*p1+p2*p2);
s += d[i];}
s = s/2;
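// Heron's formula: area = sqrt(s*(s-a)*(s-b)*(s-c)) with s the semi-perimeter,
// and the circumradius of the triangle is a*b*c / (4*area)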
float area = sqrtf(s*(s-d[0])*(s-d[1])*(s-d[2]));
float circle_r = d[0]*d[1]*d[2]/(4.0*area);
return circle_r;
}
__global__ void create_simplices(alpha_complex* complex){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int indices[3];
int indices2[3];
float *points = complex->points;
float *k_sim = complex->k_simplices;
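// k_simplices layout: 3 edges per triangle, 5 floats per edge:
// [vertex a, vertex b, edge length, smaller incident circumradius (or edge length),
// larger incident circumradius (99999.0 marks a hull edge)]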
for (int i = 0; i<3; i++){
indices[i] = complex->indices[idx*3+i];
}
for (int i = 0; i<3; i++){
k_sim[idx*15 + i*5 + 0] = (float)complex->indices[idx*3+i];
k_sim[idx*15 + i*5 + 1] = (float)complex->indices[idx*3+(i+1)%3];
float p1 = points[indices[i]*2] - points[indices[(i+1)%3]*2];
float p2 = points[indices[i]*2+1] - points[indices[(i+1)%3]*2+1];
float sigma = sqrtf(p1*p1+p2*p2);
k_sim[idx*15 + i*5 +2] = sigma;
if(complex->neighbor[idx*3+(i+2)%3] == -1)
//only calc one radius if no neighbor
{
float dist1 = calc_sigma(indices, points);
k_sim[idx*15 + i*5 + 3] = fminf(dist1,sigma);
k_sim[idx*15 + i*5 + 4] = 99999.0;
}
else
//calc radius of nearest neighbor triangles and line distance
{
//todo: set neighbor to -1 to avoid double analysis
for(int j = 0;j<3;j++){
indices2[j] = complex->indices[complex->neighbor[idx*3+(i+2)%3]*3+j];
//weird indexing from scipy delaunay
}
float dist1 = calc_sigma(indices, points);
float dist2 = calc_sigma(indices2, points);
if (fminf(dist1, dist2)<1){
k_sim[idx*15 + i*5 + 3] = sigma;
}
else{
k_sim[idx*15 + i*5 + 3] = fminf(dist1, dist2);
}
k_sim[idx*15 + i*5 + 4] = fmaxf(dist1, dist2);
}
}
} |
d9bf1d2b922d5980286cf476719778022b84bbcf.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "include/device_vector.h"
#include "Parameters.h"
#include "common_template_functions.h"
#include "GemmCallerMMA.h"
extern void show_iteration_status(int it, bool verbose, double copy_time, double comparing_time,
double computation_time, std::pair<int, int> errors);
template<typename half_t, typename real_t>
void setup_execute(Parameters& parameters, TensorCoresCaller<half_t, real_t>& mult_env,
const uint32_t threshold = 0) {
double elapsed_time = 0;
std::vector<half_t> a_vector_host(parameters.size_matrices * parameters.size_matrices);
std::vector<half_t> b_vector_host(parameters.size_matrices * parameters.size_matrices);
std::vector<real_t> c_vector_host(parameters.size_matrices * parameters.size_matrices);
std::vector<real_t> gold_host(parameters.size_matrices * parameters.size_matrices);
//Output host vectors are set after computation
std::vector<real_t> d_vector_host_real_t;
std::vector<real_t> d_vector_host_half_t;
if (parameters.generate) {
std::cout << "Generating input matrices\n";
auto read_abc_files_on_generate = (parameters.check_input_existence
&& exists(parameters.a_input_path) && exists(parameters.b_input_path)
&& exists(parameters.c_input_path));
get_input_matrices(parameters.size_matrices, a_vector_host, b_vector_host, c_vector_host,
parameters.a_input_path, parameters.b_input_path, parameters.c_input_path,
read_abc_files_on_generate);
} else {
std::cout << "Reading input matrices\n";
read_abc_files(parameters.a_input_path, a_vector_host, parameters.b_input_path,
b_vector_host, parameters.c_input_path, c_vector_host);
read_gold(parameters.gold_inout_path, gold_host);
}
//Alloc only after reading the inputs
rad::DeviceVector<half_t> a_vector_device = a_vector_host;
rad::DeviceVector<half_t> b_vector_device = b_vector_host;
rad::DeviceVector<real_t> c_vector_device = c_vector_host;
rad::DeviceVector<real_t> d_vector_device(parameters.size_matrices * parameters.size_matrices);
rad::DeviceVector<real_t> d_vector_half_t_device(
parameters.size_matrices * parameters.size_matrices);
std::cout << "Starting the setup process...\n";
std::cout << std::setprecision(5) << std::fixed;
for (int it = 0; it < parameters.iterations; it++) {
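// each iteration runs the GEMM on the device, copies both outputs back to the
// host and, when not generating gold data, compares them against the gold
// result, reloading all inputs if any mismatch was detected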
auto computation_time = rad::mysecond();
parameters.start_iteration();
mult_env.gemm(a_vector_device, b_vector_device, c_vector_device, d_vector_device,
d_vector_half_t_device, real_t(parameters.alpha), real_t(parameters.beta),
parameters.size_matrices, parameters.size_matrices, threshold);
rad::checkFrameworkErrors(hipDeviceSynchronize());
rad::checkFrameworkErrors(hipPeekAtLastError());
//end iteration
parameters.end_iteration();
computation_time = rad::mysecond() - computation_time;
elapsed_time += computation_time;
double copy_time = rad::mysecond();
mult_env.memcpy_half_t_mem(d_vector_host_half_t, d_vector_half_t_device);
d_vector_device.to_vector(d_vector_host_real_t);
copy_time = rad::mysecond() - copy_time;
if (!parameters.generate) {
auto comparing_time = rad::mysecond();
auto errors = check_output_errors_dmr(gold_host, d_vector_host_real_t,
d_vector_host_half_t, parameters, threshold, mult_env.duplicated);
comparing_time = rad::mysecond() - comparing_time;
show_iteration_status(it, parameters.verbose, copy_time, comparing_time,
computation_time, errors);
//If errors != 0 reload matrices to gpu
if (errors.first != 0 || errors.second != 0) {
read_abc_files(parameters.a_input_path, a_vector_host, parameters.b_input_path,
b_vector_host, parameters.c_input_path, c_vector_host);
read_gold(parameters.gold_inout_path, gold_host);
a_vector_device.resize(0);
b_vector_device.resize(0);
c_vector_device.resize(0);
d_vector_device.resize(0);
d_vector_half_t_device.resize(0);
a_vector_device = a_vector_host;
b_vector_device = b_vector_host;
c_vector_device = c_vector_host;
d_vector_device = d_vector_host_real_t;
d_vector_half_t_device = d_vector_host_half_t;
}
}
}
if (parameters.verbose) {
std::cout << "Elapsed time: " << (elapsed_time / parameters.iterations) << " s\n";
} else {
std::cout << "done.\n";
}
if (parameters.generate) {
auto zero_count = 0ul;
auto nans_count = 0ul;
for (auto s : d_vector_host_real_t) {
zero_count += (float(s) == 0.0f);
nans_count += (std::isnan(float(s)));
}
std::cout << "Zero values on gold: " << zero_count << std::endl;
std::cout << "Nans values on gold: " << nans_count << std::endl;
write_gold(parameters.gold_inout_path, d_vector_host_real_t);
}
}
void setup_gemm_tensor_cores_unhardened(Parameters& parameters) {
if (parameters.precision == "half") {
UnhardenedTensorCoresCaller<half> gemm_obj(parameters.size_matrices,
parameters.size_matrices);
setup_execute(parameters, gemm_obj);
}
if (parameters.precision == "float" || parameters.precision == "single"
|| parameters.precision == "double") {
throw_line(parameters.precision + " using tensorcores not ready yet!!!");
}
}
void setup_gemm_tensor_cores_dmr(Parameters& parameters) {
if (parameters.precision == "half") {
DMRTensorCoresCaller<half> gemm_obj(parameters.size_matrices, parameters.size_matrices);
setup_execute(parameters, gemm_obj);
}
if (parameters.precision == "float" || parameters.precision == "single"
|| parameters.precision == "double") {
throw_line(parameters.precision + " using tensorcores not ready yet!!!");
}
}
| d9bf1d2b922d5980286cf476719778022b84bbcf.cu | #include <iostream>
#include "include/device_vector.h"
#include "Parameters.h"
#include "common_template_functions.h"
#include "GemmCallerMMA.h"
extern void show_iteration_status(int it, bool verbose, double copy_time, double comparing_time,
double computation_time, std::pair<int, int> errors);
template<typename half_t, typename real_t>
void setup_execute(Parameters& parameters, TensorCoresCaller<half_t, real_t>& mult_env,
const uint32_t threshold = 0) {
double elapsed_time = 0;
std::vector<half_t> a_vector_host(parameters.size_matrices * parameters.size_matrices);
std::vector<half_t> b_vector_host(parameters.size_matrices * parameters.size_matrices);
std::vector<real_t> c_vector_host(parameters.size_matrices * parameters.size_matrices);
std::vector<real_t> gold_host(parameters.size_matrices * parameters.size_matrices);
//Output host vectors are set after computation
std::vector<real_t> d_vector_host_real_t;
std::vector<real_t> d_vector_host_half_t;
if (parameters.generate) {
std::cout << "Generating input matrices\n";
auto read_abc_files_on_generate = (parameters.check_input_existence
&& exists(parameters.a_input_path) && exists(parameters.b_input_path)
&& exists(parameters.c_input_path));
get_input_matrices(parameters.size_matrices, a_vector_host, b_vector_host, c_vector_host,
parameters.a_input_path, parameters.b_input_path, parameters.c_input_path,
read_abc_files_on_generate);
} else {
std::cout << "Reading input matrices\n";
read_abc_files(parameters.a_input_path, a_vector_host, parameters.b_input_path,
b_vector_host, parameters.c_input_path, c_vector_host);
read_gold(parameters.gold_inout_path, gold_host);
}
//Alloc only after reading the inputs
rad::DeviceVector<half_t> a_vector_device = a_vector_host;
rad::DeviceVector<half_t> b_vector_device = b_vector_host;
rad::DeviceVector<real_t> c_vector_device = c_vector_host;
rad::DeviceVector<real_t> d_vector_device(parameters.size_matrices * parameters.size_matrices);
rad::DeviceVector<real_t> d_vector_half_t_device(
parameters.size_matrices * parameters.size_matrices);
std::cout << "Starting the setup process...\n";
std::cout << std::setprecision(5) << std::fixed;
for (int it = 0; it < parameters.iterations; it++) {
auto computation_time = rad::mysecond();
parameters.start_iteration();
mult_env.gemm(a_vector_device, b_vector_device, c_vector_device, d_vector_device,
d_vector_half_t_device, real_t(parameters.alpha), real_t(parameters.beta),
parameters.size_matrices, parameters.size_matrices, threshold);
rad::checkFrameworkErrors(cudaDeviceSynchronize());
rad::checkFrameworkErrors(cudaPeekAtLastError());
//end iteration
parameters.end_iteration();
computation_time = rad::mysecond() - computation_time;
elapsed_time += computation_time;
double copy_time = rad::mysecond();
mult_env.memcpy_half_t_mem(d_vector_host_half_t, d_vector_half_t_device);
d_vector_device.to_vector(d_vector_host_real_t);
copy_time = rad::mysecond() - copy_time;
if (!parameters.generate) {
auto comparing_time = rad::mysecond();
auto errors = check_output_errors_dmr(gold_host, d_vector_host_real_t,
d_vector_host_half_t, parameters, threshold, mult_env.duplicated);
comparing_time = rad::mysecond() - comparing_time;
show_iteration_status(it, parameters.verbose, copy_time, comparing_time,
computation_time, errors);
//If errors != 0 reload matrices to gpu
if (errors.first != 0 || errors.second != 0) {
read_abc_files(parameters.a_input_path, a_vector_host, parameters.b_input_path,
b_vector_host, parameters.c_input_path, c_vector_host);
read_gold(parameters.gold_inout_path, gold_host);
a_vector_device.resize(0);
b_vector_device.resize(0);
c_vector_device.resize(0);
d_vector_device.resize(0);
d_vector_half_t_device.resize(0);
a_vector_device = a_vector_host;
b_vector_device = b_vector_host;
c_vector_device = c_vector_host;
d_vector_device = d_vector_host_real_t;
d_vector_half_t_device = d_vector_host_half_t;
}
}
}
if (parameters.verbose) {
std::cout << "Elapsed time: " << (elapsed_time / parameters.iterations) << " s\n";
} else {
std::cout << "done.\n";
}
if (parameters.generate) {
auto zero_count = 0ul;
auto nans_count = 0ul;
for (auto s : d_vector_host_real_t) {
zero_count += (float(s) == 0.0f);
nans_count += (std::isnan(float(s)));
}
std::cout << "Zero values on gold: " << zero_count << std::endl;
std::cout << "Nans values on gold: " << nans_count << std::endl;
write_gold(parameters.gold_inout_path, d_vector_host_real_t);
}
}
void setup_gemm_tensor_cores_unhardened(Parameters& parameters) {
if (parameters.precision == "half") {
UnhardenedTensorCoresCaller<half> gemm_obj(parameters.size_matrices,
parameters.size_matrices);
setup_execute(parameters, gemm_obj);
}
if (parameters.precision == "float" || parameters.precision == "single"
|| parameters.precision == "double") {
throw_line(parameters.precision + " using tensorcores not ready yet!!!");
}
}
void setup_gemm_tensor_cores_dmr(Parameters& parameters) {
if (parameters.precision == "half") {
DMRTensorCoresCaller<half> gemm_obj(parameters.size_matrices, parameters.size_matrices);
setup_execute(parameters, gemm_obj);
}
if (parameters.precision == "float" || parameters.precision == "single"
|| parameters.precision == "double") {
throw_line(parameters.precision + " using tensorcores not ready yet!!!");
}
}
|
4cfe0e4a4b2394bdcc65b76c6fc5f6ab25d54fbd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Cambricon Corporation: © 2018 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2018, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <vector>
#include "caffe/layers/elu_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ELUForward(const int n, const Dtype* in, Dtype* out,
Dtype alpha) {
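// ELU activation: f(x) = x for x > 0, alpha * (exp(x) - 1) otherwise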
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] :
alpha * (exp(in[index]) - 1);
}
}
template <typename Dtype>
void ELULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype alpha = this->layer_param_.elu_param().alpha();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ELUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data, alpha);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ELUBackward(const int n, const Dtype* in_diff,
const Dtype* out_data, const Dtype* in_data,
Dtype* out_diff, Dtype alpha) {
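// for x <= 0, f(x) = alpha*(exp(x)-1) gives f'(x) = alpha*exp(x) = f(x) + alpha,
// which is why the gradient reuses out_data + alpha below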
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_data[index] > 0 ? in_diff[index] :
in_diff[index] * (out_data[index] + alpha);
}
}
template <typename Dtype>
void ELULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype alpha = this->layer_param_.elu_param().alpha();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ELUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top_data, bottom_data, bottom_diff, alpha);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ELULayer);
} // namespace caffe
| 4cfe0e4a4b2394bdcc65b76c6fc5f6ab25d54fbd.cu | /*
All modification made by Cambricon Corporation: © 2018 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2018, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <vector>
#include "caffe/layers/elu_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ELUForward(const int n, const Dtype* in, Dtype* out,
Dtype alpha) {
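// ELU activation: f(x) = x for x > 0, alpha * (exp(x) - 1) otherwise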
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] :
alpha * (exp(in[index]) - 1);
}
}
template <typename Dtype>
void ELULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype alpha = this->layer_param_.elu_param().alpha();
// NOLINT_NEXT_LINE(whitespace/operators)
ELUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data, alpha);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ELUBackward(const int n, const Dtype* in_diff,
const Dtype* out_data, const Dtype* in_data,
Dtype* out_diff, Dtype alpha) {
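// for x <= 0, f(x) = alpha*(exp(x)-1) gives f'(x) = alpha*exp(x) = f(x) + alpha,
// which is why the gradient reuses out_data + alpha below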
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_data[index] > 0 ? in_diff[index] :
in_diff[index] * (out_data[index] + alpha);
}
}
template <typename Dtype>
void ELULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype alpha = this->layer_param_.elu_param().alpha();
// NOLINT_NEXT_LINE(whitespace/operators)
ELUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top_data, bottom_data, bottom_diff, alpha);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ELULayer);
} // namespace caffe
|
7e6143ec292b013520b3fa19d309f29b600ff3a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//CS-4370 Parallel Programming for many core GPUs
//Name: Gesu Bal
/*
this is a simple cuda program calculating vector add for 2 dimensions on GPU device
I added two two-dimensional matrices A, B on the device GPU.
After the device matrix addition kernel function is invoked, the addition result is transferred back to the CPU.
The program will also compute the sum matrix of matrices A and B using the CPU.
Then the program compares the device-computed result with the CPU-computed result.
If it matches, it prints out Test PASSED to the screen before exiting.
*/
#include<stdio.h>
#include<cuda.h>
int N,blocksize;
//gpu function for addition
__global__ void add_gpu(int *d_a, int *d_b, int *d_c, int N)
{
int row=blockIdx.y*blockDim.y+threadIdx.y;
int col=blockIdx.x*blockDim.x+threadIdx.x;
//int index =i+(j*N);
if((row <N) && (col <N))
{
d_c[row*N+col]=d_a[row*N+col]+d_b[row*N+col];
}
}
//cpu function for addition
void add_matrix_cpu(int *a, int *b, int *cpu_c, int N)
{
int i, j;
for (i=0;i<N;i++) {
for (j=0;j<N;j++) {
cpu_c[i*N+j]=a[i*N+j]+b[i*N+j];
}
}
}
//match cpu and gpu results
int verify(int * a, int * b, int N)
{
int i,j;
int error=0;
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
if(a[i*N+j]!=b[i*N+j])
{
error++;
}
}
}
if(error==0)
{
printf("CPU and GPU results matched: Test Passed \n");
}
else
{
printf("CPU and GPU results did not match");
}
return 1;
}
//print matrix function
int printMatrix(int *a,int N)
{
int i,j;
for (i=0;i<N;i++)
{
for (j=0;j<N;j++)
{
printf("%d\t",a[i*N+j]);
}
printf("\n");
}
return 1;
}
int main()
{
//user input
int r, col;
printf("Select one of the following options for vector addition: \n");
printf("Press a for matrix size 8 * 8 \n");
printf("Press b for matrix size 64 * 64 \n");
printf("Press c for matrix size 128 * 128 \n");
printf("Press d for matrix size 500 * 500 \n");
printf("Press e for matrix size 1000 * 1000 \n");
printf("Press any other key for exiting \n");
char ch;
scanf("%c",&ch);
switch(ch)
{
case 'a':
r=8;
col=8;
N=8;
blocksize=4;
printf("Array size is 8 * 8 \n");
break;
case 'b':
r=64;
col=64;
N=64;
blocksize=16;
printf("Array size is 64 * 64 \n");
break;
case 'c':
r=128;
col=128;
N=128;
blocksize=16;
printf("Array size is 128 * 128 \n");
break;
case 'd':
r=500;
col=500;
N=500;
blocksize=16;
printf("Array size is 500 * 500 \n");
break;
case 'e':
r=1000;
col=1000;
N=1000;
blocksize=16;
printf("Array size is 1000 * 1000 \n");
break;
default:
exit(1);
break;
}
//vector initialization
int *a, *b, *c, *cpu_c, *d_a, *d_b, *d_c;
int a_size=r*col;
int b_size=r*col;
int c_size=r*col;
int cpu_c_size=r*col;
//memory allocation for vectors on host
a=(int*)malloc(sizeof(int)*a_size);
b=(int*)malloc(sizeof(int)*b_size);
c=(int*)malloc(sizeof(int)*c_size);
cpu_c=(int*)malloc(sizeof(int)*cpu_c_size);
//matrix initialization
int i,j;
int init=1325;
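// simple multiplicative congruential generator (seed 1325) used to fill
// A and B with reproducible pseudo-random values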
for (i=0;i<N;i++)
{
for (j=0;j<N;j++)
{
init=3125*init%65536;
a[i*col+j]=((init-32768)/16384);
b[i*col+j]=(init%1000);
}
}
hipError_t cudaret=hipMalloc((void **)(&d_a),(N*N)*sizeof(int));
if(cudaret!=hipSuccess)
{printf("memory was not allocated on device \n");}
hipMalloc((void **)(&d_b),(N*N)*sizeof(int));
hipMalloc((void **)(&d_c),(N*N)*sizeof(int));
//copying contents of a and b to device arrays
hipMemcpy(d_a,a,(N*N)*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(d_b,b,(N*N)*sizeof(int),hipMemcpyHostToDevice);
//Initializing block count and block size
dim3 dimBlock(blocksize,blocksize,1);
int blockCount_x = (N - 1)/(double(blocksize))+1;//Get number of blocks needed per direction.
int blockCount_y = (N - 1)/(double(blocksize))+1;
printf("the number of the thread blocks in x direction will be %d\n", blockCount_x);
printf("the number of the thread blocks in y direction will be %d\n", blockCount_y);
dim3 dimGrid(blockCount_x,blockCount_y,1);
//calling CPU program
printf("calculating results for CPU vector addition \n");
printf("---------\n");
add_matrix_cpu(a,b,cpu_c,N);
//printMatrix(a,N);
//printMatrix(b,N);
//printMatrix(cpu_c,N);
//call kernel for gpu functioning
printf("calling kernel for gpu computations for vector addition \n");
printf("---------\n");
hipLaunchKernelGGL(( add_gpu), dim3(dimGrid),dim3(dimBlock), 0, 0, d_a,d_b,d_c,N);
printf("calculating results for gpu \n");
printf("---------\n");
//copying result back to CPU from GPU
hipMemcpy(c,d_c,(N*N)*sizeof(int),hipMemcpyDeviceToHost);
//matching cpu and gpu results
printf("comparing results for CPU and GPU computations \n");
printf("---------\n");
verify(c,cpu_c,N);
//printMatrix(c,N);
//Deallocating memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
| 7e6143ec292b013520b3fa19d309f29b600ff3a9.cu | //CS-4370 Parallel Programming for many core GPUs
//Name: Gesu Bal
/*
this is a simple cuda program calculating vector add for 2 dimensions on GPU device
I added two two-dimensional matrices A, B on the device GPU.
After the device matrix addition kernel function is invoked, the addition result is transferred back to the CPU.
The program will also compute the sum matrix of matrices A and B using the CPU.
Then the program compares the device-computed result with the CPU-computed result.
If it matches, it prints out Test PASSED to the screen before exiting.
*/
#include<stdio.h>
#include<cuda.h>
int N,blocksize;
//gpu function for addition
__global__ void add_gpu(int *d_a, int *d_b, int *d_c, int N)
{
int row=blockIdx.y*blockDim.y+threadIdx.y;
int col=blockIdx.x*blockDim.x+threadIdx.x;
//int index =i+(j*N);
if((row <N) && (col <N))
{
d_c[row*N+col]=d_a[row*N+col]+d_b[row*N+col];
}
}
//cpu function for addition
void add_matrix_cpu(int *a, int *b, int *cpu_c, int N)
{
int i, j;
for (i=0;i<N;i++) {
for (j=0;j<N;j++) {
cpu_c[i*N+j]=a[i*N+j]+b[i*N+j];
}
}
}
//match cpu and gpu results
int verify(int * a, int * b, int N)
{
int i,j;
int error=0;
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
if(a[i*N+j]!=b[i*N+j])
{
error++;
}
}
}
if(error==0)
{
printf("CPU and GPU results matched: Test Passed \n");
}
else
{
printf("CPU and GPU results did not match");
}
return 1;
}
//print matrix function
int printMatrix(int *a,int N)
{
int i,j;
for (i=0;i<N;i++)
{
for (j=0;j<N;j++)
{
printf("%d\t",a[i*N+j]);
}
printf("\n");
}
return 1;
}
int main()
{
//user input
int r, col;
printf("Select one of the following options for vector addition: \n");
printf("Press a for matrix size 8 * 8 \n");
printf("Press b for matrix size 64 * 64 \n");
printf("Press c for matrix size 128 * 128 \n");
printf("Press d for matrix size 500 * 500 \n");
printf("Press e for matrix size 1000 * 1000 \n");
printf("Press any other key for exiting \n");
char ch;
scanf("%c",&ch);
switch(ch)
{
case 'a':
r=8;
col=8;
N=8;
blocksize=4;
printf("Array size is 8 * 8 \n");
break;
case 'b':
r=64;
col=64;
N=64;
blocksize=16;
printf("Array size is 64 * 64 \n");
break;
case 'c':
r=128;
col=128;
N=128;
blocksize=16;
printf("Array size is 128 * 128 \n");
break;
case 'd':
r=500;
col=500;
N=500;
blocksize=16;
printf("Array size is 500 * 500 \n");
break;
case 'e':
r=1000;
col=1000;
N=1000;
blocksize=16;
printf("Array size is 1000 * 1000 \n");
break;
default:
exit(1);
break;
}
//vector initialization
int *a, *b, *c, *cpu_c, *d_a, *d_b, *d_c;
int a_size=r*col;
int b_size=r*col;
int c_size=r*col;
int cpu_c_size=r*col;
//memory allocation for vectors on host
a=(int*)malloc(sizeof(int)*a_size);
b=(int*)malloc(sizeof(int)*b_size);
c=(int*)malloc(sizeof(int)*c_size);
cpu_c=(int*)malloc(sizeof(int)*cpu_c_size);
//matrix initialization
int i,j;
int init=1325;
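// simple multiplicative congruential generator (seed 1325) used to fill
// A and B with reproducible pseudo-random values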
for (i=0;i<N;i++)
{
for (j=0;j<N;j++)
{
init=3125*init%65536;
a[i*col+j]=((init-32768)/16384);
b[i*col+j]=(init%1000);
}
}
cudaError_t cudaret=cudaMalloc((void **)(&d_a),(N*N)*sizeof(int));
if(cudaret!=cudaSuccess)
{printf("memory was not allocated on device \n");}
cudaMalloc((void **)(&d_b),(N*N)*sizeof(int));
cudaMalloc((void **)(&d_c),(N*N)*sizeof(int));
//copying contents of a and b to device arrays
cudaMemcpy(d_a,a,(N*N)*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,(N*N)*sizeof(int),cudaMemcpyHostToDevice);
//Initializing block count and block size
dim3 dimBlock(blocksize,blocksize,1);
int blockCount_x = (N - 1)/(double(blocksize))+1;//Get number of blocks needed per direction.
int blockCount_y = (N - 1)/(double(blocksize))+1;
printf("the number of the thread blocks in x direction will be %d\n", blockCount_x);
printf("the number of the thread blocks in y direction will be %d\n", blockCount_y);
dim3 dimGrid(blockCount_x,blockCount_y,1);
//calling CPU program
printf("calculating results for CPU vector addition \n");
printf("---------\n");
add_matrix_cpu(a,b,cpu_c,N);
//printMatrix(a,N);
//printMatrix(b,N);
//printMatrix(cpu_c,N);
//call kernel for gpu functioning
printf("calling kernel for gpu computations for vector addition \n");
printf("---------\n");
add_gpu<<<dimGrid,dimBlock>>>(d_a,d_b,d_c,N);
printf("calculating results for gpu \n");
printf("---------\n");
//copying result back to CPU from GPU
cudaMemcpy(c,d_c,(N*N)*sizeof(int),cudaMemcpyDeviceToHost);
//matching cpu and gpu results
printf("comparing results for CPU and GPU computations \n");
printf("---------\n");
verify(c,cpu_c,N);
//printMatrix(c,N);
//Deallocating memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
41661f866a1e812f2eca0d7360eb7a1b7f26a312.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***
* $Id$
**
* File: gpu_util.cu
* Created: Mar 22, 2019
*
* Parts of this code have been derived from NVIDIA samples: cuda-8.0/samples/6_Advanced/reduction
* with the following copyright:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This file is part of cuSABNAtk.
*
*/
#ifndef GPU_UTIL_CU
#define GPU_UTIL_CU
#include <cstdint>
#include <stdio.h>
#include "gpu_util_hip.cuh"
__constant__ uint64_t aritiesPtr_[4][10];
__constant__ uint64_t aritiesPrefixProdPtr_[4][11];
__constant__ uint64_t aritiesPrefixSumPtr_[4][10];
template <class T, unsigned int blockSize, bool nIsPow2, bool isFinalStage>
__global__ void counts(const T* inputData,
T* outputData,
T* outputDataPa,
T* intermediateData,
unsigned int words_per_vector, // m / 64
int variablesCount, // number of variables in a query
int configs_per_query, /* number of configs*/
int startVariableId,
int streamId,
hipStream_t stream,
int parentBlockId);
// from cuda samples reduction
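// rounds x up to the next power of two by propagating the highest set bit
// into every lower bit and then adding one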
CUDA_CALLABLE unsigned int nextPow2(unsigned int x) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
} // nextPow2
template <bool isFinalStage>
CUDA_CALLABLE void startKernel(const uint64_t* inputData,
uint64_t* outputData,
uint64_t* outputDataPa,
uint64_t* intermediateData,
unsigned int words_per_vector, // m / 64
int variablesCount, // number of variables in a query
int configs_per_query, /* number of configs*/
int startVariableId,
int streamId,
hipStream_t stream,
int threadCount,
int parentBlockId) {
dim3 dimBlock(threadCount, 1, 1);
dim3 dimGrid(configs_per_query, 1, 1);
int smemSize = (threadCount <= 32) ? 2 * threadCount * sizeof(uint64_t) : threadCount * sizeof(uint64_t);
if (isFinalStage) {
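// the final stage keeps two shared arrays per block (parent-configuration
// counts and child counts), hence twice the dynamic shared memory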
smemSize *= 2;
switch (threadCount) {
case 512:
hipLaunchKernelGGL(( counts<uint64_t, 512, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, 0,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 256:
hipLaunchKernelGGL(( counts<uint64_t, 256, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, 0,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 128:
hipLaunchKernelGGL(( counts<uint64_t, 128, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, 0,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 64:
hipLaunchKernelGGL(( counts<uint64_t, 64, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, 0,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 32:
hipLaunchKernelGGL(( counts<uint64_t, 32, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, 0,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 16:
hipLaunchKernelGGL(( counts<uint64_t, 16, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, 0,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 8:
hipLaunchKernelGGL(( counts<uint64_t, 8, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, 0,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 4:
hipLaunchKernelGGL(( counts<uint64_t, 4, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, 0,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 2:
hipLaunchKernelGGL(( counts<uint64_t, 2, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, 0,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 1:
hipLaunchKernelGGL(( counts<uint64_t, 1, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, 0,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
default:
printf("Unsupported thread count. Exiting.\n");
}
} else {
switch (threadCount) {
case 512:
hipLaunchKernelGGL(( counts<uint64_t, 512, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, stream,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 256:
hipLaunchKernelGGL(( counts<uint64_t, 256, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, stream,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 128:
hipLaunchKernelGGL(( counts<uint64_t, 128, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, stream,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 64:
hipLaunchKernelGGL(( counts<uint64_t, 64, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, stream,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 32:
hipLaunchKernelGGL(( counts<uint64_t, 32, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, stream,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 16:
hipLaunchKernelGGL(( counts<uint64_t, 16, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, stream,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 8:
hipLaunchKernelGGL(( counts<uint64_t, 8, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, stream,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 4:
hipLaunchKernelGGL(( counts<uint64_t, 4, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, stream,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 2:
hipLaunchKernelGGL(( counts<uint64_t, 2, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, stream,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 1:
hipLaunchKernelGGL(( counts<uint64_t, 1, false, isFinalStage>), dim3(dimGrid), dim3(dimBlock), smemSize, stream,
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
default:
printf("Unsupported thread count. Exiting.\n");
}
}
cucheck_dev(hipGetLastError());
}
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template <class T> struct SharedMemory {
__device__ inline operator T*() {
extern __shared__ int __smem[];
return (T*)__smem;
}
__device__ inline operator const T*() const {
extern __shared__ int __smem[];
return (T*)__smem;
}
}; // struct SharedMemory
__host__ void copyAritiesToDevice(int streamId,
const std::vector<uint64_t>& pArities,
const std::vector<uint64_t>& pAritiesPrefixProd,
const std::vector<uint64_t>& pAritiesPrefixSum) {
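// each stream owns one row of the __constant__ arrays declared above, so every
// copy is offset by streamId times the row length (10, 11 and 10 uint64_t words)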
cucheck_dev( hipMemcpyToSymbol(aritiesPtr_, pArities.data(),
pArities.size() * sizeof(uint64_t), streamId * sizeof(uint64_t) * 10) );
cucheck_dev( hipMemcpyToSymbol(aritiesPrefixProdPtr_,
pAritiesPrefixProd.data(), pAritiesPrefixProd.size() * sizeof(uint64_t), streamId * sizeof(uint64_t) * 11) );
cucheck_dev( hipMemcpyToSymbol(aritiesPrefixSumPtr_,
pAritiesPrefixSum.data(), pAritiesPrefixSum.size() * sizeof(uint64_t), streamId * sizeof(uint64_t) * 10) );
} // m_copyAritiesToDevice__
template <class T, unsigned int blockSize, bool nIsPow2, bool isFinalStage>
__global__ void counts(const T* inputData,
T* outputData,
T* outputDataPa,
T* intermediateData,
unsigned int words_per_vector, // m / 64
int variablesCount, // number of variables in a query
int configs_per_query, /* number of configs*/
int startVariableId,
int streamId,
hipStream_t stream,
int parentBlockId) {
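// Two-stage counting: with isFinalStage == false the block ANDs the bit vectors
// of (at most) the first five parent variables, caches the partial vector in
// intermediateData and, if the parent configuration is non-empty, launches the
// final stage from the device (dynamic parallelism); the final stage ANDs in the
// remaining parent variables, the cached vector and the child variable, then
// reduces both popcounts per block.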
//TODO: we don't really need two shared mems.
T* sDataPa = SharedMemory<T>();
T* sDataTot = &sDataPa[blockSize];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize + threadIdx.x;
unsigned int word_index = i % blockSize; // can't this be tid?
int intermediateResultIndex;
//TODO: remove the below constant
if (isFinalStage) {
intermediateResultIndex = (streamId * words_per_vector * 32) + (parentBlockId * words_per_vector) + word_index;
} else {
intermediateResultIndex = (streamId * words_per_vector * 32) + (blockIdx.x * words_per_vector) + word_index;
}
T totSum = 0;
T paSum = 0;
T xiBitVect;
int temp = ((blockIdx.x / aritiesPrefixProdPtr_[streamId][startVariableId]) % aritiesPtr_[streamId][startVariableId]);
T paBitVect = *(((uint64_t*)inputData) + ((aritiesPrefixSumPtr_[streamId][startVariableId] + temp) * words_per_vector) + word_index);
// running sum for all word slices
for (int p = startVariableId + 1; p < min(5 + startVariableId, variablesCount-1); ++p) {
temp = ((blockIdx.x / aritiesPrefixProdPtr_[streamId][p]) % aritiesPtr_[streamId][p]);
paBitVect = paBitVect & *(((uint64_t*)inputData) + ((aritiesPrefixSumPtr_[streamId][p] + temp) * words_per_vector) + word_index);
}
if (isFinalStage) {
paBitVect &= intermediateData[intermediateResultIndex];
temp = ((blockIdx.x / aritiesPrefixProdPtr_[streamId][variablesCount-1]) % aritiesPtr_[streamId][variablesCount-1]);
xiBitVect = *(((uint64_t*)inputData) + ((aritiesPrefixSumPtr_[streamId][variablesCount-1] + temp) * words_per_vector) + word_index);
xiBitVect &= paBitVect;
totSum += __popcll(xiBitVect);
} else {
intermediateData[intermediateResultIndex] = paBitVect;
}
paSum += __popcll(paBitVect);
// ensure we don't read out of bounds -- this is optimized away for power of 2 sized arrays
if (nIsPow2 || (tid + blockSize < words_per_vector)) {
unsigned int word_index_upper_half = word_index + blockSize;
temp = ((blockIdx.x / aritiesPrefixProdPtr_[streamId][startVariableId]) % aritiesPtr_[streamId][startVariableId]);
paBitVect = *(((uint64_t*)inputData) + ((aritiesPrefixSumPtr_[streamId][startVariableId] + temp) * words_per_vector) + word_index_upper_half);
for (int p = startVariableId + 1; p < min(5 + startVariableId, variablesCount-1); p++) {
temp = ((blockIdx.x / aritiesPrefixProdPtr_[streamId][p]) % aritiesPtr_[streamId][p]);
paBitVect = paBitVect & *(((uint64_t*)inputData) + ((aritiesPrefixSumPtr_[streamId][p] + temp) * words_per_vector) + word_index_upper_half);
}
if (isFinalStage) {
paBitVect &= intermediateData[intermediateResultIndex + blockSize];
temp = ((blockIdx.x / aritiesPrefixProdPtr_[streamId][variablesCount-1]) % aritiesPtr_[streamId][variablesCount-1]);
xiBitVect = *(((uint64_t*)inputData) + ((aritiesPrefixSumPtr_[streamId][variablesCount-1] + temp) * words_per_vector) + word_index_upper_half);
xiBitVect &= paBitVect;
totSum += __popcll(xiBitVect);
} else {
intermediateData[intermediateResultIndex + blockSize] = paBitVect;
}
paSum += __popcll(paBitVect);
}
// each thread puts its local sum into shared memory
if (isFinalStage) {
sDataTot[tid] = totSum;
}
sDataPa[tid] = paSum;
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 256];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) && (tid < 128)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 128];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 64];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 ) {
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) {
if (isFinalStage) {
totSum += sDataTot[tid + 32];
}
paSum += sDataPa[tid + 32];
}
// Reduce final warp using shuffle
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
if (isFinalStage) {
totSum += __shfl_down_sync(0xFFFFFFFF, totSum, offset);
}
paSum += __shfl_down_sync(0xFFFFFFFF, paSum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 32];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 16];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 8];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 4];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 2];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 1];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 1];
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0) {
//TODO: bypass this logic if number of variables is already small
if (isFinalStage) {
//TODO: use global constant here or something else?
outputData[(streamId*1024) + (parentBlockId * 32) + blockIdx.x] = totSum;
outputDataPa[(streamId*1024) + (parentBlockId * 32) + blockIdx.x] = paSum;
} else if (paSum > 0) {
int threadCount = nextPow2((words_per_vector + 1) >> 1);
startKernel<true>(inputData,
outputData,
outputDataPa,
intermediateData,
words_per_vector,
variablesCount, // number of variables in a query
32, /* number of configs*/
5, //TODO: make it safer
0,
stream,
threadCount,
blockIdx.x);
//TODO: memset 0 results here
}
}
__syncthreads();
} // counts
inline bool isPow2(unsigned int x) { return ((x & (x - 1)) == 0); }
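// Host entry point: launches the first-stage counts kernel on the given
// stream. Each thread handles two 64-bit words, so threadCount is
// words_per_vector / 2 rounded up to the next power of two.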
void cudaCallBlockCount(const uint block_count,
const uint per_block_thread_count,
const uint words_per_vector,
const uint variablesCount,
const uint configs_per_query,
const uint64_t *bvectorsPtr,
uint64_t *results,
uint64_t *resultsPa,
uint64_t *intermediateData,
int streamId,
hipStream_t stream) {
int threadCount = nextPow2((words_per_vector + 1) >> 1);
startKernel<false>(bvectorsPtr, results, resultsPa, intermediateData, words_per_vector,
variablesCount, configs_per_query, 0, streamId, stream, threadCount, -1);
cucheck_dev( hipStreamSynchronize(stream) );
} // cudaCallBlockCount
#endif // GPU_UTIL_CU
| 41661f866a1e812f2eca0d7360eb7a1b7f26a312.cu | /***
* $Id$
**
* File: gpu_util.cu
* Created: Mar 22, 2019
*
* Parts of this code have been derived from NVIDIA samples: cuda-8.0/samples/6_Advanced/reduction
* with the following copyright:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This file is part of cuSABNAtk.
*
*/
#ifndef GPU_UTIL_CU
#define GPU_UTIL_CU
#include <cstdint>
#include <stdio.h>
#include "gpu_util.cuh"
__constant__ uint64_t aritiesPtr_[4][10];
__constant__ uint64_t aritiesPrefixProdPtr_[4][11];
__constant__ uint64_t aritiesPrefixSumPtr_[4][10];
template <class T, unsigned int blockSize, bool nIsPow2, bool isFinalStage>
__global__ void counts(const T* inputData,
T* outputData,
T* outputDataPa,
T* intermediateData,
unsigned int words_per_vector, // m / 64
int variablesCount, // number of variables in a query
int configs_per_query, /* number of configs*/
int startVariableId,
int streamId,
cudaStream_t stream,
int parentBlockId);
// from cuda samples reduction
CUDA_CALLABLE unsigned int nextPow2(unsigned int x) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
} // nextPow2
template <bool isFinalStage>
CUDA_CALLABLE void startKernel(const uint64_t* inputData,
uint64_t* outputData,
uint64_t* outputDataPa,
uint64_t* intermediateData,
unsigned int words_per_vector, // m / 64
int variablesCount, // number of variables in a query
int configs_per_query, /* number of configs*/
int startVariableId,
int streamId,
cudaStream_t stream,
int threadCount,
int parentBlockId) {
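// Dispatch helper: selects the counts<> instantiation whose blockSize template
// argument matches threadCount; the final stage doubles the shared-memory
// request because the kernel then uses both reduction buffers (sDataPa and sDataTot).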
dim3 dimBlock(threadCount, 1, 1);
dim3 dimGrid(configs_per_query, 1, 1);
int smemSize = (threadCount <= 32) ? 2 * threadCount * sizeof(uint64_t) : threadCount * sizeof(uint64_t);
if (isFinalStage) {
smemSize *= 2;
switch (threadCount) {
case 512:
counts<uint64_t, 512, false, isFinalStage><<<dimGrid, dimBlock, smemSize>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 256:
counts<uint64_t, 256, false, isFinalStage><<<dimGrid, dimBlock, smemSize>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 128:
counts<uint64_t, 128, false, isFinalStage><<<dimGrid, dimBlock, smemSize>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 64:
counts<uint64_t, 64, false, isFinalStage><<<dimGrid, dimBlock, smemSize>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 32:
counts<uint64_t, 32, false, isFinalStage><<<dimGrid, dimBlock, smemSize>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 16:
counts<uint64_t, 16, false, isFinalStage><<<dimGrid, dimBlock, smemSize>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 8:
counts<uint64_t, 8, false, isFinalStage><<<dimGrid, dimBlock, smemSize>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 4:
counts<uint64_t, 4, false, isFinalStage><<<dimGrid, dimBlock, smemSize>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 2:
counts<uint64_t, 2, false, isFinalStage><<<dimGrid, dimBlock, smemSize>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 1:
counts<uint64_t, 1, false, isFinalStage><<<dimGrid, dimBlock, smemSize>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
default:
printf("Unsupported thread count. Exiting.\n");
}
} else {
switch (threadCount) {
case 512:
counts<uint64_t, 512, false, isFinalStage><<<dimGrid, dimBlock, smemSize, stream>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 256:
counts<uint64_t, 256, false, isFinalStage><<<dimGrid, dimBlock, smemSize, stream>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 128:
counts<uint64_t, 128, false, isFinalStage><<<dimGrid, dimBlock, smemSize, stream>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 64:
counts<uint64_t, 64, false, isFinalStage><<<dimGrid, dimBlock, smemSize, stream>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 32:
counts<uint64_t, 32, false, isFinalStage><<<dimGrid, dimBlock, smemSize, stream>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 16:
counts<uint64_t, 16, false, isFinalStage><<<dimGrid, dimBlock, smemSize, stream>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 8:
counts<uint64_t, 8, false, isFinalStage><<<dimGrid, dimBlock, smemSize, stream>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 4:
counts<uint64_t, 4, false, isFinalStage><<<dimGrid, dimBlock, smemSize, stream>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 2:
counts<uint64_t, 2, false, isFinalStage><<<dimGrid, dimBlock, smemSize, stream>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
case 1:
counts<uint64_t, 1, false, isFinalStage><<<dimGrid, dimBlock, smemSize, stream>>>(
inputData, outputData, outputDataPa, intermediateData, words_per_vector, variablesCount,
configs_per_query, startVariableId, streamId, stream, parentBlockId);
break;
default:
printf("Unsupported thread count. Exiting.\n");
}
}
cucheck_dev(cudaGetLastError());
}
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template <class T> struct SharedMemory {
__device__ inline operator T*() {
extern __shared__ int __smem[];
return (T*)__smem;
}
__device__ inline operator const T*() const {
extern __shared__ int __smem[];
return (T*)__smem;
}
}; // struct SharedMemory
__host__ void copyAritiesToDevice(int streamId,
const std::vector<uint64_t>& pArities,
const std::vector<uint64_t>& pAritiesPrefixProd,
const std::vector<uint64_t>& pAritiesPrefixSum) {
cucheck_dev( cudaMemcpyToSymbol(aritiesPtr_, pArities.data(),
pArities.size() * sizeof(uint64_t), streamId * sizeof(uint64_t) * 10) );
cucheck_dev( cudaMemcpyToSymbol(aritiesPrefixProdPtr_,
pAritiesPrefixProd.data(), pAritiesPrefixProd.size() * sizeof(uint64_t), streamId * sizeof(uint64_t) * 11) );
cucheck_dev( cudaMemcpyToSymbol(aritiesPrefixSumPtr_,
pAritiesPrefixSum.data(), pAritiesPrefixSum.size() * sizeof(uint64_t), streamId * sizeof(uint64_t) * 10) );
} // m_copyAritiesToDevice__
template <class T, unsigned int blockSize, bool nIsPow2, bool isFinalStage>
__global__ void counts(const T* inputData,
T* outputData,
T* outputDataPa,
T* intermediateData,
unsigned int words_per_vector, // m / 64
int variablesCount, // number of variables in a query
int configs_per_query, /* number of configs*/
int startVariableId,
int streamId,
cudaStream_t stream,
int parentBlockId) {
//TODO: we don't really need two shared mems.
T* sDataPa = SharedMemory<T>();
T* sDataTot = &sDataPa[blockSize];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize + threadIdx.x;
unsigned int word_index = i % blockSize; // can't this be tid?
int intermediateResultIndex;
//TODO: remove the below constant
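// intermediateData is laid out as one 32-vector slab per stream: non-final
// stages write their own block's slot, the final stage reads the slot written
// by its parent block.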
if (isFinalStage) {
intermediateResultIndex = (streamId * words_per_vector * 32) + (parentBlockId * words_per_vector) + word_index;
} else {
intermediateResultIndex = (streamId * words_per_vector * 32) + (blockIdx.x * words_per_vector) + word_index;
}
T totSum = 0;
T paSum = 0;
T xiBitVect;
int temp = ((blockIdx.x / aritiesPrefixProdPtr_[streamId][startVariableId]) % aritiesPtr_[streamId][startVariableId]);
T paBitVect = *(((uint64_t*)inputData) + ((aritiesPrefixSumPtr_[streamId][startVariableId] + temp) * words_per_vector) + word_index);
// running sum for all word slices
for (int p = startVariableId + 1; p < min(5 + startVariableId, variablesCount-1); ++p) {
temp = ((blockIdx.x / aritiesPrefixProdPtr_[streamId][p]) % aritiesPtr_[streamId][p]);
paBitVect = paBitVect & *(((uint64_t*)inputData) + ((aritiesPrefixSumPtr_[streamId][p] + temp) * words_per_vector) + word_index);
}
if (isFinalStage) {
paBitVect &= intermediateData[intermediateResultIndex];
temp = ((blockIdx.x / aritiesPrefixProdPtr_[streamId][variablesCount-1]) % aritiesPtr_[streamId][variablesCount-1]);
xiBitVect = *(((uint64_t*)inputData) + ((aritiesPrefixSumPtr_[streamId][variablesCount-1] + temp) * words_per_vector) + word_index);
xiBitVect &= paBitVect;
totSum += __popcll(xiBitVect);
} else {
intermediateData[intermediateResultIndex] = paBitVect;
}
paSum += __popcll(paBitVect);
// ensure we don't read out of bounds -- this is optimized away for power of 2 sized arrays
if (nIsPow2 || (tid + blockSize < words_per_vector)) {
unsigned int word_index_upper_half = word_index + blockSize;
temp = ((blockIdx.x / aritiesPrefixProdPtr_[streamId][startVariableId]) % aritiesPtr_[streamId][startVariableId]);
paBitVect = *(((uint64_t*)inputData) + ((aritiesPrefixSumPtr_[streamId][startVariableId] + temp) * words_per_vector) + word_index_upper_half);
for (int p = startVariableId + 1; p < min(5 + startVariableId, variablesCount-1); p++) {
temp = ((blockIdx.x / aritiesPrefixProdPtr_[streamId][p]) % aritiesPtr_[streamId][p]);
paBitVect = paBitVect & *(((uint64_t*)inputData) + ((aritiesPrefixSumPtr_[streamId][p] + temp) * words_per_vector) + word_index_upper_half);
}
if (isFinalStage) {
paBitVect &= intermediateData[intermediateResultIndex + blockSize];
temp = ((blockIdx.x / aritiesPrefixProdPtr_[streamId][variablesCount-1]) % aritiesPtr_[streamId][variablesCount-1]);
xiBitVect = *(((uint64_t*)inputData) + ((aritiesPrefixSumPtr_[streamId][variablesCount-1] + temp) * words_per_vector) + word_index_upper_half);
xiBitVect &= paBitVect;
totSum += __popcll(xiBitVect);
} else {
intermediateData[intermediateResultIndex + blockSize] = paBitVect;
}
paSum += __popcll(paBitVect);
}
// each thread puts its local sum into shared memory
if (isFinalStage) {
sDataTot[tid] = totSum;
}
sDataPa[tid] = paSum;
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 256];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) && (tid < 128)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 128];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 64];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 ) {
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) {
if (isFinalStage) {
totSum += sDataTot[tid + 32];
}
paSum += sDataPa[tid + 32];
}
// Reduce final warp using shuffle
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
if (isFinalStage) {
totSum += __shfl_down_sync(0xFFFFFFFF, totSum, offset);
}
paSum += __shfl_down_sync(0xFFFFFFFF, paSum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 32];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 16];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 8];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 4];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 2];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1)) {
if (isFinalStage) {
sDataTot[tid] = totSum = totSum + sDataTot[tid + 1];
}
sDataPa[tid] = paSum = paSum + sDataPa[tid + 1];
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0) {
//TODO: bypass this logic if number of variables is already small
if (isFinalStage) {
//TODO: use global constant here or something else?
outputData[(streamId*1024) + (parentBlockId * 32) + blockIdx.x] = totSum;
outputDataPa[(streamId*1024) + (parentBlockId * 32) + blockIdx.x] = paSum;
} else if (paSum > 0) {
int threadCount = nextPow2((words_per_vector + 1) >> 1);
startKernel<true>(inputData,
outputData,
outputDataPa,
intermediateData,
words_per_vector,
variablesCount, // number of variables in a query
32, /* number of configs*/
5, //TODO: make it safer
0,
stream,
threadCount,
blockIdx.x);
//TODO: memset 0 results here
}
}
__syncthreads();
} // counts
inline bool isPow2(unsigned int x) { return ((x & (x - 1)) == 0); }
void cudaCallBlockCount(const uint block_count,
const uint per_block_thread_count,
const uint words_per_vector,
const uint variablesCount,
const uint configs_per_query,
const uint64_t *bvectorsPtr,
uint64_t *results,
uint64_t *resultsPa,
uint64_t *intermediateData,
int streamId,
cudaStream_t stream) {
int threadCount = nextPow2((words_per_vector + 1) >> 1);
startKernel<false>(bvectorsPtr, results, resultsPa, intermediateData, words_per_vector,
variablesCount, configs_per_query, 0, streamId, stream, threadCount, -1);
cucheck_dev( cudaStreamSynchronize(stream) );
} // cudaCallBlockCount
#endif // GPU_UTIL_CU
|
418407ff39437a5ac8c3ae2dd313f0179298cdbf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <prefixSum.cuh>
const int NUM_THREADS = 128;
// Function prototypes
__global__
void uniformAdd(int *outputArray, int numElements, int *INCR);
__global__
void blockPrefixSum(int *g_idata, int *g_odata, int n, int *SUM);
int manageMemoryForPrefixSum(int numElements) {
int blocksPerGridL1 = 1 + (numElements - 1) / (NUM_THREADS * 2);
int blocksPerGridL2 = 1 + blocksPerGridL1 / (NUM_THREADS * 2);
int blocksPerGridL3 = 1 + blocksPerGridL2 / (NUM_THREADS * 2);
double nvidiaFreeMemory = getSizeOfNvidiaFreeMemory();
int clean = 1;
if (blocksPerGridL1 != 1 && blocksPerGridL2 == 1) {
double occupancy = ((blocksPerGridL1 * 2) + (NUM_THREADS * 2 - 1)) * sizeof(int);
if((nvidiaFreeMemory - occupancy) < 0)
clean = 0;
} else if(blocksPerGridL1 != 1 && blocksPerGridL3 == 1) {
double occupancy = ((blocksPerGridL1 + (NUM_THREADS * 2) + (NUM_THREADS * 2 - 1)) * sizeof(int));
if((nvidiaFreeMemory - occupancy) < 0)
clean = 0;
}
return clean;
}
void prefixSum(int *d_input, int *d_cscColPtr, int numElements) {
hipError_t err = hipSuccess;
size_t size = numElements * sizeof(int);
int *d_SUMS_LEVEL1 = NULL;
int *d_INCR_LEVEL1 = NULL;
int *d_SUMS_LEVEL2 = NULL;
int *d_INCR_LEVEL2 = NULL;
// The correct level is going to be where the SUMS array can be prescanned with only one block
int blocksPerGridL1 = 1 + (numElements - 1) / (NUM_THREADS * 2);
int blocksPerGridL2 = 1 + blocksPerGridL1 / (NUM_THREADS * 2);
int blocksPerGridL3 = 1 + blocksPerGridL2 / (NUM_THREADS * 2);
if(blocksPerGridL1 == 1) {
hipLaunchKernelGGL(( blockPrefixSum), dim3(blocksPerGridL1), dim3(NUM_THREADS), 0, 0, d_input, d_cscColPtr, numElements, NULL);
} else if (blocksPerGridL2 == 1) {
err = hipMalloc((void**) &d_SUMS_LEVEL1, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_SUMS_LEVEL1");
err = hipMalloc((void**) &d_INCR_LEVEL1, size);
CUDA_ERROR(err, "Failed to allocate device vector d_INCR");
hipLaunchKernelGGL(( blockPrefixSum), dim3(blocksPerGridL1), dim3(NUM_THREADS), 0, 0, d_input, d_cscColPtr, numElements, d_SUMS_LEVEL1);
hipLaunchKernelGGL(( blockPrefixSum), dim3(blocksPerGridL2), dim3(NUM_THREADS), 0, 0, d_SUMS_LEVEL1, d_INCR_LEVEL1, blocksPerGridL1, NULL);
hipLaunchKernelGGL(( uniformAdd), dim3(blocksPerGridL1), dim3(NUM_THREADS), 0, 0, d_cscColPtr, numElements, d_INCR_LEVEL1);
hipDeviceSynchronize();
} else if (blocksPerGridL3 == 1) {
err = hipMalloc((void**) &d_SUMS_LEVEL1, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_SUMS_LEVEL1");
err = hipMalloc((void**) &d_SUMS_LEVEL2, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_SUMS_LEVEL2");
err = hipMalloc((void**) &d_INCR_LEVEL1, size);
CUDA_ERROR(err, "Failed to allocate device vector d_INCR_LEVEL1");
err = hipMalloc((void**) &d_INCR_LEVEL2, size);
CUDA_ERROR(err, "Failed to allocate device vector d_INCR_LEVEL2");
hipLaunchKernelGGL(( blockPrefixSum), dim3(blocksPerGridL1), dim3(NUM_THREADS), 0, 0, d_input, d_cscColPtr, numElements, d_SUMS_LEVEL1);
hipLaunchKernelGGL(( blockPrefixSum), dim3(blocksPerGridL2), dim3(NUM_THREADS), 0, 0, d_SUMS_LEVEL1, d_INCR_LEVEL1, blocksPerGridL1, d_SUMS_LEVEL2);
hipLaunchKernelGGL(( blockPrefixSum), dim3(blocksPerGridL3), dim3(NUM_THREADS), 0, 0, d_SUMS_LEVEL2, d_INCR_LEVEL2, blocksPerGridL2, NULL);
hipLaunchKernelGGL(( uniformAdd), dim3(blocksPerGridL2), dim3(NUM_THREADS), 0, 0, d_INCR_LEVEL1, blocksPerGridL1, d_INCR_LEVEL2);
hipLaunchKernelGGL(( uniformAdd), dim3(blocksPerGridL1), dim3(NUM_THREADS), 0, 0, d_cscColPtr, numElements, d_INCR_LEVEL1);
hipDeviceSynchronize();
}else {
printf("The array of length = %d is too large for a level 3 FULL prescan\n", numElements);
}
err = hipGetLastError();
CUDA_ERROR(err, "Failed to launch block scan kernel");
// Only need to free these arrays if they were allocated
if(blocksPerGridL2 == 1 || blocksPerGridL3 == 1){
err = hipFree(d_SUMS_LEVEL1);
CUDA_ERROR(err, "Failed to free device array d_SUMS_LEVEL1");
err = hipFree(d_INCR_LEVEL1);
CUDA_ERROR(err, "Failed to free device array d_INCR_LEVEL1");
}
if(blocksPerGridL3 == 1){
err = hipFree(d_SUMS_LEVEL2);
CUDA_ERROR(err, "Failed to free device array d_SUMS_LEVEL2");
err = hipFree(d_INCR_LEVEL2);
CUDA_ERROR(err, "Failed to free device array d_INCR_LEVEL2");
}
}
__global__
void blockPrefixSum(int *g_idata, int *g_odata, int n, int *SUM) {
__shared__ int temp[NUM_THREADS * 2 + (NUM_THREADS)];
int thid = threadIdx.x;
int offset = 1;
int blockOffset = NUM_THREADS * blockIdx.x * 2;
// Create the correct offsets for BCAO
int ai = thid;
int bi = thid + NUM_THREADS;
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// Copy the correct elements from the global array
if (blockOffset + ai < n) {
// Load input into shared memory
temp[ai + bankOffsetA] = g_idata[blockOffset + ai];
}
if (blockOffset + bi < n) {
// Load input into shared memory
temp[bi + bankOffsetB] = g_idata[blockOffset + bi];
}
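// Up-sweep (reduce) phase of the work-efficient (Blelloch-style) scan:
// build partial sums in place in shared memory.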
for (int d = NUM_THREADS; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) {
if(SUM != NULL) {
// If doing a FULL scan, save the last value in the SUMS array for later processing
SUM[blockIdx.x] = temp[(NUM_THREADS * 2) - 1 + CONFLICT_FREE_OFFSET((NUM_THREADS * 2) - 1)];
}
// clear the last element
temp[(NUM_THREADS * 2) - 1 + CONFLICT_FREE_OFFSET((NUM_THREADS * 2) - 1)] = 0;
}
// Traverse down tree & build scan
for (int d = 1; d < NUM_THREADS * 2; d *= 2) {
offset >>= 1;
__syncthreads();
if (thid < d) {
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
// Copy the scanned values back to the global array
__syncthreads();
if (blockOffset + ai < n) {
// write results to device memory
g_odata[blockOffset + ai] = temp[ai + bankOffsetA];
}
if (blockOffset + bi < n) {
// write results to device memory
g_odata[blockOffset + bi] = temp[bi + bankOffsetB];
}
}
// Takes the output array and for each block i, adds value i from INCR array to every element
__global__
void uniformAdd(int *outputArray, int numElements, int *INCR) {
int index = threadIdx.x + (2 * NUM_THREADS) * blockIdx.x;
int valueToAdd = INCR[blockIdx.x];
if (index < numElements){
outputArray[index] += valueToAdd;
}
if (index + NUM_THREADS < numElements){
outputArray[index + NUM_THREADS] += valueToAdd;
}
} | 418407ff39437a5ac8c3ae2dd313f0179298cdbf.cu | #include <prefixSum.cuh>
const int NUM_THREADS = 128;
// Function prototypes
__global__
void uniformAdd(int *outputArray, int numElements, int *INCR);
__global__
void blockPrefixSum(int *g_idata, int *g_odata, int n, int *SUM);
int manageMemoryForPrefixSum(int numElements) {
int blocksPerGridL1 = 1 + (numElements - 1) / (NUM_THREADS * 2);
int blocksPerGridL2 = 1 + blocksPerGridL1 / (NUM_THREADS * 2);
int blocksPerGridL3 = 1 + blocksPerGridL2 / (NUM_THREADS * 2);
double nvidiaFreeMemory = getSizeOfNvidiaFreeMemory();
int clean = 1;
if (blocksPerGridL1 != 1 && blocksPerGridL2 == 1) {
double occupancy = ((blocksPerGridL1 * 2) + (NUM_THREADS * 2 - 1)) * sizeof(int);
if((nvidiaFreeMemory - occupancy) < 0)
clean = 0;
} else if(blocksPerGridL1 != 1 && blocksPerGridL3 == 1) {
double occupancy = ((blocksPerGridL1 + (NUM_THREADS * 2) + (NUM_THREADS * 2 - 1)) * sizeof(int));
if((nvidiaFreeMemory - occupancy) < 0)
clean = 0;
}
return clean;
}
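// Exclusive scan of d_input into d_cscColPtr. Each block scans 2*NUM_THREADS
// elements; per-block totals (SUMS) are scanned recursively (up to three
// levels) and added back with uniformAdd.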
void prefixSum(int *d_input, int *d_cscColPtr, int numElements) {
cudaError_t err = cudaSuccess;
size_t size = numElements * sizeof(int);
int *d_SUMS_LEVEL1 = NULL;
int *d_INCR_LEVEL1 = NULL;
int *d_SUMS_LEVEL2 = NULL;
int *d_INCR_LEVEL2 = NULL;
// The correct level is going to be where the SUMS array can be prescanned with only one block
int blocksPerGridL1 = 1 + (numElements - 1) / (NUM_THREADS * 2);
int blocksPerGridL2 = 1 + blocksPerGridL1 / (NUM_THREADS * 2);
int blocksPerGridL3 = 1 + blocksPerGridL2 / (NUM_THREADS * 2);
if(blocksPerGridL1 == 1) {
blockPrefixSum<<<blocksPerGridL1, NUM_THREADS>>>(d_input, d_cscColPtr, numElements, NULL);
} else if (blocksPerGridL2 == 1) {
err = cudaMalloc((void**) &d_SUMS_LEVEL1, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_SUMS_LEVEL1");
err = cudaMalloc((void**) &d_INCR_LEVEL1, size);
CUDA_ERROR(err, "Failed to allocate device vector d_INCR");
blockPrefixSum<<<blocksPerGridL1, NUM_THREADS>>>(d_input, d_cscColPtr, numElements, d_SUMS_LEVEL1);
blockPrefixSum<<<blocksPerGridL2, NUM_THREADS>>>(d_SUMS_LEVEL1, d_INCR_LEVEL1, blocksPerGridL1, NULL);
uniformAdd<<<blocksPerGridL1, NUM_THREADS>>>(d_cscColPtr, numElements, d_INCR_LEVEL1);
cudaDeviceSynchronize();
} else if (blocksPerGridL3 == 1) {
err = cudaMalloc((void**) &d_SUMS_LEVEL1, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_SUMS_LEVEL1");
err = cudaMalloc((void**) &d_SUMS_LEVEL2, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_SUMS_LEVEL2");
err = cudaMalloc((void**) &d_INCR_LEVEL1, size);
CUDA_ERROR(err, "Failed to allocate device vector d_INCR_LEVEL1");
err = cudaMalloc((void**) &d_INCR_LEVEL2, size);
CUDA_ERROR(err, "Failed to allocate device vector d_INCR_LEVEL2");
blockPrefixSum<<<blocksPerGridL1, NUM_THREADS>>>(d_input, d_cscColPtr, numElements, d_SUMS_LEVEL1);
blockPrefixSum<<<blocksPerGridL2, NUM_THREADS>>>(d_SUMS_LEVEL1, d_INCR_LEVEL1, blocksPerGridL1, d_SUMS_LEVEL2);
blockPrefixSum<<<blocksPerGridL3, NUM_THREADS>>>(d_SUMS_LEVEL2, d_INCR_LEVEL2, blocksPerGridL2, NULL);
uniformAdd<<<blocksPerGridL2, NUM_THREADS>>>(d_INCR_LEVEL1, blocksPerGridL1, d_INCR_LEVEL2);
uniformAdd<<<blocksPerGridL1, NUM_THREADS>>>(d_cscColPtr, numElements, d_INCR_LEVEL1);
cudaDeviceSynchronize();
}else {
printf("The array of length = %d is too large for a level 3 FULL prescan\n", numElements);
}
err = cudaGetLastError();
CUDA_ERROR(err, "Failed to launch block scan kernel");
// Only need to free these arrays if they were allocated
if(blocksPerGridL2 == 1 || blocksPerGridL3 == 1){
err = cudaFree(d_SUMS_LEVEL1);
CUDA_ERROR(err, "Failed to free device array d_SUMS_LEVEL1");
err = cudaFree(d_INCR_LEVEL1);
CUDA_ERROR(err, "Failed to free device array d_INCR_LEVEL1");
}
if(blocksPerGridL3 == 1){
err = cudaFree(d_SUMS_LEVEL2);
CUDA_ERROR(err, "Failed to free device array d_SUMS_LEVEL2");
err = cudaFree(d_INCR_LEVEL2);
CUDA_ERROR(err, "Failed to free device array d_INCR_LEVEL2");
}
}
__global__
void blockPrefixSum(int *g_idata, int *g_odata, int n, int *SUM) {
__shared__ int temp[NUM_THREADS * 2 + (NUM_THREADS)];
int thid = threadIdx.x;
int offset = 1;
int blockOffset = NUM_THREADS * blockIdx.x * 2;
// Create the correct offsets for BCAO
int ai = thid;
int bi = thid + NUM_THREADS;
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// Copy the correct elements from the global array
if (blockOffset + ai < n) {
// Load input into shared memory
temp[ai + bankOffsetA] = g_idata[blockOffset + ai];
}
if (blockOffset + bi < n) {
// Load input into shared memory
temp[bi + bankOffsetB] = g_idata[blockOffset + bi];
}
for (int d = NUM_THREADS; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) {
if(SUM != NULL) {
// If doing a FULL scan, save the last value in the SUMS array for later processing
SUM[blockIdx.x] = temp[(NUM_THREADS * 2) - 1 + CONFLICT_FREE_OFFSET((NUM_THREADS * 2) - 1)];
}
// clear the last element
temp[(NUM_THREADS * 2) - 1 + CONFLICT_FREE_OFFSET((NUM_THREADS * 2) - 1)] = 0;
}
// Traverse down tree & build scan
for (int d = 1; d < NUM_THREADS * 2; d *= 2) {
offset >>= 1;
__syncthreads();
if (thid < d) {
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
// Copy the scanned values back to the global array
__syncthreads();
if (blockOffset + ai < n) {
// write results to device memory
g_odata[blockOffset + ai] = temp[ai + bankOffsetA];
}
if (blockOffset + bi < n) {
// write results to device memory
g_odata[blockOffset + bi] = temp[bi + bankOffsetB];
}
}
// Takes the output array and for each block i, adds value i from INCR array to every element
__global__
void uniformAdd(int *outputArray, int numElements, int *INCR) {
int index = threadIdx.x + (2 * NUM_THREADS) * blockIdx.x;
int valueToAdd = INCR[blockIdx.x];
if (index < numElements){
outputArray[index] += valueToAdd;
}
if (index + NUM_THREADS < numElements){
outputArray[index + NUM_THREADS] += valueToAdd;
}
} |
44975b0d660972e6e2738f779e6d9eaf1a35e652.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cudaConvolution_Shared_Memory_3D_Float_Valid_Kernel.h>
// ========================================================
// KERNEL
// ========================================================
__global__ void cudaConvolution_Shared_Memory_3D_Float_Valid_X_Kernel(float* in, int dimx, int dimy, int dimz, float *mask, int size2, float* out)
{
__shared__ float s_Data[BLOCKDIMZ][BLOCKDIMY][BLOCKDIMX * 3];
const int baseX = (blockIdx.x - 1) * blockDim.x + threadIdx.x,
baseY = blockIdx.y * blockDim.y + threadIdx.y,
baseZ = blockIdx.z * blockDim.z + threadIdx.z;
in += baseZ * (dimx * dimy) + baseY * dimx + baseX;
out += baseZ * ((dimx - 2 * size2) * dimy) + baseY * (dimx - 2 * size2) + baseX - size2;
// out += baseZ * ((dimx) * dimy) + baseY * (dimx) + baseX;
if (baseX < dimx && baseY < dimy && baseZ < dimz)
{
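// This block is shifted one tile left (blockIdx.x - 1), so the three loads
// below fill the left halo, the centre tile and the right halo of a
// 3*BLOCKDIMX-wide row in shared memory, zero-padding out-of-range elements.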
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + BLOCKDIMX] = (baseX + BLOCKDIMX < dimx) ? in[BLOCKDIMX] : 0.0f;
s_Data[threadIdx.z][threadIdx.y][threadIdx.x] = (baseX > 0) ? in[0] : 0.0f;
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + 2 * BLOCKDIMX] = (dimx - baseX > 2 * BLOCKDIMX) ? in[2 * BLOCKDIMX] : 0.0f;
__syncthreads();
if (baseX + BLOCKDIMX >= size2 && baseX + BLOCKDIMX + size2 < dimx)
{
float sum = 0.0f;
#pragma unroll
for (int i = -size2; i <= size2; i++)
sum += mask[size2 - i] * s_Data[threadIdx.z][threadIdx.y][threadIdx.x + BLOCKDIMX + i];
out[BLOCKDIMX] = sum;
}
}
}
__global__ void cudaConvolution_Shared_Memory_3D_Float_Valid_Y_Kernel(float* in, int dimx, int dimy, int dimz, float *mask, int size2, float* out)
{
__shared__ float s_Data[BLOCKDIMZ][BLOCKDIMX][BLOCKDIMY * 3];
const int baseX = blockIdx.x * blockDim.x + threadIdx.x,
baseY = (blockIdx.y - 1) * blockDim.y + threadIdx.y,
baseZ = blockIdx.z * blockDim.z + threadIdx.z;
in += baseZ * (dimx * dimy) + baseY * dimx + baseX;
out += baseZ * (dimx * (dimy - 2 * size2)) + (baseY - size2) * dimx + baseX;
if (baseX < dimx && baseY < dimy && baseZ < dimz)
{
s_Data[threadIdx.z][threadIdx.x][threadIdx.y + BLOCKDIMY] = (baseY + BLOCKDIMY < dimy) ? in[BLOCKDIMY * dimx] : 0.0f;
s_Data[threadIdx.z][threadIdx.x][threadIdx.y] = (baseY > 0) ? in[0] : 0.0f;
s_Data[threadIdx.z][threadIdx.x][threadIdx.y + 2 * BLOCKDIMY] = (dimy - baseY > 2 * BLOCKDIMY) ? in[2 * BLOCKDIMY * dimx] : 0.0f;
__syncthreads();
if (baseY + BLOCKDIMY >= size2 && baseY + BLOCKDIMY + size2 < dimy)
{
float sum = 0.0f;
#pragma unroll
for (int i = -size2; i <= size2; i++)
sum += mask[size2 - i] * s_Data[threadIdx.z][threadIdx.x][threadIdx.y + BLOCKDIMY + i];
out[BLOCKDIMY * dimx] = sum;
}
}
}
__global__ void cudaConvolution_Shared_Memory_3D_Float_Valid_Z_Kernel(float* in, int dimx, int dimy, int dimz, float *mask, int size2, float* out)
{
__shared__ float s_Data[BLOCKDIMY][BLOCKDIMX][BLOCKDIMZ * 3];
const int baseX = blockIdx.x * blockDim.x + threadIdx.x,
baseY = blockIdx.y * blockDim.y + threadIdx.y,
baseZ = (blockIdx.z - 1) * blockDim.z + threadIdx.z;
in += baseZ * (dimx * dimy) + baseY * dimx + baseX;
out += (baseZ - size2) * (dimx * dimy) + baseY * dimx + baseX;
if (baseX < dimx && baseY < dimy && baseZ < dimz)
{
s_Data[threadIdx.y][threadIdx.x][threadIdx.z + BLOCKDIMZ] = (baseZ + BLOCKDIMZ < dimz) ? in[BLOCKDIMZ * dimx * dimy] : 0.0f;
s_Data[threadIdx.y][threadIdx.x][threadIdx.z] = (baseZ > 0) ? in[0] : 0.0f;
s_Data[threadIdx.y][threadIdx.x][threadIdx.z + 2 * BLOCKDIMZ] = (dimz - baseZ > 2 * BLOCKDIMZ) ? in[2 * BLOCKDIMZ * dimx * dimy] : 0.0f;
__syncthreads();
if (baseZ + BLOCKDIMZ >= size2 && baseZ + BLOCKDIMZ + size2 < dimz)
{
float sum = 0.0f;
#pragma unroll
for (int i = -size2; i <= size2; i++)
sum += mask[size2 - i] * s_Data[threadIdx.y][threadIdx.x][threadIdx.z + BLOCKDIMZ + i];
out[BLOCKDIMZ * dimx * dimy] = sum;
}
}
}
void cudaConvolution_Shared_Memory_3D_Float_Valid_X(float* d_in, int dimx, int dimy, int dimz, float* d_mask, int maskSize, float* d_out)
{
dim3 block(10, 10, 10);
dim3 grid((dimx - 1) / block.x + 1, (dimy - 1) / block.y + 1, (dimz - 1) / block.z + 1);
cudaConvolution_Shared_Memory_3D_Float_Valid_X_Kernel << <grid, block >> > (d_in, dimx, dimy, dimz, d_mask, maskSize / 2, d_out);
hipDeviceSynchronize();
}
// #define CUDA_MEM_CPY_TO_SYMBOL_FLOAT(_dst, _src, _size) hipMemcpyToSymbol(_dst, _src, _size*sizeof(float));
/*
__global__ void cudaConvolution3DValidXKernel(float* in, int dimx, int dimy, int dimz, float *mask, int size2, float* out)
{
__shared__ float s_Data[BLOCKDIMZ][BLOCKDIMY][BLOCKDIMX * 3];
const int baseX = (blockIdx.x - 1) * blockDim.x + threadIdx.x,
baseY = blockIdx.y * blockDim.y + threadIdx.y,
baseZ = blockIdx.z * blockDim.z + threadIdx.z;
in += baseZ * (dimx * dimy) + baseY * dimx + baseX;
out += baseZ * ((dimx - 2 * size2) * dimy) + baseY * (dimx - 2 * size2) + baseX - size2;
if (baseX < dimx && baseY < dimy && baseZ < dimz)
{
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + BLOCKDIMX] = (baseX + BLOCKDIMX < dimx) ? in[BLOCKDIMX] : 0.0f;
s_Data[threadIdx.z][threadIdx.y][threadIdx.x] = (baseX > 0) ? in[0] : 0.0f;
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + 2 * BLOCKDIMX] = (dimx - baseX > 2 * BLOCKDIMX) ? in[2 * BLOCKDIMX] : 0.0f;
__syncthreads();
if (baseX + BLOCKDIMX >= size2 && baseX + BLOCKDIMX + size2 < dimx)
{
float sum = 0.0f;
#pragma unroll
for (int i = -size2; i <= size2; i++)
sum += mask[size2 - i] * s_Data[threadIdx.z][threadIdx.y][threadIdx.x + BLOCKDIMX + i];
out[BLOCKDIMX] = sum;
}
}
}
*/ | 44975b0d660972e6e2738f779e6d9eaf1a35e652.cu |
#include <cuda_runtime.h>
#include <cuda.h>
#include <cudaConvolution_Shared_Memory_3D_Float_Valid_Kernel.h>
// ========================================================
// KERNEL
// ========================================================
__global__ void cudaConvolution_Shared_Memory_3D_Float_Valid_X_Kernel(float* in, int dimx, int dimy, int dimz, float *mask, int size2, float* out)
{
__shared__ float s_Data[BLOCKDIMZ][BLOCKDIMY][BLOCKDIMX * 3];
const int baseX = (blockIdx.x - 1) * blockDim.x + threadIdx.x,
baseY = blockIdx.y * blockDim.y + threadIdx.y,
baseZ = blockIdx.z * blockDim.z + threadIdx.z;
in += baseZ * (dimx * dimy) + baseY * dimx + baseX;
out += baseZ * ((dimx - 2 * size2) * dimy) + baseY * (dimx - 2 * size2) + baseX - size2;
// out += baseZ * ((dimx) * dimy) + baseY * (dimx) + baseX;
if (baseX < dimx && baseY < dimy && baseZ < dimz)
{
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + BLOCKDIMX] = (baseX + BLOCKDIMX < dimx) ? in[BLOCKDIMX] : 0.0f;
s_Data[threadIdx.z][threadIdx.y][threadIdx.x] = (baseX > 0) ? in[0] : 0.0f;
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + 2 * BLOCKDIMX] = (dimx - baseX > 2 * BLOCKDIMX) ? in[2 * BLOCKDIMX] : 0.0f;
__syncthreads();
if (baseX + BLOCKDIMX >= size2 && baseX + BLOCKDIMX + size2 < dimx)
{
float sum = 0.0f;
#pragma unroll
for (int i = -size2; i <= size2; i++)
sum += mask[size2 - i] * s_Data[threadIdx.z][threadIdx.y][threadIdx.x + BLOCKDIMX + i];
out[BLOCKDIMX] = sum;
}
}
}
__global__ void cudaConvolution_Shared_Memory_3D_Float_Valid_Y_Kernel(float* in, int dimx, int dimy, int dimz, float *mask, int size2, float* out)
{
__shared__ float s_Data[BLOCKDIMZ][BLOCKDIMX][BLOCKDIMY * 3];
const int baseX = blockIdx.x * blockDim.x + threadIdx.x,
baseY = (blockIdx.y - 1) * blockDim.y + threadIdx.y,
baseZ = blockIdx.z * blockDim.z + threadIdx.z;
in += baseZ * (dimx * dimy) + baseY * dimx + baseX;
out += baseZ * (dimx * (dimy - 2 * size2)) + (baseY - size2) * dimx + baseX;
if (baseX < dimx && baseY < dimy && baseZ < dimz)
{
s_Data[threadIdx.z][threadIdx.x][threadIdx.y + BLOCKDIMY] = (baseY + BLOCKDIMY < dimy) ? in[BLOCKDIMY * dimx] : 0.0f;
s_Data[threadIdx.z][threadIdx.x][threadIdx.y] = (baseY > 0) ? in[0] : 0.0f;
s_Data[threadIdx.z][threadIdx.x][threadIdx.y + 2 * BLOCKDIMY] = (dimy - baseY > 2 * BLOCKDIMY) ? in[2 * BLOCKDIMY * dimx] : 0.0f;
__syncthreads();
if (baseY + BLOCKDIMY >= size2 && baseY + BLOCKDIMY + size2 < dimy)
{
float sum = 0.0f;
#pragma unroll
for (int i = -size2; i <= size2; i++)
sum += mask[size2 - i] * s_Data[threadIdx.z][threadIdx.x][threadIdx.y + BLOCKDIMY + i];
out[BLOCKDIMY * dimx] = sum;
}
}
}
__global__ void cudaConvolution_Shared_Memory_3D_Float_Valid_Z_Kernel(float* in, int dimx, int dimy, int dimz, float *mask, int size2, float* out)
{
__shared__ float s_Data[BLOCKDIMY][BLOCKDIMX][BLOCKDIMZ * 3];
const int baseX = blockIdx.x * blockDim.x + threadIdx.x,
baseY = blockIdx.y * blockDim.y + threadIdx.y,
baseZ = (blockIdx.z - 1) * blockDim.z + threadIdx.z;
in += baseZ * (dimx * dimy) + baseY * dimx + baseX;
out += (baseZ - size2) * (dimx * dimy) + baseY * dimx + baseX;
if (baseX < dimx && baseY < dimy && baseZ < dimz)
{
s_Data[threadIdx.y][threadIdx.x][threadIdx.z + BLOCKDIMZ] = (baseZ + BLOCKDIMZ < dimz) ? in[BLOCKDIMZ * dimx * dimy] : 0.0f;
s_Data[threadIdx.y][threadIdx.x][threadIdx.z] = (baseZ > 0) ? in[0] : 0.0f;
s_Data[threadIdx.y][threadIdx.x][threadIdx.z + 2 * BLOCKDIMZ] = (dimz - baseZ > 2 * BLOCKDIMZ) ? in[2 * BLOCKDIMZ * dimx * dimy] : 0.0f;
__syncthreads();
if (baseZ + BLOCKDIMZ >= size2 && baseZ + BLOCKDIMZ + size2 < dimz)
{
float sum = 0.0f;
#pragma unroll
for (int i = -size2; i <= size2; i++)
sum += mask[size2 - i] * s_Data[threadIdx.y][threadIdx.x][threadIdx.z + BLOCKDIMZ + i];
out[BLOCKDIMZ * dimx * dimy] = sum;
}
}
}
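// Host launcher for the X-direction pass: the output is the "valid" region,
// i.e. the x extent shrinks by 2*(maskSize/2) while dimy and dimz are unchanged.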
void cudaConvolution_Shared_Memory_3D_Float_Valid_X(float* d_in, int dimx, int dimy, int dimz, float* d_mask, int maskSize, float* d_out)
{
dim3 block(10, 10, 10);
dim3 grid((dimx - 1) / block.x + 1, (dimy - 1) / block.y + 1, (dimz - 1) / block.z + 1);
cudaConvolution_Shared_Memory_3D_Float_Valid_X_Kernel << <grid, block >> > (d_in, dimx, dimy, dimz, d_mask, maskSize / 2, d_out);
cudaThreadSynchronize();
}
// #define CUDA_MEM_CPY_TO_SYMBOL_FLOAT(_dst, _src, _size) cudaMemcpyToSymbol(_dst, _src, _size*sizeof(float));
/*
__global__ void cudaConvolution3DValidXKernel(float* in, int dimx, int dimy, int dimz, float *mask, int size2, float* out)
{
__shared__ float s_Data[BLOCKDIMZ][BLOCKDIMY][BLOCKDIMX * 3];
const int baseX = (blockIdx.x - 1) * blockDim.x + threadIdx.x,
baseY = blockIdx.y * blockDim.y + threadIdx.y,
baseZ = blockIdx.z * blockDim.z + threadIdx.z;
in += baseZ * (dimx * dimy) + baseY * dimx + baseX;
out += baseZ * ((dimx - 2 * size2) * dimy) + baseY * (dimx - 2 * size2) + baseX - size2;
if (baseX < dimx && baseY < dimy && baseZ < dimz)
{
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + BLOCKDIMX] = (baseX + BLOCKDIMX < dimx) ? in[BLOCKDIMX] : 0.0f;
s_Data[threadIdx.z][threadIdx.y][threadIdx.x] = (baseX > 0) ? in[0] : 0.0f;
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + 2 * BLOCKDIMX] = (dimx - baseX > 2 * BLOCKDIMX) ? in[2 * BLOCKDIMX] : 0.0f;
__syncthreads();
if (baseX + BLOCKDIMX >= size2 && baseX + BLOCKDIMX + size2 < dimx)
{
float sum = 0.0f;
#pragma unroll
for (int i = -size2; i <= size2; i++)
sum += mask[size2 - i] * s_Data[threadIdx.z][threadIdx.y][threadIdx.x + BLOCKDIMX + i];
out[BLOCKDIMX] = sum;
}
}
}
*/ |
82e87c7f15fa31751c043321730f872041797ba1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define DIM 2048
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
};
__device__ int julia( int x, int y ) {
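// Map the pixel to complex coordinates in [-scale, scale] and iterate
// z = z*z + c; return 1 if the orbit stays bounded (|z|^2 <= 4) for all
// maxiter iterations, 0 as soon as it escapes.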
const float scale = 2.0;
float xm = (float) DIM/2.0;
float jx = scale * (x/xm-1);
float jy = scale * (1-y/xm);
hipComplex c(-0.8f, 0.156f);
hipComplex z(jx, jy);
int maxiter = 400;
int i;
for (i = 0; i < maxiter; i++) {
z = z * z + c;
if (z.magnitude2() > 4)
return 0; /* color will be black */
}
return 1;
}
__global__ void kernel( unsigned char *ptr ) {
// map from blockIdx to pixel position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
// now calculate the value at that position
int juliaValue = julia( x, y );
int r = 255;
int g = 0;
int b = 0;
ptr[offset*4 + 0] = r * juliaValue;
ptr[offset*4 + 1] = g * juliaValue;
ptr[offset*4 + 2] = b * juliaValue;
ptr[offset*4 + 3] = 255; /* Transparency? */
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
};
int main( void ) {
DataBlock data;
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
HANDLE_ERROR( hipMalloc( (void**)&dev_bitmap, bitmap.image_size() ) );
data.dev_bitmap = dev_bitmap;
dim3 grid(DIM,DIM);
hipLaunchKernelGGL(( kernel), dim3(grid),dim3(1), 0, 0, dev_bitmap );
HANDLE_ERROR( hipMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipFree( dev_bitmap ) );
FILE *file = fopen("julia_gpu.out","w");
int dim = DIM;
fwrite(&dim,1,sizeof(int),file);
fwrite(bitmap.get_ptr(),4*DIM*DIM,sizeof(unsigned char),file);
fclose(file);
hipDeviceReset();
// bitmap.display_and_exit();
}
| 82e87c7f15fa31751c043321730f872041797ba1.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define DIM 2048
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
};
__device__ int julia( int x, int y ) {
const float scale = 2.0;
float xm = (float) DIM/2.0;
float jx = scale * (x/xm-1);
float jy = scale * (1-y/xm);
cuComplex c(-0.8f, 0.156f);
cuComplex z(jx, jy);
int maxiter = 400;
int i;
for (i = 0; i < maxiter; i++) {
z = z * z + c;
if (z.magnitude2() > 4)
return 0; /* color will be black */
}
return 1;
}
__global__ void kernel( unsigned char *ptr ) {
// map from blockIdx to pixel position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
// now calculate the value at that position
int juliaValue = julia( x, y );
int r = 255;
int g = 0;
int b = 0;
ptr[offset*4 + 0] = r * juliaValue;
ptr[offset*4 + 1] = g * juliaValue;
ptr[offset*4 + 2] = b * juliaValue;
ptr[offset*4 + 3] = 255; /* Transparency? */
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
};
int main( void ) {
DataBlock data;
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
HANDLE_ERROR( cudaMalloc( (void**)&dev_bitmap, bitmap.image_size() ) );
data.dev_bitmap = dev_bitmap;
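// One block per pixel (DIM x DIM grid, one thread per block); blockIdx selects the pixel.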
dim3 grid(DIM,DIM);
kernel<<<grid,1>>>( dev_bitmap );
HANDLE_ERROR( cudaMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaFree( dev_bitmap ) );
FILE *file = fopen("julia_gpu.out","w");
int dim = DIM;
fwrite(&dim,1,sizeof(int),file);
fwrite(bitmap.get_ptr(),4*DIM*DIM,sizeof(unsigned char),file);
fclose(file);
cudaDeviceReset();
// bitmap.display_and_exit();
}
|
3ce37e2c578f56ed32fb360ad28cd97e620ad935.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <math.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <torch/extension.h>
#include "dropout.cuh"
#include "softmax.cuh"
// symbol to be automatically resolved by PyTorch libs
namespace multihead_attn {
namespace fused_softmax {
namespace additive_mask_softmax_dropout {
std::vector<torch::Tensor> fwd_cuda(bool is_training, int heads,
torch::Tensor const &input,
const half *pad_mask, float dropout_prob) {
const int attn_batches = input.size(0);
const int sequences = attn_batches / heads;
const int q_seq_len = input.size(1);
const int k_seq_len = q_seq_len;
// const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
// There is no reason to use more than one stream as every kernel is
// sequentially dependent
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
hipblasSetStream(handle, stream);
// 3 Intermediate Results + Output (Note: dropout intermediates are generated
// by ATen library code)
auto act_options = input.options().requires_grad(false);
auto mask_options = act_options.dtype(torch::kUInt8);
torch::Tensor softmax_results =
torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_results =
torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_mask =
torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options);
// Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
void *input_ptr = static_cast<void *>(input.data_ptr());
void *softmax_results_ptr = static_cast<void *>(softmax_results.data_ptr());
// Padded Softmax
[[maybe_unused]] bool softmax_success = false;
if (pad_mask == nullptr) {
softmax_success = dispatch_softmax<half, half, float>(
reinterpret_cast<half *>(softmax_results_ptr),
reinterpret_cast<const half *>(input_ptr), k_seq_len, k_seq_len,
attn_batches * q_seq_len);
} else {
softmax_success = dispatch_additive_masked_softmax<half, half, float>(
reinterpret_cast<half *>(softmax_results_ptr),
reinterpret_cast<const half *>(input_ptr), pad_mask, k_seq_len,
k_seq_len, attn_batches * q_seq_len,
attn_batches * q_seq_len / sequences);
}
if (is_training) {
// use at:: function so that C++ version generates the same random mask as
// python version
auto dropout_tuple =
at::_fused_dropout(softmax_results, 1.0f - dropout_prob);
dropout_results = std::get<0>(dropout_tuple);
dropout_mask = std::get<1>(dropout_tuple);
}
// Matmul2
return {dropout_results, dropout_mask, softmax_results};
}
torch::Tensor bwd_cuda(int heads, torch::Tensor const &output_grads,
torch::Tensor const &softmax_results,
torch::Tensor const &dropout_mask, float dropout_prob) {
const int attn_batches = output_grads.size(0);
const int q_seq_len = output_grads.size(1);
const int k_seq_len = q_seq_len;
// const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
// TODO: Streams can be used in Backprop but I haven't added more than one
// in my first attempt to create the code
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
hipblasSetStream(handle, stream);
// Output Tensor Allocations
// torch::Tensor input_grads = torch::empty_like(output_grads);
// Apply Dropout Mask and Scale by Dropout Probability
// Softmax Grad
dispatch_masked_scale_softmax_backward_stream<half, half, float, false>(
static_cast<half *>(output_grads.data_ptr()),
static_cast<half *>(output_grads.data_ptr()),
reinterpret_cast<half const *>(softmax_results.data_ptr()),
static_cast<uint8_t const *>(dropout_mask.data_ptr()),
1.0 / (1.0 - dropout_prob), k_seq_len, k_seq_len,
attn_batches * q_seq_len, stream);
// backward pass is completely in-place
return output_grads;
}
} // namespace additive_mask_softmax_dropout
} // namespace fused_softmax
} // namespace multihead_attn
| 3ce37e2c578f56ed32fb360ad28cd97e620ad935.cu | #include <iostream>
#include <math.h>
#include <vector>
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_profiler_api.h>
#include <cuda_runtime.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#include "dropout.cuh"
#include "softmax.cuh"
// symbol to be automatically resolved by PyTorch libs
namespace multihead_attn {
namespace fused_softmax {
namespace additive_mask_softmax_dropout {
std::vector<torch::Tensor> fwd_cuda(bool is_training, int heads,
torch::Tensor const &input,
const half *pad_mask, float dropout_prob) {
const int attn_batches = input.size(0);
const int sequences = attn_batches / heads;
const int q_seq_len = input.size(1);
const int k_seq_len = q_seq_len;
// const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
// There is no reason to use more than one stream as every kernel is
// sequentially dependent
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// 3 Intermediate Results + Output (Note: dropout intermediates are generated
// by ATen library code)
auto act_options = input.options().requires_grad(false);
auto mask_options = act_options.dtype(torch::kUInt8);
torch::Tensor softmax_results =
torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_results =
torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_mask =
torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options);
// Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
void *input_ptr = static_cast<void *>(input.data_ptr());
void *softmax_results_ptr = static_cast<void *>(softmax_results.data_ptr());
// Padded Softmax
[[maybe_unused]] bool softmax_success = false;
if (pad_mask == nullptr) {
softmax_success = dispatch_softmax<half, half, float>(
reinterpret_cast<half *>(softmax_results_ptr),
reinterpret_cast<const half *>(input_ptr), k_seq_len, k_seq_len,
attn_batches * q_seq_len);
} else {
softmax_success = dispatch_additive_masked_softmax<half, half, float>(
reinterpret_cast<half *>(softmax_results_ptr),
reinterpret_cast<const half *>(input_ptr), pad_mask, k_seq_len,
k_seq_len, attn_batches * q_seq_len,
attn_batches * q_seq_len / sequences);
}
if (is_training) {
// use at:: function so that C++ version generates the same random mask as
// python version
auto dropout_tuple =
at::_fused_dropout(softmax_results, 1.0f - dropout_prob);
dropout_results = std::get<0>(dropout_tuple);
dropout_mask = std::get<1>(dropout_tuple);
}
// Matmul2
return {dropout_results, dropout_mask, softmax_results};
}
torch::Tensor bwd_cuda(int heads, torch::Tensor const &output_grads,
torch::Tensor const &softmax_results,
torch::Tensor const &dropout_mask, float dropout_prob) {
const int attn_batches = output_grads.size(0);
const int q_seq_len = output_grads.size(1);
const int k_seq_len = q_seq_len;
// const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
// TODO: Streams can be used in Backprop but I haven't added more than one
// in my first attempt to create the code
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// Output Tensor Allocations
// torch::Tensor input_grads = torch::empty_like(output_grads);
// Apply Dropout Mask and Scale by Dropout Probability
// Softmax Grad
dispatch_masked_scale_softmax_backward_stream<half, half, float, false>(
static_cast<half *>(output_grads.data_ptr()),
static_cast<half *>(output_grads.data_ptr()),
reinterpret_cast<half const *>(softmax_results.data_ptr()),
static_cast<uint8_t const *>(dropout_mask.data_ptr()),
1.0 / (1.0 - dropout_prob), k_seq_len, k_seq_len,
attn_batches * q_seq_len, stream);
// backward pass is completely in-place
return output_grads;
}
} // namespace additive_mask_softmax_dropout
} // namespace fused_softmax
} // namespace multihead_attn
|
cf3793ade921b74a38f9b2d6e9e4d95ac5eefc30.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "custom_cuda_layers.h"
__global__ void param_update_kernel(const float* input, __half* output, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) { output[id] = (__half)input[id]; }
}
void launch_param_update(const float* input, __half* output, int size, hipStream_t stream)
{
int threads = 1024;
dim3 grid_dim((size - 1) / threads + 1);
dim3 block_dim(threads);
hipLaunchKernelGGL(( param_update_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, input, output, size);
}
| cf3793ade921b74a38f9b2d6e9e4d95ac5eefc30.cu |
#include "custom_cuda_layers.h"
__global__ void param_update_kernel(const float* input, __half* output, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) { output[id] = (__half)input[id]; }
}
void launch_param_update(const float* input, __half* output, int size, cudaStream_t stream)
{
int threads = 1024;
dim3 grid_dim((size - 1) / threads + 1);
dim3 block_dim(threads);
param_update_kernel<<<grid_dim, block_dim, 0, stream>>>(input, output, size);
}
|
90768c02359c1fdd821a3fb49c9f623bf1985654.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2022, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/factorization/factorization_kernels.hpp"
#include <ginkgo/core/base/array.hpp>
#include "core/components/prefix_sum_kernels.hpp"
#include "core/matrix/csr_builder.hpp"
#include "cuda/base/config.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/components/intrinsics.cuh"
#include "cuda/components/searching.cuh"
#include "cuda/components/thread_ids.cuh"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The factorization namespace.
*
* @ingroup factor
*/
namespace factorization {
constexpr int default_block_size{512};
#include "common/cuda_hip/factorization/factorization_kernels.hpp.inc"
template <typename ValueType, typename IndexType>
void add_diagonal_elements(std::shared_ptr<const CudaExecutor> exec,
matrix::Csr<ValueType, IndexType>* mtx,
bool is_sorted)
{
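    // Strategy: (1) count the missing diagonal entries per row, (2) prefix-sum
    // those counts into per-row insertion offsets, (3) copy values/col_idxs into
    // enlarged arrays while inserting the missing diagonal entries, and
    // (4) shift the row pointers by the accumulated offsets.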
    // TODO: Runtime can be optimized by choosing an appropriate size for the
// subwarp dependent on the matrix properties
constexpr int subwarp_size = config::warp_size;
auto mtx_size = mtx->get_size();
auto num_rows = static_cast<IndexType>(mtx_size[0]);
auto num_cols = static_cast<IndexType>(mtx_size[1]);
size_type row_ptrs_size = num_rows + 1;
if (num_rows == 0) {
return;
}
array<IndexType> row_ptrs_addition(exec, row_ptrs_size);
array<bool> needs_change_host{exec->get_master(), 1};
needs_change_host.get_data()[0] = false;
array<bool> needs_change_device{exec, 1};
needs_change_device = needs_change_host;
auto cuda_old_values = as_cuda_type(mtx->get_const_values());
auto cuda_old_col_idxs = as_cuda_type(mtx->get_const_col_idxs());
auto cuda_old_row_ptrs = as_cuda_type(mtx->get_row_ptrs());
auto cuda_row_ptrs_add = as_cuda_type(row_ptrs_addition.get_data());
const auto block_dim = default_block_size;
const auto grid_dim =
static_cast<uint32>(ceildiv(num_rows, block_dim / subwarp_size));
if (is_sorted) {
hipLaunchKernelGGL(( kernel::find_missing_diagonal_elements<true, subwarp_size>)
, dim3(grid_dim), dim3(block_dim), 0, 0,
num_rows, num_cols, cuda_old_col_idxs, cuda_old_row_ptrs,
cuda_row_ptrs_add,
as_cuda_type(needs_change_device.get_data()));
} else {
hipLaunchKernelGGL(( kernel::find_missing_diagonal_elements<false, subwarp_size>)
, dim3(grid_dim), dim3(block_dim), 0, 0,
num_rows, num_cols, cuda_old_col_idxs, cuda_old_row_ptrs,
cuda_row_ptrs_add,
as_cuda_type(needs_change_device.get_data()));
}
needs_change_host = needs_change_device;
if (!needs_change_host.get_const_data()[0]) {
return;
}
components::prefix_sum(exec, cuda_row_ptrs_add, row_ptrs_size);
exec->synchronize();
auto total_additions =
exec->copy_val_to_host(cuda_row_ptrs_add + row_ptrs_size - 1);
size_type new_num_elems = static_cast<size_type>(total_additions) +
mtx->get_num_stored_elements();
array<ValueType> new_values{exec, new_num_elems};
array<IndexType> new_col_idxs{exec, new_num_elems};
auto cuda_new_values = as_cuda_type(new_values.get_data());
auto cuda_new_col_idxs = as_cuda_type(new_col_idxs.get_data());
// no empty kernel guard needed here, we exit earlier already
hipLaunchKernelGGL(( kernel::add_missing_diagonal_elements<subwarp_size>)
, dim3(grid_dim), dim3(block_dim), 0, 0, num_rows, cuda_old_values, cuda_old_col_idxs,
cuda_old_row_ptrs, cuda_new_values,
cuda_new_col_idxs, cuda_row_ptrs_add);
const auto grid_dim_row_ptrs_update =
static_cast<uint32>(ceildiv(num_rows, block_dim));
hipLaunchKernelGGL(( kernel::update_row_ptrs), dim3(grid_dim_row_ptrs_update), dim3(block_dim), 0, 0,
num_rows + 1, cuda_old_row_ptrs, cuda_row_ptrs_add);
matrix::CsrBuilder<ValueType, IndexType> mtx_builder{mtx};
mtx_builder.get_value_array() = std::move(new_values);
mtx_builder.get_col_idx_array() = std::move(new_col_idxs);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_FACTORIZATION_ADD_DIAGONAL_ELEMENTS_KERNEL);
template <typename ValueType, typename IndexType>
void initialize_row_ptrs_l_u(
std::shared_ptr<const CudaExecutor> exec,
const matrix::Csr<ValueType, IndexType>* system_matrix,
IndexType* l_row_ptrs, IndexType* u_row_ptrs)
{
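    // count the nonzeros per row of L and U, then turn the counts into row
    // pointers with prefix sums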
const size_type num_rows{system_matrix->get_size()[0]};
const auto block_size = default_block_size;
const uint32 number_blocks =
ceildiv(num_rows, static_cast<size_type>(block_size));
const auto grid_dim = number_blocks;
if (num_rows > 0) {
hipLaunchKernelGGL(( kernel::count_nnz_per_l_u_row), dim3(grid_dim), dim3(block_size), 0, 0,
num_rows, as_cuda_type(system_matrix->get_const_row_ptrs()),
as_cuda_type(system_matrix->get_const_col_idxs()),
as_cuda_type(system_matrix->get_const_values()),
as_cuda_type(l_row_ptrs), as_cuda_type(u_row_ptrs));
}
components::prefix_sum(exec, l_row_ptrs, num_rows + 1);
components::prefix_sum(exec, u_row_ptrs, num_rows + 1);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_FACTORIZATION_INITIALIZE_ROW_PTRS_L_U_KERNEL);
template <typename ValueType, typename IndexType>
void initialize_l_u(std::shared_ptr<const CudaExecutor> exec,
const matrix::Csr<ValueType, IndexType>* system_matrix,
matrix::Csr<ValueType, IndexType>* csr_l,
matrix::Csr<ValueType, IndexType>* csr_u)
{
const size_type num_rows{system_matrix->get_size()[0]};
const auto block_size = default_block_size;
const auto grid_dim = static_cast<uint32>(
ceildiv(num_rows, static_cast<size_type>(block_size)));
if (num_rows > 0) {
hipLaunchKernelGGL(( kernel::initialize_l_u), dim3(grid_dim), dim3(block_size), 0, 0,
num_rows, as_cuda_type(system_matrix->get_const_row_ptrs()),
as_cuda_type(system_matrix->get_const_col_idxs()),
as_cuda_type(system_matrix->get_const_values()),
as_cuda_type(csr_l->get_const_row_ptrs()),
as_cuda_type(csr_l->get_col_idxs()),
as_cuda_type(csr_l->get_values()),
as_cuda_type(csr_u->get_const_row_ptrs()),
as_cuda_type(csr_u->get_col_idxs()),
as_cuda_type(csr_u->get_values()));
}
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_FACTORIZATION_INITIALIZE_L_U_KERNEL);
template <typename ValueType, typename IndexType>
void initialize_row_ptrs_l(
std::shared_ptr<const CudaExecutor> exec,
const matrix::Csr<ValueType, IndexType>* system_matrix,
IndexType* l_row_ptrs)
{
const size_type num_rows{system_matrix->get_size()[0]};
const auto block_size = default_block_size;
const uint32 number_blocks =
ceildiv(num_rows, static_cast<size_type>(block_size));
const auto grid_dim = number_blocks;
if (num_rows > 0) {
hipLaunchKernelGGL(( kernel::count_nnz_per_l_row), dim3(grid_dim), dim3(block_size), 0, 0,
num_rows, as_cuda_type(system_matrix->get_const_row_ptrs()),
as_cuda_type(system_matrix->get_const_col_idxs()),
as_cuda_type(system_matrix->get_const_values()),
as_cuda_type(l_row_ptrs));
}
components::prefix_sum(exec, l_row_ptrs, num_rows + 1);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_FACTORIZATION_INITIALIZE_ROW_PTRS_L_KERNEL);
template <typename ValueType, typename IndexType>
void initialize_l(std::shared_ptr<const CudaExecutor> exec,
const matrix::Csr<ValueType, IndexType>* system_matrix,
matrix::Csr<ValueType, IndexType>* csr_l, bool diag_sqrt)
{
const size_type num_rows{system_matrix->get_size()[0]};
const auto block_size = default_block_size;
const auto grid_dim = static_cast<uint32>(
ceildiv(num_rows, static_cast<size_type>(block_size)));
if (num_rows > 0) {
hipLaunchKernelGGL(( kernel::initialize_l), dim3(grid_dim), dim3(block_size), 0, 0,
num_rows, as_cuda_type(system_matrix->get_const_row_ptrs()),
as_cuda_type(system_matrix->get_const_col_idxs()),
as_cuda_type(system_matrix->get_const_values()),
as_cuda_type(csr_l->get_const_row_ptrs()),
as_cuda_type(csr_l->get_col_idxs()),
as_cuda_type(csr_l->get_values()), diag_sqrt);
}
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_FACTORIZATION_INITIALIZE_L_KERNEL);
} // namespace factorization
} // namespace cuda
} // namespace kernels
} // namespace gko
| 90768c02359c1fdd821a3fb49c9f623bf1985654.cu | /*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2022, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/factorization/factorization_kernels.hpp"
#include <ginkgo/core/base/array.hpp>
#include "core/components/prefix_sum_kernels.hpp"
#include "core/matrix/csr_builder.hpp"
#include "cuda/base/config.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/components/intrinsics.cuh"
#include "cuda/components/searching.cuh"
#include "cuda/components/thread_ids.cuh"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The factorization namespace.
*
* @ingroup factor
*/
namespace factorization {
constexpr int default_block_size{512};
#include "common/cuda_hip/factorization/factorization_kernels.hpp.inc"
template <typename ValueType, typename IndexType>
void add_diagonal_elements(std::shared_ptr<const CudaExecutor> exec,
matrix::Csr<ValueType, IndexType>* mtx,
bool is_sorted)
{
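    // Strategy: (1) count the missing diagonal entries per row, (2) prefix-sum
    // those counts into per-row insertion offsets, (3) copy values/col_idxs into
    // enlarged arrays while inserting the missing diagonal entries, and
    // (4) shift the row pointers by the accumulated offsets.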
    // TODO: Runtime can be optimized by choosing an appropriate size for the
// subwarp dependent on the matrix properties
constexpr int subwarp_size = config::warp_size;
auto mtx_size = mtx->get_size();
auto num_rows = static_cast<IndexType>(mtx_size[0]);
auto num_cols = static_cast<IndexType>(mtx_size[1]);
size_type row_ptrs_size = num_rows + 1;
if (num_rows == 0) {
return;
}
array<IndexType> row_ptrs_addition(exec, row_ptrs_size);
array<bool> needs_change_host{exec->get_master(), 1};
needs_change_host.get_data()[0] = false;
array<bool> needs_change_device{exec, 1};
needs_change_device = needs_change_host;
auto cuda_old_values = as_cuda_type(mtx->get_const_values());
auto cuda_old_col_idxs = as_cuda_type(mtx->get_const_col_idxs());
auto cuda_old_row_ptrs = as_cuda_type(mtx->get_row_ptrs());
auto cuda_row_ptrs_add = as_cuda_type(row_ptrs_addition.get_data());
const auto block_dim = default_block_size;
const auto grid_dim =
static_cast<uint32>(ceildiv(num_rows, block_dim / subwarp_size));
if (is_sorted) {
kernel::find_missing_diagonal_elements<true, subwarp_size>
<<<grid_dim, block_dim>>>(
num_rows, num_cols, cuda_old_col_idxs, cuda_old_row_ptrs,
cuda_row_ptrs_add,
as_cuda_type(needs_change_device.get_data()));
} else {
kernel::find_missing_diagonal_elements<false, subwarp_size>
<<<grid_dim, block_dim>>>(
num_rows, num_cols, cuda_old_col_idxs, cuda_old_row_ptrs,
cuda_row_ptrs_add,
as_cuda_type(needs_change_device.get_data()));
}
needs_change_host = needs_change_device;
if (!needs_change_host.get_const_data()[0]) {
return;
}
components::prefix_sum(exec, cuda_row_ptrs_add, row_ptrs_size);
exec->synchronize();
auto total_additions =
exec->copy_val_to_host(cuda_row_ptrs_add + row_ptrs_size - 1);
size_type new_num_elems = static_cast<size_type>(total_additions) +
mtx->get_num_stored_elements();
array<ValueType> new_values{exec, new_num_elems};
array<IndexType> new_col_idxs{exec, new_num_elems};
auto cuda_new_values = as_cuda_type(new_values.get_data());
auto cuda_new_col_idxs = as_cuda_type(new_col_idxs.get_data());
// no empty kernel guard needed here, we exit earlier already
kernel::add_missing_diagonal_elements<subwarp_size>
<<<grid_dim, block_dim>>>(num_rows, cuda_old_values, cuda_old_col_idxs,
cuda_old_row_ptrs, cuda_new_values,
cuda_new_col_idxs, cuda_row_ptrs_add);
const auto grid_dim_row_ptrs_update =
static_cast<uint32>(ceildiv(num_rows, block_dim));
kernel::update_row_ptrs<<<grid_dim_row_ptrs_update, block_dim>>>(
num_rows + 1, cuda_old_row_ptrs, cuda_row_ptrs_add);
matrix::CsrBuilder<ValueType, IndexType> mtx_builder{mtx};
mtx_builder.get_value_array() = std::move(new_values);
mtx_builder.get_col_idx_array() = std::move(new_col_idxs);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_FACTORIZATION_ADD_DIAGONAL_ELEMENTS_KERNEL);
template <typename ValueType, typename IndexType>
void initialize_row_ptrs_l_u(
std::shared_ptr<const CudaExecutor> exec,
const matrix::Csr<ValueType, IndexType>* system_matrix,
IndexType* l_row_ptrs, IndexType* u_row_ptrs)
{
const size_type num_rows{system_matrix->get_size()[0]};
const auto block_size = default_block_size;
const uint32 number_blocks =
ceildiv(num_rows, static_cast<size_type>(block_size));
const auto grid_dim = number_blocks;
if (num_rows > 0) {
kernel::count_nnz_per_l_u_row<<<grid_dim, block_size, 0, 0>>>(
num_rows, as_cuda_type(system_matrix->get_const_row_ptrs()),
as_cuda_type(system_matrix->get_const_col_idxs()),
as_cuda_type(system_matrix->get_const_values()),
as_cuda_type(l_row_ptrs), as_cuda_type(u_row_ptrs));
}
components::prefix_sum(exec, l_row_ptrs, num_rows + 1);
components::prefix_sum(exec, u_row_ptrs, num_rows + 1);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_FACTORIZATION_INITIALIZE_ROW_PTRS_L_U_KERNEL);
template <typename ValueType, typename IndexType>
void initialize_l_u(std::shared_ptr<const CudaExecutor> exec,
const matrix::Csr<ValueType, IndexType>* system_matrix,
matrix::Csr<ValueType, IndexType>* csr_l,
matrix::Csr<ValueType, IndexType>* csr_u)
{
const size_type num_rows{system_matrix->get_size()[0]};
const auto block_size = default_block_size;
const auto grid_dim = static_cast<uint32>(
ceildiv(num_rows, static_cast<size_type>(block_size)));
if (num_rows > 0) {
kernel::initialize_l_u<<<grid_dim, block_size, 0, 0>>>(
num_rows, as_cuda_type(system_matrix->get_const_row_ptrs()),
as_cuda_type(system_matrix->get_const_col_idxs()),
as_cuda_type(system_matrix->get_const_values()),
as_cuda_type(csr_l->get_const_row_ptrs()),
as_cuda_type(csr_l->get_col_idxs()),
as_cuda_type(csr_l->get_values()),
as_cuda_type(csr_u->get_const_row_ptrs()),
as_cuda_type(csr_u->get_col_idxs()),
as_cuda_type(csr_u->get_values()));
}
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_FACTORIZATION_INITIALIZE_L_U_KERNEL);
template <typename ValueType, typename IndexType>
void initialize_row_ptrs_l(
std::shared_ptr<const CudaExecutor> exec,
const matrix::Csr<ValueType, IndexType>* system_matrix,
IndexType* l_row_ptrs)
{
const size_type num_rows{system_matrix->get_size()[0]};
const auto block_size = default_block_size;
const uint32 number_blocks =
ceildiv(num_rows, static_cast<size_type>(block_size));
const auto grid_dim = number_blocks;
if (num_rows > 0) {
kernel::count_nnz_per_l_row<<<grid_dim, block_size, 0, 0>>>(
num_rows, as_cuda_type(system_matrix->get_const_row_ptrs()),
as_cuda_type(system_matrix->get_const_col_idxs()),
as_cuda_type(system_matrix->get_const_values()),
as_cuda_type(l_row_ptrs));
}
components::prefix_sum(exec, l_row_ptrs, num_rows + 1);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_FACTORIZATION_INITIALIZE_ROW_PTRS_L_KERNEL);
template <typename ValueType, typename IndexType>
void initialize_l(std::shared_ptr<const CudaExecutor> exec,
const matrix::Csr<ValueType, IndexType>* system_matrix,
matrix::Csr<ValueType, IndexType>* csr_l, bool diag_sqrt)
{
const size_type num_rows{system_matrix->get_size()[0]};
const auto block_size = default_block_size;
const auto grid_dim = static_cast<uint32>(
ceildiv(num_rows, static_cast<size_type>(block_size)));
if (num_rows > 0) {
kernel::initialize_l<<<grid_dim, block_size, 0, 0>>>(
num_rows, as_cuda_type(system_matrix->get_const_row_ptrs()),
as_cuda_type(system_matrix->get_const_col_idxs()),
as_cuda_type(system_matrix->get_const_values()),
as_cuda_type(csr_l->get_const_row_ptrs()),
as_cuda_type(csr_l->get_col_idxs()),
as_cuda_type(csr_l->get_values()), diag_sqrt);
}
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_FACTORIZATION_INITIALIZE_L_KERNEL);
} // namespace factorization
} // namespace cuda
} // namespace kernels
} // namespace gko
|
0dc98741e71d72f4d0ccf0049991eabad4691cbe.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file : constant_eg.cu
* @brief : Examples of using constant memory for CUDA
* @details : constant memory for CUDA examples
*
* @author : Ernest Yeung <[email protected]>
* @date : 20170103
* @ref : http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#device-memory-specifiers
*
* https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni¤cy_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted
*
* which won't go through a 3rd. party such as indiegogo, kickstarter, patreon.
* Otherwise, I receive emails and messages on how all my (free) material on
* physics, math, and engineering have helped students with their studies,
* and I know what it's like to not have money as a student, but love physics
* (or math, sciences, etc.), so I am committed to keeping all my material
* open-source and free, whether or not
* sufficiently crowdfunded, under the open-source MIT license:
* feel free to copy, edit, paste, make your own versions, share, use as you wish.
* Just don't be an asshole and not give credit where credit is due.
* Peace out, never give up! -EY
*
* */
/*
* COMPILATION TIP
* nvcc constant_eg.cu -o constant_eg
*
* */
#include <iostream>
__constant__ float constData_global[256];
__device__ float devData;
__device__ float* devPointer;
int main(int argc, char* argv[]) {
float data_main[256];
/* "boilerplate" test values */
for (int idx=0; idx<256; idx++) {
data_main[idx] = ((float) idx+1);
}
hipMemcpyToSymbol(constData_global, data_main, sizeof(data_main));
    float data_main1[256] = {};
    for (int idx=0; idx < 256; idx++) { std::cout << data_main1[idx] << " "; }
    std::cout << std::endl;
hipMemcpyFromSymbol(data_main1, constData_global, sizeof(data_main1) );
/* sanity check */
for (int idx=0; idx < 256; idx++) { std::cout << data_main1[idx] << " "; }
// __constant__ float constData_main[256]; // error: a "__constant__"
// variable declaration is not allowed inside a function body
// __device__ float devData; // error: a "__device__" variable declaration is not allowed inside a function body
float value = 3.14;
hipMemcpyToSymbol(devData, &value, sizeof(float));
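    // for a __device__ pointer variable, allocate device memory first and then
    // copy the pointer value itself into the symbol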
float *ptr;
hipMalloc(&ptr, 256*sizeof(float));
hipMemcpyToSymbol(devPointer, &ptr, sizeof(ptr));
}
| 0dc98741e71d72f4d0ccf0049991eabad4691cbe.cu | /**
* @file : constant_eg.cu
* @brief : Examples of using constant memory for CUDA
* @details : constant memory for CUDA examples
*
* @author : Ernest Yeung <[email protected]>
* @date : 20170103
* @ref : http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#device-memory-specifiers
*
* https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni¤cy_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted
*
* which won't go through a 3rd. party such as indiegogo, kickstarter, patreon.
* Otherwise, I receive emails and messages on how all my (free) material on
* physics, math, and engineering have helped students with their studies,
* and I know what it's like to not have money as a student, but love physics
* (or math, sciences, etc.), so I am committed to keeping all my material
* open-source and free, whether or not
* sufficiently crowdfunded, under the open-source MIT license:
* feel free to copy, edit, paste, make your own versions, share, use as you wish.
* Just don't be an asshole and not give credit where credit is due.
* Peace out, never give up! -EY
*
* */
/*
* COMPILATION TIP
* nvcc constant_eg.cu -o constant_eg
*
* */
#include <iostream>
__constant__ float constData_global[256];
__device__ float devData;
__device__ float* devPointer;
int main(int argc, char* argv[]) {
float data_main[256];
/* "boilerplate" test values */
for (int idx=0; idx<256; idx++) {
data_main[idx] = ((float) idx+1);
}
cudaMemcpyToSymbol(constData_global, data_main, sizeof(data_main));
    float data_main1[256] = {};
    for (int idx=0; idx < 256; idx++) { std::cout << data_main1[idx] << " "; }
    std::cout << std::endl;
cudaMemcpyFromSymbol(data_main1, constData_global, sizeof(data_main1) );
/* sanity check */
for (int idx=0; idx < 256; idx++) { std::cout << data_main1[idx] << " "; }
// __constant__ float constData_main[256]; // error: a "__constant__"
// variable declaration is not allowed inside a function body
// __device__ float devData; // error: a "__device__" variable declaration is not allowed inside a function body
float value = 3.14;
cudaMemcpyToSymbol(devData, &value, sizeof(float));
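    // for a __device__ pointer variable, allocate device memory first and then
    // copy the pointer value itself into the symbol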
float *ptr;
cudaMalloc(&ptr, 256*sizeof(float));
cudaMemcpyToSymbol(devPointer, &ptr, sizeof(ptr));
}
|
fa990780c1bab0a4606116c8a8abde0bf4f6197f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <optix.h>
#include "OptiX7Craft.h"
#include "helpers.h"
#include "GeometryData.h"
extern "C" {
__constant__ Params params;
}
extern "C" __global__ void __intersection__parallelogram()
{
const Parallelogram* floor = reinterpret_cast<Parallelogram*>( optixGetSbtDataPointer() );
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_tmin = optixGetRayTmin(), ray_tmax = optixGetRayTmax();
float3 n = make_float3( floor->plane );
float dt = dot(ray_dir, n );
float t = (floor->plane.w - dot(n, ray_orig))/dt;
if( t > ray_tmin && t < ray_tmax )
{
float3 p = ray_orig + ray_dir * t;
float3 vi = p - floor->anchor;
float a1 = dot(floor->v1, vi);
if(a1 >= 0 && a1 <= 1)
{
float a2 = dot(floor->v2, vi);
if(a2 >= 0 && a2 <= 1)
{
optixReportIntersection(
t,
0,
float3_as_args(n),
float_as_int( a1 ), float_as_int( a2 )
);
}
}
}
}
extern "C" __global__ void __intersection__sphere_shell()
{
const SphereShell* sphere_shell = reinterpret_cast<SphereShell*>( optixGetSbtDataPointer() );
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_tmin = optixGetRayTmin(), ray_tmax = optixGetRayTmax();
float3 O = ray_orig - sphere_shell->center;
float l = 1 / length(ray_dir);
float3 D = ray_dir * l;
float b = dot(O, D), sqr_b = b * b;
float O_dot_O = dot(O, O);
float radius1 = sphere_shell->radius1, radius2 = sphere_shell->radius2;
float sqr_radius1 = radius1 * radius1, sqr_radius2 = radius2*radius2;
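    // the reported hit kind (HIT_*_FROM_*) records which shell surface was hit
    // (inner or outer sphere) and from which region the ray approached it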
// check if we are outside of outer sphere
if ( O_dot_O > sqr_radius2 + params.scene_epsilon )
{
if ( O_dot_O - sqr_b < sqr_radius2 - params.scene_epsilon )
{
float c = O_dot_O - sqr_radius2;
float root = sqr_b - c;
if (root > 0.0f) {
float t = -b - sqrtf( root );
float3 normal = (O + t * D) / radius2;
optixReportIntersection(
t * l,
HIT_OUTSIDE_FROM_OUTSIDE,
float3_as_args( normal ) );
}
}
}
// else we are inside of the outer sphere
else
{
float c = O_dot_O - sqr_radius1;
float root = b*b-c;
if ( root > 0.0f )
{
float t = -b - sqrtf( root );
// do we hit inner sphere from between spheres?
if ( t * l > ray_tmin && t * l < ray_tmax )
{
float3 normal = (O + t * D) / (-radius1);
optixReportIntersection(
t * l,
HIT_INSIDE_FROM_OUTSIDE,
float3_as_args( normal ) );
}
else
{
// do we hit inner sphere from within both spheres?
t = -b + (root > 0 ? sqrtf( root ) : 0.f);
if ( t * l > ray_tmin && t * l < ray_tmax )
{
float3 normal = ( O + t*D )/(-radius1);
optixReportIntersection(
t * l,
HIT_INSIDE_FROM_INSIDE,
float3_as_args( normal ) );
}
else
{
// do we hit outer sphere from between spheres?
c = O_dot_O - sqr_radius2;
root = b*b-c;
t = -b + (root > 0 ? sqrtf( root ) : 0.f);
float3 normal = ( O + t*D )/radius2;
optixReportIntersection(
t * l,
HIT_OUTSIDE_FROM_INSIDE,
float3_as_args( normal ) );
}
}
}
else
{
// do we hit outer sphere from between spheres?
c = O_dot_O - sqr_radius2;
root = b*b-c;
float t = -b + (root > 0 ? sqrtf( root ) : 0.f);
float3 normal = ( O + t*D )/radius2;
optixReportIntersection(
t * l,
HIT_OUTSIDE_FROM_INSIDE,
float3_as_args( normal ) );
}
}
}
#define float3_as_ints( u ) float_as_int( u.x ), float_as_int( u.y ), float_as_int( u.z )
static __device__ float3 get_normal(float t, float3 t0, float3 t1)
{
float3 neg = make_float3(t == t0.x ? 1 : 0, t == t0.y ? 1 : 0, t == t0.z ? 1 : 0);
float3 pos = make_float3(t == t1.x ? 1 : 0, t == t1.y ? 1 : 0, t == t1.z ? 1 : 0);
return pos - neg;
}
static __device__ float2 get_coord(float3 relativeCoord, float3 size)
{
float2 uv;
if (fabs(fabs(relativeCoord.z) - size.z) <= 1e-4) {
uv.x = (relativeCoord.x + size.x) / (2 * size.x);
uv.y = (relativeCoord.y + size.y) / (2 * size.y);
}
else if (fabs(fabs(relativeCoord.x) - size.x) <= 1e-4) {
uv.x = (relativeCoord.z + size.z) / (2 * size.z);
uv.y = (relativeCoord.y + size.y) / (2 * size.y);
}
else if (fabs(fabs(relativeCoord.y) - size.y) <= 1e-4) {
uv.x = (relativeCoord.z + size.z) / (2 * size.z);
uv.y = (relativeCoord.x + size.x) / (2 * size.x);
}
else {
uv.x = 0.0f;
uv.y = 0.0f;
}
return uv;
}
static __device__ cube_face get_face(float3 relativeCoord, float3 size)
{
if (fabs(relativeCoord.x - size.x) <= 1e-4) {
return x_up;
}
else if (fabs(relativeCoord.x + size.x) <= 1e-4) {
return x_down;
}
else if (fabs(relativeCoord.y - size.y) <= 1e-4) {
return y_up;
}
else if (fabs(relativeCoord.y + size.y) <= 1e-4) {
return y_down;
}
else if (fabs(relativeCoord.z - size.z) <= 1e-4) {
return z_up;
}
else if (fabs(relativeCoord.z + size.z) <= 1e-4) {
return z_down;
}
}
extern "C" __global__ void __intersection__cube()
{
const Cube* cube = reinterpret_cast<Cube*>(optixGetSbtDataPointer());
const float3 ray_origin = optixGetWorldRayOrigin();
float3 ray_direction = optixGetWorldRayDirection();
const float ray_tmin = optixGetRayTmin(), ray_tmax = optixGetRayTmax();
float3 cubemin = cube->center - cube->size;
float3 cubemax = cube->center + cube->size;
float3 t0 = (cubemin - ray_origin) / ray_direction;
float3 t1 = (cubemax - ray_origin) / ray_direction;
float3 near = fminf(t0, t1);
float3 far = fmaxf(t0, t1);
float tmin = fmaxf(near);
float tmax = fminf(far);
float eps = 0.0001f;
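    // standard slab test: the ray intersects the box iff the latest entry time
    // (tmin) does not exceed the earliest exit time (tmax)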
if (tmin <= tmax) {
bool check_second = true;
if (tmin >= ray_tmin && tmin <= ray_tmax) {
float3 normal = get_normal(tmin, t0, t1);
float3 coord = ray_origin + tmin * ray_direction;
            // compute the (u, v) texture coordinates of the hit point
float3 relativeCoord = coord - cube->center;
float2 uv = get_coord(relativeCoord, cube->size);
cube_face face = get_face(relativeCoord, cube->size);
optixReportIntersection(
tmin,
HIT_FROM_OUTSIDE,
float3_as_args(normal),
float_as_int(uv.x),
float_as_int(uv.y),
face
);
check_second = false;
}
if (check_second) {
if (tmax >= ray_tmin && tmax <= ray_tmax) {
float3 normal = get_normal(tmax, t0, t1);
float3 coord = ray_origin + tmax * ray_direction;
                // compute the (u, v) texture coordinates of the hit point
float3 relativeCoord = coord - cube->center;
float2 uv = get_coord(relativeCoord, cube->size);
cube_face face = get_face(relativeCoord, cube->size);
optixReportIntersection(
tmax,
HIT_FROM_INSIDE,
float3_as_args(normal),
float_as_int(uv.x),
float_as_int(uv.y),
face
);
}
}
}
}
extern "C" __global__ void __intersection__sphere()
{
const GeometryData::Sphere* hit_group_data = reinterpret_cast<GeometryData::Sphere*>(optixGetSbtDataPointer());
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_tmin = optixGetRayTmin();
const float ray_tmax = optixGetRayTmax();
const float3 O = ray_orig - hit_group_data->center;
const float l = 1.0f / length(ray_dir);
const float3 D = ray_dir * l;
const float radius = hit_group_data->radius;
float b = dot(O, D);
float c = dot(O, O) - radius * radius;
float disc = b * b - c;
if (disc > 0.0f)
{
float sdisc = sqrtf(disc);
float root1 = (-b - sdisc);
float root11 = 0.0f;
bool check_second = true;
const bool do_refine = fabsf(root1) > (10.0f * radius);
if (do_refine)
{
// refine root1
float3 O1 = O + root1 * D;
b = dot(O1, D);
c = dot(O1, O1) - radius * radius;
disc = b * b - c;
if (disc > 0.0f)
{
sdisc = sqrtf(disc);
root11 = (-b - sdisc);
}
}
float t;
float3 normal;
t = (root1 + root11) * l;
if (t > ray_tmin && t < ray_tmax)
{
normal = (O + (root1 + root11) * D) / radius;
if (optixReportIntersection(t, 0, float3_as_ints(normal), float_as_int(radius)))
check_second = false;
}
if (check_second)
{
float root2 = (-b + sdisc) + (do_refine ? root1 : 0);
t = root2 * l;
normal = (O + root2 * D) / radius;
if (t > ray_tmin && t < ray_tmax)
optixReportIntersection(t, 0, float3_as_ints(normal), float_as_int(radius));
}
}
}
| fa990780c1bab0a4606116c8a8abde0bf4f6197f.cu | #include <optix.h>
#include "OptiX7Craft.h"
#include "helpers.h"
#include "GeometryData.h"
extern "C" {
__constant__ Params params;
}
extern "C" __global__ void __intersection__parallelogram()
{
const Parallelogram* floor = reinterpret_cast<Parallelogram*>( optixGetSbtDataPointer() );
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_tmin = optixGetRayTmin(), ray_tmax = optixGetRayTmax();
float3 n = make_float3( floor->plane );
float dt = dot(ray_dir, n );
float t = (floor->plane.w - dot(n, ray_orig))/dt;
if( t > ray_tmin && t < ray_tmax )
{
float3 p = ray_orig + ray_dir * t;
float3 vi = p - floor->anchor;
float a1 = dot(floor->v1, vi);
if(a1 >= 0 && a1 <= 1)
{
float a2 = dot(floor->v2, vi);
if(a2 >= 0 && a2 <= 1)
{
optixReportIntersection(
t,
0,
float3_as_args(n),
float_as_int( a1 ), float_as_int( a2 )
);
}
}
}
}
extern "C" __global__ void __intersection__sphere_shell()
{
const SphereShell* sphere_shell = reinterpret_cast<SphereShell*>( optixGetSbtDataPointer() );
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_tmin = optixGetRayTmin(), ray_tmax = optixGetRayTmax();
float3 O = ray_orig - sphere_shell->center;
float l = 1 / length(ray_dir);
float3 D = ray_dir * l;
float b = dot(O, D), sqr_b = b * b;
float O_dot_O = dot(O, O);
float radius1 = sphere_shell->radius1, radius2 = sphere_shell->radius2;
float sqr_radius1 = radius1 * radius1, sqr_radius2 = radius2*radius2;
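    // the reported hit kind (HIT_*_FROM_*) records which shell surface was hit
    // (inner or outer sphere) and from which region the ray approached it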
// check if we are outside of outer sphere
if ( O_dot_O > sqr_radius2 + params.scene_epsilon )
{
if ( O_dot_O - sqr_b < sqr_radius2 - params.scene_epsilon )
{
float c = O_dot_O - sqr_radius2;
float root = sqr_b - c;
if (root > 0.0f) {
float t = -b - sqrtf( root );
float3 normal = (O + t * D) / radius2;
optixReportIntersection(
t * l,
HIT_OUTSIDE_FROM_OUTSIDE,
float3_as_args( normal ) );
}
}
}
// else we are inside of the outer sphere
else
{
float c = O_dot_O - sqr_radius1;
float root = b*b-c;
if ( root > 0.0f )
{
float t = -b - sqrtf( root );
// do we hit inner sphere from between spheres?
if ( t * l > ray_tmin && t * l < ray_tmax )
{
float3 normal = (O + t * D) / (-radius1);
optixReportIntersection(
t * l,
HIT_INSIDE_FROM_OUTSIDE,
float3_as_args( normal ) );
}
else
{
// do we hit inner sphere from within both spheres?
t = -b + (root > 0 ? sqrtf( root ) : 0.f);
if ( t * l > ray_tmin && t * l < ray_tmax )
{
float3 normal = ( O + t*D )/(-radius1);
optixReportIntersection(
t * l,
HIT_INSIDE_FROM_INSIDE,
float3_as_args( normal ) );
}
else
{
// do we hit outer sphere from between spheres?
c = O_dot_O - sqr_radius2;
root = b*b-c;
t = -b + (root > 0 ? sqrtf( root ) : 0.f);
float3 normal = ( O + t*D )/radius2;
optixReportIntersection(
t * l,
HIT_OUTSIDE_FROM_INSIDE,
float3_as_args( normal ) );
}
}
}
else
{
// do we hit outer sphere from between spheres?
c = O_dot_O - sqr_radius2;
root = b*b-c;
float t = -b + (root > 0 ? sqrtf( root ) : 0.f);
float3 normal = ( O + t*D )/radius2;
optixReportIntersection(
t * l,
HIT_OUTSIDE_FROM_INSIDE,
float3_as_args( normal ) );
}
}
}
#define float3_as_ints( u ) float_as_int( u.x ), float_as_int( u.y ), float_as_int( u.z )
static __device__ float3 get_normal(float t, float3 t0, float3 t1)
{
float3 neg = make_float3(t == t0.x ? 1 : 0, t == t0.y ? 1 : 0, t == t0.z ? 1 : 0);
float3 pos = make_float3(t == t1.x ? 1 : 0, t == t1.y ? 1 : 0, t == t1.z ? 1 : 0);
return pos - neg;
}
static __device__ float2 get_coord(float3 relativeCoord, float3 size)
{
float2 uv;
if (fabs(fabs(relativeCoord.z) - size.z) <= 1e-4) {
uv.x = (relativeCoord.x + size.x) / (2 * size.x);
uv.y = (relativeCoord.y + size.y) / (2 * size.y);
}
else if (fabs(fabs(relativeCoord.x) - size.x) <= 1e-4) {
uv.x = (relativeCoord.z + size.z) / (2 * size.z);
uv.y = (relativeCoord.y + size.y) / (2 * size.y);
}
else if (fabs(fabs(relativeCoord.y) - size.y) <= 1e-4) {
uv.x = (relativeCoord.z + size.z) / (2 * size.z);
uv.y = (relativeCoord.x + size.x) / (2 * size.x);
}
else {
uv.x = 0.0f;
uv.y = 0.0f;
}
return uv;
}
static __device__ cube_face get_face(float3 relativeCoord, float3 size)
{
if (fabs(relativeCoord.x - size.x) <= 1e-4) {
return x_up;
}
else if (fabs(relativeCoord.x + size.x) <= 1e-4) {
return x_down;
}
else if (fabs(relativeCoord.y - size.y) <= 1e-4) {
return y_up;
}
else if (fabs(relativeCoord.y + size.y) <= 1e-4) {
return y_down;
}
else if (fabs(relativeCoord.z - size.z) <= 1e-4) {
return z_up;
}
else if (fabs(relativeCoord.z + size.z) <= 1e-4) {
return z_down;
}
}
extern "C" __global__ void __intersection__cube()
{
const Cube* cube = reinterpret_cast<Cube*>(optixGetSbtDataPointer());
const float3 ray_origin = optixGetWorldRayOrigin();
float3 ray_direction = optixGetWorldRayDirection();
const float ray_tmin = optixGetRayTmin(), ray_tmax = optixGetRayTmax();
float3 cubemin = cube->center - cube->size;
float3 cubemax = cube->center + cube->size;
float3 t0 = (cubemin - ray_origin) / ray_direction;
float3 t1 = (cubemax - ray_origin) / ray_direction;
float3 near = fminf(t0, t1);
float3 far = fmaxf(t0, t1);
float tmin = fmaxf(near);
float tmax = fminf(far);
float eps = 0.0001f;
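    // standard slab test: the ray intersects the box iff the latest entry time
    // (tmin) does not exceed the earliest exit time (tmax)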
if (tmin <= tmax) {
bool check_second = true;
if (tmin >= ray_tmin && tmin <= ray_tmax) {
float3 normal = get_normal(tmin, t0, t1);
float3 coord = ray_origin + tmin * ray_direction;
            // compute the (u, v) texture coordinates of the hit point
float3 relativeCoord = coord - cube->center;
float2 uv = get_coord(relativeCoord, cube->size);
cube_face face = get_face(relativeCoord, cube->size);
optixReportIntersection(
tmin,
HIT_FROM_OUTSIDE,
float3_as_args(normal),
float_as_int(uv.x),
float_as_int(uv.y),
face
);
check_second = false;
}
if (check_second) {
if (tmax >= ray_tmin && tmax <= ray_tmax) {
float3 normal = get_normal(tmax, t0, t1);
float3 coord = ray_origin + tmax * ray_direction;
                // compute the (u, v) texture coordinates of the hit point
float3 relativeCoord = coord - cube->center;
float2 uv = get_coord(relativeCoord, cube->size);
cube_face face = get_face(relativeCoord, cube->size);
optixReportIntersection(
tmax,
HIT_FROM_INSIDE,
float3_as_args(normal),
float_as_int(uv.x),
float_as_int(uv.y),
face
);
}
}
}
}
extern "C" __global__ void __intersection__sphere()
{
const GeometryData::Sphere* hit_group_data = reinterpret_cast<GeometryData::Sphere*>(optixGetSbtDataPointer());
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_tmin = optixGetRayTmin();
const float ray_tmax = optixGetRayTmax();
const float3 O = ray_orig - hit_group_data->center;
const float l = 1.0f / length(ray_dir);
const float3 D = ray_dir * l;
const float radius = hit_group_data->radius;
float b = dot(O, D);
float c = dot(O, O) - radius * radius;
float disc = b * b - c;
if (disc > 0.0f)
{
float sdisc = sqrtf(disc);
float root1 = (-b - sdisc);
float root11 = 0.0f;
bool check_second = true;
const bool do_refine = fabsf(root1) > (10.0f * radius);
if (do_refine)
{
// refine root1
float3 O1 = O + root1 * D;
b = dot(O1, D);
c = dot(O1, O1) - radius * radius;
disc = b * b - c;
if (disc > 0.0f)
{
sdisc = sqrtf(disc);
root11 = (-b - sdisc);
}
}
float t;
float3 normal;
t = (root1 + root11) * l;
if (t > ray_tmin && t < ray_tmax)
{
normal = (O + (root1 + root11) * D) / radius;
if (optixReportIntersection(t, 0, float3_as_ints(normal), float_as_int(radius)))
check_second = false;
}
if (check_second)
{
float root2 = (-b + sdisc) + (do_refine ? root1 : 0);
t = root2 * l;
normal = (O + root2 * D) / radius;
if (t > ray_tmin && t < ray_tmax)
optixReportIntersection(t, 0, float3_as_ints(normal), float_as_int(radius));
}
}
}
|
1cb81f3d64f2d06b2472514a4cdf0ad0982d7930.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "modToObs.h"
#include "kernel_common.h"
#include "geometry/grid_3d.h"
#include "geometry/SE3.h"
#include "optimization/optimization.h"
#include "util/mirrored_memory.h"
namespace dart {
static const float truncVal = 1000.0;
// -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=-
template <bool dbgDA, bool dbgErr, bool dbgNorm>
__global__ void gpu_normEqnsModToObs(const int dims,
const float4 * labeledPredictedVertMap,
const int width,
const int height,
const int modelNum,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const Grid3D<float> * obsSdf,
const int * labelFrames,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
float * result,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
extern __shared__ float s[];
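    // one thread per pixel of the predicted vertex map: the observation SDF value
    // at the predicted model-frame point is the residual, its gradient gives the
    // error direction, and each point's contribution is accumulated atomically
    // into the normal equation blocks of result (JTr, JTJ, e)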
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// overflow
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
if (dbgDA) { if (modelNum == 0) { debugDataAssociation[index] = -1; } }
if (dbgErr) { if (modelNum == 0) { debugError[index] = NAN; } }
if (dbgNorm) { debugNorm[index] = make_float4(0); }
const float4 & predV_c = labeledPredictedVertMap[index];
// no prediction
if (predV_c.z == 0) { return; }
const float3 predV_m = SE3Transform(T_mc,make_float3(predV_c));
const float3 predVGrid = obsSdf->getGridCoords(predV_m);
if (!obsSdf->isInBoundsGradientInterp(predVGrid)) {
return;
}
const float residual = obsSdf->getValueInterpolated(predVGrid)*obsSdf->resolution;
if (dbgErr) { debugError[index] = residual; }
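    // predV_c.w packs the model id in the high 16 bits and the SDF index in the
    // low 16 bits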
const int label = round(predV_c.w);
const int model = label >> 16;
const int sdf = label & 65535;
if (model != modelNum) {
return;
}
if (dbgDA) { debugDataAssociation[index] = label; }
const int predFrame = labelFrames[sdf];
float * J = &s[tid*dims];
const float3 sdfGrad_m = obsSdf->getGradientInterpolated(predVGrid);
if (dbgNorm) { debugNorm[index] = make_float4(sdfGrad_m,1); }
// const float3 sdfGrad_m = SE3Rotate(T_mc,sdfGrad_m);
getErrorJacobianOfModelPoint(J,make_float4(predV_m,1),predFrame,sdfGrad_m,dims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
atomicAdd(numPredictions,1);
float * JTr = result;
float * JTJ = &result[dims];
float * e = &result[dims + JTJSize(dims)];
// //#pragma unroll
// for (int i=0; i<dims; i++) {
// if( J[i] == 0.0f) continue;
// float v = residual*J[i];
// atomicAdd(&JTr[i],v);
// //#pragma unroll
// for (int j=0; j<=i; j++) {
// float v2 = J[i]*J[j];
// atomicAdd(&JTJ[((i*(i+1))>>1) + j],v2);
// }
// }
// atomicAdd(e,0.5*residual*residual);
computeSquaredLossResult(dims,residual,J,e,JTr,JTJ);
}
template <bool dbgDA, bool dbgErr, bool dbgNorm>
__global__ void gpu_normEqnsModToObsTruncated(const int dims,
const float4 * labeledPredVertMap,
const int width,
const int height,
const int modelNum,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const Grid3D<float> * obsSdf,
const int * labelFrames,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
const float truncationDist,
float * result,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
extern __shared__ float s[];
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// overflow
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
if (dbgDA) { if (modelNum == 0) { debugDataAssociation[index] = -1; } }
if (dbgErr) { if (modelNum == 0) { debugError[index] = NAN; } }
if (dbgNorm) { debugNorm[index] = make_float4(0); }
const float4 & predV_c = labeledPredVertMap[index];
// no prediction
if (predV_c.z == 0) { return; }
const float3 predV_m = SE3Transform(T_mc,make_float3(predV_c));
const float3 predVGrid = obsSdf->getGridCoords(predV_m);
if (!obsSdf->isInBoundsGradientInterp(predVGrid)) {
return;
}
const float err = obsSdf->getValueInterpolated(predVGrid)*obsSdf->resolution;
// make sure we're in the truncation region and violating free space
if (err >= truncationDist || err < 0) {
return;
}
if (dbgErr) { debugError[index] = err; }
// const float4 predV_m = T_mc*make_float4(predV_c.x,predV_c.y,predV_c.z,1);
const int label = round(predV_c.w);
const int model = label >> 16;
const int sdf = label & 65535;
if (model != modelNum) {
return;
}
if (dbgDA) { debugDataAssociation[index] = label; }
const int predFrame = labelFrames[sdf];
float * J = &s[tid*dims];
const float3 sdfGrad_m = obsSdf->getGradientInterpolated(predVGrid);
if (dbgNorm) { debugNorm[index] = make_float4(sdfGrad_m,1); }
// const float3 sdfGrad_m = SE3Rotate(T_mc,sdfGrad_m);
getErrorJacobianOfModelPoint(J,make_float4(predV_m,1),predFrame,sdfGrad_m,dims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
atomicAdd(numPredictions,1);
float * eJ = result;
float * JTJ = &result[dims];
float * e = &result[dims + JTJSize(dims)];
//#pragma unroll
for (int i=0; i<dims; i++) {
if( J[i] == 0.0f) continue;
float v = err*J[i];
atomicAdd(&eJ[i],v);
//#pragma unroll
for (int j=0; j<=i; j++) {
float v2 = J[i]*J[j];
atomicAdd(&JTJ[((i*(i+1))>>1) + j],v2);
}
}
atomicAdd(e,0.5*err*err);
}
template <bool dbgDA, bool dbgErr, bool dbgNorm>
__global__ void gpu_normEqnsModToObsReduced(const int fullDims,
const int redDims,
const float4 * labeledPredictedVertMap,
const int width,
const int height,
const int modelNum,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const Grid3D<float> * obsSdf,
const int * labelFrames,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
const float * dtheta_dalpha,
float * result,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
extern __shared__ float s[];
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// overflow
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
if (dbgDA) { if (modelNum == 0) { debugDataAssociation[index] = -1; } }
if (dbgErr) { if (modelNum == 0) { debugError[index] = NAN; } }
if (dbgNorm) { debugNorm[index] = make_float4(0); }
const float4 & predV_c = labeledPredictedVertMap[index];
// no prediction
if (predV_c.z == 0) { return; }
const float3 predV_m = SE3Transform(T_mc,make_float3(predV_c));
const float3 predVGrid = obsSdf->getGridCoords(predV_m);
if (!obsSdf->isInBoundsGradientInterp(predVGrid)) {
return;
}
const float residual = obsSdf->getValueInterpolated(predVGrid)*obsSdf->resolution;
if (dbgErr) { debugError[index] = residual; }
const int label = round(predV_c.w);
const int model = label >> 16;
const int sdf = label & 65535;
if (model != modelNum) {
return;
}
if (dbgDA) { debugDataAssociation[index] = label; }
const int predFrame = labelFrames[sdf];
// array declarations
float * de_dtheta = &s[tid*(fullDims+redDims)];
float * J = &s[tid*(fullDims+redDims) + fullDims];
const float3 sdfGrad_m = obsSdf->getGradientInterpolated(predVGrid);
if (dbgNorm) { debugNorm[index] = make_float4(sdfGrad_m,1); }
atomicAdd(numPredictions,1);
getErrorJacobianOfModelPoint(de_dtheta,make_float4(predV_m,1),predFrame,sdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
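    // chain the full-dimensional gradient de/dtheta through dtheta/dalpha to get
    // the Jacobian with respect to the reduced pose parameters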
doPoseGradientReduction(J,de_dtheta,dtheta_dalpha,fullDims,redDims);
float * JTr = result;
float * JTJ = &result[redDims];
float * e = &result[redDims + JTJSize(redDims)];
//#pragma unroll
// for (int i=0; i<redDims; i++) {
// if( J[i]==0.0f) continue;
// float v = residual*J[i];
// atomicAdd(&JTr[i],v);
// //#pragma unroll
// for (int j=0; j<=i; j++) {
// float v2 = J[i]*J[j];
// atomicAdd(&JTJ[((i*(i+1))>>1) + j],v2);
// }
// }
// atomicAdd(e,0.5*residual*residual);
computeSquaredLossResult(redDims,residual,J,e,JTr,JTJ);
}
template <bool dbgDA, bool dbgErr, bool dbgNorm>
__global__ void gpu_normEqnsModToObsParamMap(const int fullDims,
const int redDims,
const float4 * labeledPredictedVertMap,
const int width,
const int height,
const int modelNum,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const Grid3D<float> * obsSdf,
const int * labelFrames,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
const int * dMapping,
float * result,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
extern __shared__ float s[];
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// overflow
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
if (dbgDA) { if (modelNum == 0) { debugDataAssociation[index] = -1; } }
if (dbgErr) { if (modelNum == 0) { debugError[index] = NAN; } }
if (dbgNorm) { debugNorm[index] = make_float4(0); }
const float4 & predV_c = labeledPredictedVertMap[index];
// no prediction
if (predV_c.z == 0) { return; }
const float3 predV_m = SE3Transform(T_mc,make_float3(predV_c));
const float3 predVGrid = obsSdf->getGridCoords(predV_m);
if (!obsSdf->isInBoundsGradientInterp(predVGrid)) {
return;
}
const float residual = obsSdf->getValueInterpolated(predVGrid)*obsSdf->resolution;
if (dbgErr) { debugError[index] = residual; }
const int label = round(predV_c.w);
const int model = label >> 16;
const int sdf = label & 65535;
if (model != modelNum) {
return;
}
if (dbgDA) { debugDataAssociation[index] = label; }
const int predFrame = labelFrames[sdf];
// array declarations
float * de_dtheta = &s[tid*(fullDims+redDims)];
float * J = &s[tid*(fullDims+redDims) + fullDims];
const float3 sdfGrad_m = obsSdf->getGradientInterpolated(predVGrid);
if (dbgNorm) { debugNorm[index] = make_float4(sdfGrad_m,1); }
atomicAdd(numPredictions,1);
getErrorJacobianOfModelPoint(de_dtheta,make_float4(predV_m,1),predFrame,sdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
doParamMapping(J,de_dtheta,dMapping,fullDims,redDims);
float * JTr = result;
float * JTJ = &result[redDims];
float * e = &result[redDims + JTJSize(redDims)];
computeSquaredLossResult(redDims,residual,J,e,JTr,JTJ);
}
__global__ void gpu_splatObsSdf(const float4 * dObsVertMap,
const int width,
const int height,
const SE3 T_cm,
const Grid3D<float> * dObsSdf,
const float focalLength) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int z = blockIdx.z*blockDim.z + threadIdx.z;
const float3 & o = dObsSdf->offset;
const float & resolution = dObsSdf->resolution;
// TODO: think about this
// const float3 center = o + resolution*make_float3( x + 0.5, y + 0.5, z + 0.5);
const float3 center = SE3Transform(T_cm,o + resolution*make_float3( x , y , z ));
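    // project the voxel center into the image with a pinhole model whose
    // principal point is at the image center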
const int u = round( (focalLength/center.z)*center.x + (width>>1) );
const int v = round( (focalLength/center.z)*center.y + (height>>1) );
float & splatVal = dObsSdf->data[x + dObsSdf->dim.x*(y + dObsSdf->dim.y*z)];
if (u < 0 || u >= width || v < 0 || v >= height) {
splatVal = truncVal;
} else if (dObsVertMap[u + v*width].w == 0 || dObsVertMap[u + v*width].z == 0) {
splatVal = 0.5*truncVal; // TODO: think about this
// } else {
// float sdfWorld = (dObsVertMap[u + v*width].z - center.z);
// float sdf = (sdfWorld)/dObsSdf->resolution;
// splatVal = fmaxf(0, fminf(truncVal, sdf));
// }
} else if (dObsVertMap[u + v*width].z < center.z) {
splatVal = 0;
} else {
splatVal = truncVal;
}
}
__global__ void gpu_clearObsSdf(const Grid3D<float> * dObsSdf,
const float truncationDist) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int z = blockIdx.z*blockDim.z + threadIdx.z;
dObsSdf->data[x + dObsSdf->dim.x*(y + dObsSdf->dim.y*z)] = truncationDist;
}
__global__ void gpu_computeTruncatedObsDf(const float4 * dObsVertMap,
const int width,
const int height,
const SE3 T_mc,
const Grid3D<float> * dObsSdf,
const float truncationDist) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int z = threadIdx.z;
if (x >= width-1 || y >= height-1) { return; }
float4 pA;
float4 pB;
float4 pC;
if (z == 0) {
pA = dObsVertMap[x + y*width];
pB = dObsVertMap[x+1 + y*width];
pC = dObsVertMap[x+1 + (y+1)*width];
} else {
pA = dObsVertMap[x + y*width];
pC = dObsVertMap[x+1 + (y+1)*width];
pB = dObsVertMap[x + (y+1)*width];
}
if (pA.w != 0 && pB.w != 0 && pC.w != 0 ) {
//printf("%d, %d\n",x,y);
const float3 pAg = dObsSdf->getGridCoords(make_float3(T_mc*pA));
const float3 pBg = dObsSdf->getGridCoords(make_float3(T_mc*pB));
const float3 pCg = dObsSdf->getGridCoords(make_float3(T_mc*pC));
const float3 minPoint = fminf(pAg,fminf(pBg,pCg));
const float3 maxPoint = fmaxf(pAg,fmaxf(pBg,pCg));
const float3 E0 = pAg - pBg;
const float3 E1 = pCg - pBg;
float a = dot(E0,E0);
float b = dot(E0,E1);
float c = dot(E1,E1);
float det = a*c-b*b;
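        // for each voxel near the triangle, find the closest point on the triangle
        // by minimizing |B + s*E0 + t*E1 - P|^2 over the (s,t) triangle domain,
        // branching on which of the seven regions of the plane the unconstrained
        // minimum falls into (the classic region-based point-triangle distance test)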
for (int gz=max(0,(int)floor(minPoint.z-truncationDist)); gz< min((int)ceil(maxPoint.z+truncationDist),dObsSdf->dim.z); ++gz) {
for (int gy=max(0,(int)floor(minPoint.y-truncationDist)); gy< min((int)ceil(maxPoint.y+truncationDist),dObsSdf->dim.y); ++gy) {
for (int gx=max(0,(int)floor(minPoint.x-truncationDist)); gx< min((int)ceil(maxPoint.x+truncationDist),dObsSdf->dim.x); ++gx) {
//printf("> %d, %d, %d\n",gx,gy,gz);
float & sdfVal = dObsSdf->data[gx + dObsSdf->dim.x*(gy + dObsSdf->dim.y*gz)];
const float3 P = make_float3(gx+0.5,gy+0.5,gz+0.5);
const float3 D = pBg - P;
float d = dot(E0,D);
float e = dot(E1,D);
float f = dot(D,D);
float s = b*e - c*d;
float t = b*d - a*e;
int region;
if ( s+t <= det) {
if ( s < 0 ) {
if ( t < 0 ) {
region = 4;
} else {
region = 3;
}
} else if ( t < 0 ) {
region = 5;
} else {
region = 0;
}
} else {
if ( s < 0 ) {
region = 2;
} else if ( t < 0) {
region = 6;
} else {
region = 1;
}
}
switch (region) {
case 0:
{
float invDet = 1/det;
s*= invDet;
t*= invDet;
}
break;
case 1:
{
float numer = c + e - b - d;
if (numer <= 0) {
s = 0;
} else {
float denom = a - 2*b + c;
s = ( numer >= denom ? 1 : numer/denom );
}
t = 1-s;
}
break;
case 2:
{
float tmp0 = b+d;
float tmp1 = c+e;
if ( tmp1 > tmp0 ) { // min on edge s+1=1
float numer = tmp1 - tmp0;
float denom = a - 2*b + c;
s = ( numer >= denom ? 1 : numer/denom );
t = 1-s;
} else { // min on edge s=0
s = 0;
t = ( tmp1 <= 0 ? 1 : ( e >= 0 ? 0 : -e/c ) );
}
}
break;
case 3:
s = 0;
t = ( e >= 0 ? 0 :
( -e >= c ? 1 : -e/c ) );
break;
case 4:
if ( d < 0 ) { // min on edge t=0
t = 0;
s = ( d >= 0 ? 0 :
( -d >= a ? 1 : -d/a ) );
} else { // min on edge s = 0
s = 0;
t = ( e >= 0 ? 0 :
( -e >= c ? 1 : -e/c ) );
}
break;
case 5:
t = 0;
s = ( d >= 0 ? 0 :
( -d >= a ? 1 : -d/a ) );
break;
case 6:
{
float tmp0 = a+d;
float tmp1 = b+e;
if (tmp0 > tmp1) { // min on edge s+1=1
float numer = c + e - b - d;
float denom = a -2*b + c;
s = ( numer >= denom ? 1 : numer/denom );
t = 1-s;
} else { // min on edge t=1
t = 0;
s = ( tmp0 <= 0 ? 1 : ( d >= 0 ? 0 : -d/a ));
}
}
break;
}
const float3 closestPoint = pBg + s*E0 + t*E1;
const float3 v = closestPoint-P;
float dist = length(v);
float3 unscaledNorm = cross(pAg-pBg,pCg-pBg);
if (dot(v,unscaledNorm) < 0) { dist = -dist; }
//atomicMin(&sdfVal,length);
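                        // note: this non-atomic update can race when threads handling
                        // neighboring triangles touch the same voxel, so the stored
                        // value may not be the exact minimum distance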
// TODO
                        //sdfVal = min(sdfVal,dist);
                        if (fabs(dist) < fabs(sdfVal)) { sdfVal = dist; }
//printf("%f\n",sdfVal);
}
}
}
}
}
__global__ void gpu_signTruncatedObsDf(const float4 * dObsVertMap,
const int width,
const int height,
const SE3 T_cm,
const Grid3D<float> * dObsSdf,
const float focalLength) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width-1 || y >= height-1) { return; }
if (dObsVertMap[x + y*width].w != 0 && dObsVertMap[x+1 + y*width].w != 0 && dObsVertMap[x+1 + (y+1)*width].w != 0 ) {
}
}
__global__ void gpu_errorModToObs(const float4 * labeledPredVertMap,
const int width,
const int height,
const Grid3D<float> * obsSdf,
float* result) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// overflow
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
const float4 & predV = labeledPredVertMap[index];
// no prediction
if (predV.z == 0) { return; }
const float3 predVGrid = obsSdf->getGridCoords(make_float3(predV));
if (!obsSdf->isInBoundsGradientInterp(predVGrid)) {
return;
}
const float err = obsSdf->getValueInterpolated(predVGrid)*obsSdf->resolution;
//atomicAdd(numPredictions,1);
atomicAdd(result,0.5*err*err);
}
__global__ void gpu_cullUnobservable(float4 * predVertMap,
const int predWidth,
const int predHeight,
const float4 * obsVertMap,
const int obsWidth,
const int obsHeight) {
const int predX = blockIdx.x*blockDim.x + threadIdx.x;
const int predY = blockIdx.y*blockDim.y + threadIdx.y;
if (predX >= predWidth || predY >= predHeight) { return; }
const int predIndex = predX + predY*predWidth;
const int obsX = predX*obsWidth/predWidth;
const int obsY = predY*obsHeight/predHeight;
const int obsIndex = obsX + obsY*obsWidth;
if (obsVertMap[obsIndex].w <= 0 || //obsVertMap[obsIndex].z == 0 ||
obsVertMap[obsIndex+1].w <= 0 || //obsVertMap[obsIndex+1].z == 0 ||
obsVertMap[obsIndex+obsWidth].w <= 0 || //obsVertMap[obsIndex+obsWidth].z == 0 ||
obsVertMap[obsIndex+obsWidth+1].w <= 0 //|| obsVertMap[obsIndex+obsWidth+1].z == 0
) {
predVertMap[predIndex].z = 0;
}
}
// -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=-
void normEqnsModToObs(const int dimensions,
const float4 * dLabeledPredictedVertMap,
const int width,
const int height,
const MirroredModel & model,
const SE3 T_gc,
float * dResult,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
dim3 block;
if (height == 1) {
block.x = 64; block.y = block.z = 1;
}
else {
block.x = 8; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
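// Launch configuration note: the grid is rounded up with ceil so every pixel is covered
// (the kernel guards against x/y overflow), and the dynamic shared memory of
// 64*dimensions*sizeof(float) in the launches below provides one Jacobian row of length
// `dimensions` per thread, since block.x*block.y is 64 in both branches above.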
hipMemset(dResult,0,(dimensions + JTJSize(dimensions) + 1)*sizeof(float));
hipMemset(numPredictions,0,sizeof(int));
{
if (debugDataAssociation == 0) {
if (debugError == 0) {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_normEqnsModToObs<false,false,false>), dim3(grid),dim3(block),64*dimensions*sizeof(float), 0, dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_normEqnsModToObs<false,false,true>), dim3(grid),dim3(block),64*dimensions*sizeof(float), 0, dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_normEqnsModToObs<false,true,false>), dim3(grid),dim3(block),64*dimensions*sizeof(float), 0, dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_normEqnsModToObs<false,true,true>), dim3(grid),dim3(block),64*dimensions*sizeof(float), 0, dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
else {
if (debugError == 0) {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_normEqnsModToObs<true,false,false>), dim3(grid),dim3(block),64*dimensions*sizeof(float), 0, dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_normEqnsModToObs<true,false,true>), dim3(grid),dim3(block),64*dimensions*sizeof(float), 0, dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_normEqnsModToObs<true,true,false>), dim3(grid),dim3(block),64*dimensions*sizeof(float), 0, dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_normEqnsModToObs<true,true,true>), dim3(grid),dim3(block),64*dimensions*sizeof(float), 0, dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
#ifdef CUDA_ERR_CHECK
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("normEqnsModToObs: %s\n",hipGetErrorString(err));
}
#endif
}
}
void normEqnsModToObsTruncated(const int dimensions,
const float4 * dLabeledPredictedVertMap,
const int width,
const int height,
const MirroredModel & model,
const float truncationDistance,
float * dResult,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
dim3 block;
if (height == 1) {
block.x = 64; block.y = block.z = 1;
}
else {
block.x = 8; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
hipMemset(dResult,0,(dimensions + JTJSize(dimensions) + 1)*sizeof(float));
hipMemset(numPredictions,0,sizeof(int));
{
if (debugDataAssociation == 0) {
if (debugError == 0) {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_normEqnsModToObsTruncated<false,false,false>), dim3(grid),dim3(block),64*dimensions*sizeof(float), 0, dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_normEqnsModToObsTruncated<false,false,true>) , dim3(grid),dim3(block),64*dimensions*sizeof(float), 0, dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_normEqnsModToObsTruncated<false,true,false>) , dim3(grid),dim3(block),64*dimensions*sizeof(float), 0, dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_normEqnsModToObsTruncated<false,true,true>) , dim3(grid),dim3(block),64*dimensions*sizeof(float), 0, dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
else {
if (debugError == 0) {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_normEqnsModToObsTruncated<true,false,false>) , dim3(grid),dim3(block),64*dimensions*sizeof(float), 0, dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_normEqnsModToObsTruncated<true,false,true>) , dim3(grid),dim3(block),64*dimensions*sizeof(float), 0, dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_normEqnsModToObsTruncated<true,true,false>) , dim3(grid),dim3(block),64*dimensions*sizeof(float), 0, dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_normEqnsModToObsTruncated<true,true,true>) , dim3(grid),dim3(block),64*dimensions*sizeof(float), 0, dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
#ifdef CUDA_ERR_CHECK
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("normEqnsModToObs: %s\n",hipGetErrorString(err));
}
#endif
}
}
void normEqnsModToObsReduced(const int dims,
const int reductionDims,
const float * d_dtheta_dalpha,
const float4 * dLabeledPredictedVertMap,
const int width,
const int height,
const MirroredModel & model,
float * dResult,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
dim3 block;
if (height == 1) {
block.x = 64; block.y = block.z = 1;
}
else {
block.x = 8; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
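// Here the per-thread scratch holds both the full-dimensional gradient de_dtheta and the
// reduced Jacobian J (see s[tid*(fullDims+redDims)] in the kernel), hence the
// 64*(dims+reductionDims)*sizeof(float) of dynamic shared memory in the launches below.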
hipMemset(dResult,0,(reductionDims + JTJSize(reductionDims) + 1)*sizeof(float));
{
if (debugDataAssociation == 0) {
if (debugError == 0) {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_normEqnsModToObsReduced<false,false,false>), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_normEqnsModToObsReduced<false,false,true>), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_normEqnsModToObsReduced<false,true,false>), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_normEqnsModToObsReduced<false,true,true>), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
else {
if (debugError == 0) {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_normEqnsModToObsReduced<true,false,false>), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_normEqnsModToObsReduced<true,false,true>), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_normEqnsModToObsReduced<true,true,false>), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_normEqnsModToObsReduced<true,true,true>), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
#ifdef CUDA_ERR_CHECK
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("normEqnsModToObsReduced: %s\n",hipGetErrorString(err));
}
#endif
}
}
void normEqnsModToObsParamMap(const int dims,
const int reductionDims,
const int * dMapping,
const float4 * dLabeledPredictedVertMap,
const int width,
const int height,
const MirroredModel & model,
float * dResult,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
dim3 block;
if (height == 1) {
block.x = 64; block.y = block.z = 1;
}
else {
block.x = 8; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
hipMemset(dResult,0,(reductionDims + JTJSize(reductionDims) + 1)*sizeof(float));
{
if (debugDataAssociation == 0) {
if (debugError == 0) {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_normEqnsModToObsParamMap<false,false,false>), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_normEqnsModToObsParamMap<false,false,true>), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_normEqnsModToObsParamMap<false,true,false>), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_normEqnsModToObsParamMap<false,true,true>), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
else {
if (debugError == 0) {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_normEqnsModToObsParamMap<true,false,false>), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_normEqnsModToObsParamMap<true,false,true>), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
hipLaunchKernelGGL(( gpu_normEqnsModToObsParamMap<true,true,false>), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
hipLaunchKernelGGL(( gpu_normEqnsModToObsParamMap<true,true,true>), dim3(grid),dim3(block),64*(dims+reductionDims)*sizeof(float), 0, dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
#ifdef CUDA_ERR_CHECK
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("normEqnsModToObsReduced: %s\n",hipGetErrorString(err));
}
#endif
}
}
void splatObsSdfZeros(const float4 * dObsVertMap,
const int width,
const int height,
const SE3 & T_cm,
const Grid3D<float> * dObsSdf,
const uint3 sdfDim,
const float focalLength) {
dim3 block(8,8,4);
dim3 grid(sdfDim.x / block.x, sdfDim.y / block.y, sdfDim.z / block.z );
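// Note: the integer division here assumes sdfDim is a multiple of the block size (8,8,4);
// any remainder voxels would not be visited by gpu_splatObsSdf.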
hipLaunchKernelGGL(( gpu_splatObsSdf), dim3(grid),dim3(block), 0, 0, dObsVertMap,
width,
height,
T_cm,
dObsSdf,
focalLength);
}
void computeTruncatedObsSdf(const float4 * dObsVertMap,
const int width,
const int height,
const SE3 & T_mc,
const Grid3D<float> * dObsSdf,
const uint3 sdfDim,
const float truncationDist) {
dim3 block(8,8,4);
dim3 grid(sdfDim.x / block.x, sdfDim.y / block.y, sdfDim.z / block.z );
hipLaunchKernelGGL(( gpu_clearObsSdf), dim3(grid),dim3(block), 0, 0, dObsSdf, truncationDist);
block = dim3(16,8,2);
grid = dim3( ceil( width / (float)block.x), ceil(height / (float)block.y ), 1);
hipLaunchKernelGGL(( gpu_computeTruncatedObsDf), dim3(grid),dim3(block), 0, 0, dObsVertMap,width,height,T_mc,dObsSdf,truncationDist);
//gpu_signTruncatedObsDf<<<grid,block>>>(dObsVertMap,width,height,T_cm,dObsSdf,focalLength);
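// The distance field written by gpu_computeTruncatedObsDf is already signed via the
// triangle-normal test, which is presumably why the separate sign pass above is left
// disabled (its kernel body is currently a stub).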
}
float errorModToObs(const float4 *dLabeledPredictedVertMap,
const int width,
const int height,
const Grid3D<float> *dObsSdf) {
dim3 block(16,8);
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
static MirroredVector<float> error(1);
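// The accumulator is a function-local static, so a single device float persists across
// calls; this keeps errorModToObs simple but makes it non-reentrant on the host.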
hipMemset(error.devicePtr(),0,sizeof(float));
hipLaunchKernelGGL(( gpu_errorModToObs), dim3(grid),dim3(block), 0, 0, dLabeledPredictedVertMap,width,height,dObsSdf,error.devicePtr());
error.syncDeviceToHost();
return error.hostPtr()[0];
}
void cullUnobservable_(float4 * predVertMap,
const int predWidth,
const int predHeight,
const float4 * obsVertMap,
const int obsWidth,
const int obsHeight,
const hipStream_t stream) {
dim3 block(8,8,1);
dim3 grid( ceil( predWidth / (float)block.x), ceil(predHeight / (float)block.y ));
hipLaunchKernelGGL(( gpu_cullUnobservable), dim3(grid),dim3(block),0,stream, predVertMap,predWidth,predHeight,
obsVertMap,obsWidth,obsHeight);
}
}
| 1cb81f3d64f2d06b2472514a4cdf0ad0982d7930.cu | #include "modToObs.h"
#include "kernel_common.h"
#include "geometry/grid_3d.h"
#include "geometry/SE3.h"
#include "optimization/optimization.h"
#include "util/mirrored_memory.h"
namespace dart {
static const float truncVal = 1000.0;
// -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=-
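// The mod-to-obs kernels below evaluate the observation SDF at each predicted model vertex
// and accumulate Gauss-Newton-style normal equations: with residual
// r = obsSdf(T_mc * v_pred) * resolution and per-parameter Jacobian J, each thread adds
// 0.5*r^2 to the energy, r*J to JTr, and the lower triangle of J*J^T to JTJ
// (either explicitly with atomicAdd or via computeSquaredLossResult).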
template <bool dbgDA, bool dbgErr, bool dbgNorm>
__global__ void gpu_normEqnsModToObs(const int dims,
const float4 * labeledPredictedVertMap,
const int width,
const int height,
const int modelNum,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const Grid3D<float> * obsSdf,
const int * labelFrames,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
float * result,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
extern __shared__ float s[];
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// overflow
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
if (dbgDA) { if (modelNum == 0) { debugDataAssociation[index] = -1; } }
if (dbgErr) { if (modelNum == 0) { debugError[index] = NAN; } }
if (dbgNorm) { debugNorm[index] = make_float4(0); }
const float4 & predV_c = labeledPredictedVertMap[index];
// no prediction
if (predV_c.z == 0) { return; }
const float3 predV_m = SE3Transform(T_mc,make_float3(predV_c));
const float3 predVGrid = obsSdf->getGridCoords(predV_m);
if (!obsSdf->isInBoundsGradientInterp(predVGrid)) {
return;
}
const float residual = obsSdf->getValueInterpolated(predVGrid)*obsSdf->resolution;
if (dbgErr) { debugError[index] = residual; }
const int label = round(predV_c.w);
const int model = label >> 16;
const int sdf = label & 65535;
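// The w component packs the data association: the model index lives in the high 16 bits
// and the SDF/part index in the low 16 bits of the rounded label.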
if (model != modelNum) {
return;
}
if (dbgDA) { debugDataAssociation[index] = label; }
const int predFrame = labelFrames[sdf];
float * J = &s[tid*dims];
const float3 sdfGrad_m = obsSdf->getGradientInterpolated(predVGrid);
if (dbgNorm) { debugNorm[index] = make_float4(sdfGrad_m,1); }
// const float3 sdfGrad_m = SE3Rotate(T_mc,sdfGrad_m);
getErrorJacobianOfModelPoint(J,make_float4(predV_m,1),predFrame,sdfGrad_m,dims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
atomicAdd(numPredictions,1);
float * JTr = result;
float * JTJ = &result[dims];
float * e = &result[dims + JTJSize(dims)];
// //#pragma unroll
// for (int i=0; i<dims; i++) {
// if( J[i] == 0.0f) continue;
// float v = residual*J[i];
// atomicAdd(&JTr[i],v);
// //#pragma unroll
// for (int j=0; j<=i; j++) {
// float v2 = J[i]*J[j];
// atomicAdd(&JTJ[((i*(i+1))>>1) + j],v2);
// }
// }
// atomicAdd(e,0.5*residual*residual);
computeSquaredLossResult(dims,residual,J,e,JTr,JTJ);
}
template <bool dbgDA, bool dbgErr, bool dbgNorm>
__global__ void gpu_normEqnsModToObsTruncated(const int dims,
const float4 * labeledPredVertMap,
const int width,
const int height,
const int modelNum,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const Grid3D<float> * obsSdf,
const int * labelFrames,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
const float truncationDist,
float * result,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
extern __shared__ float s[];
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// overflow
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
if (dbgDA) { if (modelNum == 0) { debugDataAssociation[index] = -1; } }
if (dbgErr) { if (modelNum == 0) { debugError[index] = NAN; } }
if (dbgNorm) { debugNorm[index] = make_float4(0); }
const float4 & predV_c = labeledPredVertMap[index];
// no prediction
if (predV_c.z == 0) { return; }
const float3 predV_m = SE3Transform(T_mc,make_float3(predV_c));
const float3 predVGrid = obsSdf->getGridCoords(predV_m);
if (!obsSdf->isInBoundsGradientInterp(predVGrid)) {
return;
}
const float err = obsSdf->getValueInterpolated(predVGrid)*obsSdf->resolution;
// make sure we're in the truncation region and violating free space
if (err >= truncationDist || err < 0) {
return;
}
if (dbgErr) { debugError[index] = err; }
// const float4 predV_m = T_mc*make_float4(predV_c.x,predV_c.y,predV_c.z,1);
const int label = round(predV_c.w);
const int model = label >> 16;
const int sdf = label & 65535;
if (model != modelNum) {
return;
}
if (dbgDA) { debugDataAssociation[index] = label; }
const int predFrame = labelFrames[sdf];
float * J = &s[tid*dims];
const float3 sdfGrad_m = obsSdf->getGradientInterpolated(predVGrid);
if (dbgNorm) { debugNorm[index] = make_float4(sdfGrad_m,1); }
// const float3 sdfGrad_m = SE3Rotate(T_mc,sdfGrad_m);
getErrorJacobianOfModelPoint(J,make_float4(predV_m,1),predFrame,sdfGrad_m,dims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
atomicAdd(numPredictions,1);
float * eJ = result;
float * JTJ = &result[dims];
float * e = &result[dims + JTJSize(dims)];
//#pragma unroll
for (int i=0; i<dims; i++) {
if( J[i] == 0.0f) continue;
float v = err*J[i];
atomicAdd(&eJ[i],v);
//#pragma unroll
for (int j=0; j<=i; j++) {
float v2 = J[i]*J[j];
atomicAdd(&JTJ[((i*(i+1))>>1) + j],v2);
}
}
atomicAdd(e,0.5*err*err);
}
template <bool dbgDA, bool dbgErr, bool dbgNorm>
__global__ void gpu_normEqnsModToObsReduced(const int fullDims,
const int redDims,
const float4 * labeledPredictedVertMap,
const int width,
const int height,
const int modelNum,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const Grid3D<float> * obsSdf,
const int * labelFrames,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
const float * dtheta_dalpha,
float * result,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
extern __shared__ float s[];
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// overflow
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
if (dbgDA) { if (modelNum == 0) { debugDataAssociation[index] = -1; } }
if (dbgErr) { if (modelNum == 0) { debugError[index] = NAN; } }
if (dbgNorm) { debugNorm[index] = make_float4(0); }
const float4 & predV_c = labeledPredictedVertMap[index];
// no prediction
if (predV_c.z == 0) { return; }
const float3 predV_m = SE3Transform(T_mc,make_float3(predV_c));
const float3 predVGrid = obsSdf->getGridCoords(predV_m);
if (!obsSdf->isInBoundsGradientInterp(predVGrid)) {
return;
}
const float residual = obsSdf->getValueInterpolated(predVGrid)*obsSdf->resolution;
if (dbgErr) { debugError[index] = residual; }
const int label = round(predV_c.w);
const int model = label >> 16;
const int sdf = label & 65535;
if (model != modelNum) {
return;
}
if (dbgDA) { debugDataAssociation[index] = label; }
const int predFrame = labelFrames[sdf];
// array declarations
float * de_dtheta = &s[tid*(fullDims+redDims)];
float * J = &s[tid*(fullDims+redDims) + fullDims];
const float3 sdfGrad_m = obsSdf->getGradientInterpolated(predVGrid);
if (dbgNorm) { debugNorm[index] = make_float4(sdfGrad_m,1); }
atomicAdd(numPredictions,1);
getErrorJacobianOfModelPoint(de_dtheta,make_float4(predV_m,1),predFrame,sdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
doPoseGradientReduction(J,de_dtheta,dtheta_dalpha,fullDims,redDims);
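// doPoseGradientReduction presumably applies the chain rule J = de_dtheta * dtheta_dalpha,
// projecting the full-dimensional gradient onto the reduced parameter set alpha.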
float * JTr = result;
float * JTJ = &result[redDims];
float * e = &result[redDims + JTJSize(redDims)];
//#pragma unroll
// for (int i=0; i<redDims; i++) {
// if( J[i]==0.0f) continue;
// float v = residual*J[i];
// atomicAdd(&JTr[i],v);
// //#pragma unroll
// for (int j=0; j<=i; j++) {
// float v2 = J[i]*J[j];
// atomicAdd(&JTJ[((i*(i+1))>>1) + j],v2);
// }
// }
// atomicAdd(e,0.5*residual*residual);
computeSquaredLossResult(redDims,residual,J,e,JTr,JTJ);
}
template <bool dbgDA, bool dbgErr, bool dbgNorm>
__global__ void gpu_normEqnsModToObsParamMap(const int fullDims,
const int redDims,
const float4 * labeledPredictedVertMap,
const int width,
const int height,
const int modelNum,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const Grid3D<float> * obsSdf,
const int * labelFrames,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
const int * dMapping,
float * result,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
extern __shared__ float s[];
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// overflow
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
if (dbgDA) { if (modelNum == 0) { debugDataAssociation[index] = -1; } }
if (dbgErr) { if (modelNum == 0) { debugError[index] = NAN; } }
if (dbgNorm) { debugNorm[index] = make_float4(0); }
const float4 & predV_c = labeledPredictedVertMap[index];
// no prediction
if (predV_c.z == 0) { return; }
const float3 predV_m = SE3Transform(T_mc,make_float3(predV_c));
const float3 predVGrid = obsSdf->getGridCoords(predV_m);
if (!obsSdf->isInBoundsGradientInterp(predVGrid)) {
return;
}
const float residual = obsSdf->getValueInterpolated(predVGrid)*obsSdf->resolution;
if (dbgErr) { debugError[index] = residual; }
const int label = round(predV_c.w);
const int model = label >> 16;
const int sdf = label & 65535;
if (model != modelNum) {
return;
}
if (dbgDA) { debugDataAssociation[index] = label; }
const int predFrame = labelFrames[sdf];
// array declarations
float * de_dtheta = &s[tid*(fullDims+redDims)];
float * J = &s[tid*(fullDims+redDims) + fullDims];
const float3 sdfGrad_m = obsSdf->getGradientInterpolated(predVGrid);
if (dbgNorm) { debugNorm[index] = make_float4(sdfGrad_m,1); }
atomicAdd(numPredictions,1);
getErrorJacobianOfModelPoint(de_dtheta,make_float4(predV_m,1),predFrame,sdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
doParamMapping(J,de_dtheta,dMapping,fullDims,redDims);
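// doParamMapping presumably gathers entries of the full gradient de_dtheta into the
// reduced vector J according to the index map dMapping.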
float * JTr = result;
float * JTJ = &result[redDims];
float * e = &result[redDims + JTJSize(redDims)];
computeSquaredLossResult(redDims,residual,J,e,JTr,JTJ);
}
__global__ void gpu_splatObsSdf(const float4 * dObsVertMap,
const int width,
const int height,
const SE3 T_cm,
const Grid3D<float> * dObsSdf,
const float focalLength) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int z = blockIdx.z*blockDim.z + threadIdx.z;
const float3 & o = dObsSdf->offset;
const float & resolution = dObsSdf->resolution;
// TODO: think about this
// const float3 center = o + resolution*make_float3( x + 0.5, y + 0.5, z + 0.5);
const float3 center = SE3Transform(T_cm,o + resolution*make_float3( x , y , z ));
const int u = round( (focalLength/center.z)*center.x + (width>>1) );
const int v = round( (focalLength/center.z)*center.y + (height>>1) );
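// The voxel center is mapped into the camera frame by T_cm and projected with a simple
// pinhole model whose principal point is taken to be the image center
// ((width>>1, height>>1) above).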
float & splatVal = dObsSdf->data[x + dObsSdf->dim.x*(y + dObsSdf->dim.y*z)];
if (u < 0 || u >= width || v < 0 || v >= height) {
splatVal = truncVal;
} else if (dObsVertMap[u + v*width].w == 0 || dObsVertMap[u + v*width].z == 0) {
splatVal = 0.5*truncVal; // TODO: think about this
// } else {
// float sdfWorld = (dObsVertMap[u + v*width].z - center.z);
// float sdf = (sdfWorld)/dObsSdf->resolution;
// splatVal = fmaxf(0, fminf(truncVal, sdf));
// }
} else if (dObsVertMap[u + v*width].z < center.z) {
splatVal = 0;
} else {
splatVal = truncVal;
}
}
__global__ void gpu_clearObsSdf(const Grid3D<float> * dObsSdf,
const float truncationDist) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int z = blockIdx.z*blockDim.z + threadIdx.z;
dObsSdf->data[x + dObsSdf->dim.x*(y + dObsSdf->dim.y*z)] = truncationDist;
}
__global__ void gpu_computeTruncatedObsDf(const float4 * dObsVertMap,
const int width,
const int height,
const SE3 T_mc,
const Grid3D<float> * dObsSdf,
const float truncationDist) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int z = threadIdx.z;
if (x >= width-1 || y >= height-1) { return; }
float4 pA;
float4 pB;
float4 pC;
if (z == 0) {
pA = dObsVertMap[x + y*width];
pB = dObsVertMap[x+1 + y*width];
pC = dObsVertMap[x+1 + (y+1)*width];
} else {
pA = dObsVertMap[x + y*width];
pC = dObsVertMap[x+1 + (y+1)*width];
pB = dObsVertMap[x + (y+1)*width];
}
if (pA.w != 0 && pB.w != 0 && pC.w != 0 ) {
//printf("%d, %d\n",x,y);
const float3 pAg = dObsSdf->getGridCoords(make_float3(T_mc*pA));
const float3 pBg = dObsSdf->getGridCoords(make_float3(T_mc*pB));
const float3 pCg = dObsSdf->getGridCoords(make_float3(T_mc*pC));
const float3 minPoint = fminf(pAg,fminf(pBg,pCg));
const float3 maxPoint = fmaxf(pAg,fmaxf(pBg,pCg));
const float3 E0 = pAg - pBg;
const float3 E1 = pCg - pBg;
float a = dot(E0,E0);
float b = dot(E0,E1);
float c = dot(E1,E1);
float det = a*c-b*b;
for (int gz=max(0,(int)floor(minPoint.z-truncationDist)); gz< min((int)ceil(maxPoint.z+truncationDist),dObsSdf->dim.z); ++gz) {
for (int gy=max(0,(int)floor(minPoint.y-truncationDist)); gy< min((int)ceil(maxPoint.y+truncationDist),dObsSdf->dim.y); ++gy) {
for (int gx=max(0,(int)floor(minPoint.x-truncationDist)); gx< min((int)ceil(maxPoint.x+truncationDist),dObsSdf->dim.x); ++gx) {
//printf("> %d, %d, %d\n",gx,gy,gz);
float & sdfVal = dObsSdf->data[gx + dObsSdf->dim.x*(gy + dObsSdf->dim.y*gz)];
const float3 P = make_float3(gx+0.5,gy+0.5,gz+0.5);
const float3 D = pBg - P;
float d = dot(E0,D);
float e = dot(E1,D);
float f = dot(D,D);
float s = b*e - c*d;
float t = b*d - a*e;
int region;
if ( s+t <= det) {
if ( s < 0 ) {
if ( t < 0 ) {
region = 4;
} else {
region = 3;
}
} else if ( t < 0 ) {
region = 5;
} else {
region = 0;
}
} else {
if ( s < 0 ) {
region = 2;
} else if ( t < 0) {
region = 6;
} else {
region = 1;
}
}
switch (region) {
case 0:
{
float invDet = 1/det;
s*= invDet;
t*= invDet;
}
break;
case 1:
{
float numer = c + e - b - d;
if (numer <= 0) {
s = 0;
} else {
float denom = a - 2*b + c;
s = ( numer >= denom ? 1 : numer/denom );
}
t = 1-s;
}
break;
case 2:
{
float tmp0 = b+d;
float tmp1 = c+e;
if ( tmp1 > tmp0 ) { // min on edge s+t=1
float numer = tmp1 - tmp0;
float denom = a - 2*b + c;
s = ( numer >= denom ? 1 : numer/denom );
t = 1-s;
} else { // min on edge s=0
s = 0;
t = ( tmp1 <= 0 ? 1 : ( e >= 0 ? 0 : -e/c ) );
}
}
break;
case 3:
s = 0;
t = ( e >= 0 ? 0 :
( -e >= c ? 1 : -e/c ) );
break;
case 4:
if ( d < 0 ) { // min on edge t=0
t = 0;
s = ( d >= 0 ? 0 :
( -d >= a ? 1 : -d/a ) );
} else { // min on edge s = 0
s = 0;
t = ( e >= 0 ? 0 :
( -e >= c ? 1 : -e/c ) );
}
break;
case 5:
t = 0;
s = ( d >= 0 ? 0 :
( -d >= a ? 1 : -d/a ) );
break;
case 6:
{
float tmp0 = a+d;
float tmp1 = b+e;
if (tmp0 > tmp1) { // min on edge s+t=1
float numer = c + e - b - d;
float denom = a -2*b + c;
s = ( numer >= denom ? 1 : numer/denom );
t = 1-s;
} else { // min on edge t=0
t = 0;
s = ( tmp0 <= 0 ? 1 : ( d >= 0 ? 0 : -d/a ));
}
}
break;
}
const float3 closestPoint = pBg + s*E0 + t*E1;
const float3 v = closestPoint-P;
float dist = length(v);
float3 unscaledNorm = cross(pAg-pBg,pCg-pBg);
if (dot(v,unscaledNorm) < 0) { dist = -dist; }
//atomicMin(&sdfVal,length);
// TODO
//sdfVal = min(sdfVal,list);
if (fabs(dist) < fabs(sdfVal)) { sdfVal = dist; }
//printf("%f\n",sdfVal);
}
}
}
}
}
__global__ void gpu_signTruncatedObsDf(const float4 * dObsVertMap,
const int width,
const int height,
const SE3 T_cm,
const Grid3D<float> * dObsSdf,
const float focalLength) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width-1 || y >= height-1) { return; }
if (dObsVertMap[x + y*width].w != 0 && dObsVertMap[x+1 + y*width].w != 0 && dObsVertMap[x+1 + (y+1)*width].w != 0 ) {
}
}
__global__ void gpu_errorModToObs(const float4 * labeledPredVertMap,
const int width,
const int height,
const Grid3D<float> * obsSdf,
float* result) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// overflow
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
const float4 & predV = labeledPredVertMap[index];
// no prediction
if (predV.z == 0) { return; }
const float3 predVGrid = obsSdf->getGridCoords(make_float3(predV));
if (!obsSdf->isInBoundsGradientInterp(predVGrid)) {
return;
}
const float err = obsSdf->getValueInterpolated(predVGrid)*obsSdf->resolution;
//atomicAdd(numPredictions,1);
atomicAdd(result,0.5*err*err);
}
__global__ void gpu_cullUnobservable(float4 * predVertMap,
const int predWidth,
const int predHeight,
const float4 * obsVertMap,
const int obsWidth,
const int obsHeight) {
const int predX = blockIdx.x*blockDim.x + threadIdx.x;
const int predY = blockIdx.y*blockDim.y + threadIdx.y;
if (predX >= predWidth || predY >= predHeight) { return; }
const int predIndex = predX + predY*predWidth;
const int obsX = predX*obsWidth/predWidth;
const int obsY = predY*obsHeight/predHeight;
const int obsIndex = obsX + obsY*obsWidth;
if (obsVertMap[obsIndex].w <= 0 || //obsVertMap[obsIndex].z == 0 ||
obsVertMap[obsIndex+1].w <= 0 || //obsVertMap[obsIndex+1].z == 0 ||
obsVertMap[obsIndex+obsWidth].w <= 0 || //obsVertMap[obsIndex+obsWidth].z == 0 ||
obsVertMap[obsIndex+obsWidth+1].w <= 0 //|| obsVertMap[obsIndex+obsWidth+1].z == 0
) {
predVertMap[predIndex].z = 0;
}
}
// -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=-
void normEqnsModToObs(const int dimensions,
const float4 * dLabeledPredictedVertMap,
const int width,
const int height,
const MirroredModel & model,
const SE3 T_gc,
float * dResult,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
dim3 block;
if (height == 1) {
block.x = 64; block.y = block.z = 1;
}
else {
block.x = 8; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
cudaMemset(dResult,0,(dimensions + JTJSize(dimensions) + 1)*sizeof(float));
cudaMemset(numPredictions,0,sizeof(int));
{
if (debugDataAssociation == 0) {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_normEqnsModToObs<false,false,false><<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObs<false,false,true><<<grid,block,64*dimensions*sizeof(float)>>> (dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
gpu_normEqnsModToObs<false,true,false><<<grid,block,64*dimensions*sizeof(float)>>> (dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObs<false,true,true><<<grid,block,64*dimensions*sizeof(float)>>> (dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
else {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_normEqnsModToObs<true,false,false><<<grid,block,64*dimensions*sizeof(float)>>> (dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObs<true,false,true><<<grid,block,64*dimensions*sizeof(float)>>> (dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
gpu_normEqnsModToObs<true,true,false><<<grid,block,64*dimensions*sizeof(float)>>> (dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObs<true,true,true><<<grid,block,64*dimensions*sizeof(float)>>> (dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
#ifdef CUDA_ERR_CHECK
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("normEqnsModToObs: %s\n",cudaGetErrorString(err));
}
#endif
}
}
void normEqnsModToObsTruncated(const int dimensions,
const float4 * dLabeledPredictedVertMap,
const int width,
const int height,
const MirroredModel & model,
const float truncationDistance,
float * dResult,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
dim3 block;
if (height == 1) {
block.x = 64; block.y = block.z = 1;
}
else {
block.x = 8; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
cudaMemset(dResult,0,(dimensions + JTJSize(dimensions) + 1)*sizeof(float));
cudaMemset(numPredictions,0,sizeof(int));
{
if (debugDataAssociation == 0) {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_normEqnsModToObsTruncated<false,false,false><<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsTruncated<false,false,true> <<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
gpu_normEqnsModToObsTruncated<false,true,false> <<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsTruncated<false,true,true> <<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
else {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_normEqnsModToObsTruncated<true,false,false> <<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsTruncated<true,false,true> <<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
gpu_normEqnsModToObsTruncated<true,true,false> <<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsTruncated<true,true,true> <<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
#ifdef CUDA_ERR_CHECK
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("normEqnsModToObs: %s\n",cudaGetErrorString(err));
}
#endif
}
}
void normEqnsModToObsReduced(const int dims,
const int reductionDims,
const float * d_dtheta_dalpha,
const float4 * dLabeledPredictedVertMap,
const int width,
const int height,
const MirroredModel & model,
float * dResult,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
dim3 block;
if (height == 1) {
block.x = 64; block.y = block.z = 1;
}
else {
block.x = 8; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
cudaMemset(dResult,0,(reductionDims + JTJSize(reductionDims) + 1)*sizeof(float));
{
if (debugDataAssociation == 0) {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_normEqnsModToObsReduced<false,false,false><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>>(dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsReduced<false,false,true><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
gpu_normEqnsModToObsReduced<false,true,false><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsReduced<false,true,true><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
else {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_normEqnsModToObsReduced<true,false,false><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsReduced<true,false,true><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
gpu_normEqnsModToObsReduced<true,true,false><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsReduced<true,true,true><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
#ifdef CUDA_ERR_CHECK
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("normEqnsModToObsReduced: %s\n",cudaGetErrorString(err));
}
#endif
}
}
void normEqnsModToObsParamMap(const int dims,
const int reductionDims,
const int * dMapping,
const float4 * dLabeledPredictedVertMap,
const int width,
const int height,
const MirroredModel & model,
float * dResult,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
dim3 block;
if (height == 1) {
block.x = 64; block.y = block.z = 1;
}
else {
block.x = 8; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
cudaMemset(dResult,0,(reductionDims + JTJSize(reductionDims) + 1)*sizeof(float));
{
if (debugDataAssociation == 0) {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_normEqnsModToObsParamMap<false,false,false><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>>(dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsParamMap<false,false,true><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
gpu_normEqnsModToObsParamMap<false,true,false><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsParamMap<false,true,true><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
else {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_normEqnsModToObsParamMap<true,false,false><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsParamMap<true,false,true><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
gpu_normEqnsModToObsParamMap<true,true,false><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsParamMap<true,true,true><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
#ifdef CUDA_ERR_CHECK
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("normEqnsModToObsReduced: %s\n",cudaGetErrorString(err));
}
#endif
}
}
void splatObsSdfZeros(const float4 * dObsVertMap,
const int width,
const int height,
const SE3 & T_cm,
const Grid3D<float> * dObsSdf,
const uint3 sdfDim,
const float focalLength) {
dim3 block(8,8,4);
dim3 grid(sdfDim.x / block.x, sdfDim.y / block.y, sdfDim.z / block.z );
gpu_splatObsSdf<<<grid,block>>>(dObsVertMap,
width,
height,
T_cm,
dObsSdf,
focalLength);
}
void computeTruncatedObsSdf(const float4 * dObsVertMap,
const int width,
const int height,
const SE3 & T_mc,
const Grid3D<float> * dObsSdf,
const uint3 sdfDim,
const float truncationDist) {
dim3 block(8,8,4);
dim3 grid(sdfDim.x / block.x, sdfDim.y / block.y, sdfDim.z / block.z );
gpu_clearObsSdf<<<grid,block>>>(dObsSdf, truncationDist);
block = dim3(16,8,2);
grid = dim3( ceil( width / (float)block.x), ceil(height / (float)block.y ), 1);
gpu_computeTruncatedObsDf<<<grid,block>>>(dObsVertMap,width,height,T_mc,dObsSdf,truncationDist);
//gpu_signTruncatedObsDf<<<grid,block>>>(dObsVertMap,width,height,T_cm,dObsSdf,focalLength);
}
float errorModToObs(const float4 *dLabeledPredictedVertMap,
const int width,
const int height,
const Grid3D<float> *dObsSdf) {
dim3 block(16,8);
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
static MirroredVector<float> error(1);
cudaMemset(error.devicePtr(),0,sizeof(float));
gpu_errorModToObs<<<grid,block>>>(dLabeledPredictedVertMap,width,height,dObsSdf,error.devicePtr());
error.syncDeviceToHost();
return error.hostPtr()[0];
}
void cullUnobservable_(float4 * predVertMap,
const int predWidth,
const int predHeight,
const float4 * obsVertMap,
const int obsWidth,
const int obsHeight,
const cudaStream_t stream) {
dim3 block(8,8,1);
dim3 grid( ceil( predWidth / (float)block.x), ceil(predHeight / (float)block.y ));
gpu_cullUnobservable<<<grid,block,0,stream>>>(predVertMap,predWidth,predHeight,
obsVertMap,obsWidth,obsHeight);
}
}
|
47fec1bd70406766654d0a607316d937e93ab78c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*The number of threads per block and the number of blocks per grid specified in the <<<...>>> syntax can be of type int or dim3.
Two-dimensional blocks or grids can be specified as in the example above.
Each block within the grid can be identified by a one-dimensional, two-dimensional, or three-dimensional index accessible
within the kernel through the built-in blockIdx variable. The dimension of the thread block is accessible within the kernel
through the built-in blockDim variable.
Extending the previous MatAdd() example to handle multiple blocks, the code becomes as follows.
*/
#include <stdio.h>
#define N 1024
__device__ int A[N][N];
__device__ int B[N][N];
__device__ int C[N][N];
__global__ void MatAdd()
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < N && j < N)
C[i][j] = A[i][j] + B[i][j];
}
int main()
{
// Kernel invocation
dim3 threadsPerBlock(16, 16);
dim3 numBlocks(N / threadsPerBlock.x, N / threadsPerBlock.y);
hipLaunchKernelGGL(( MatAdd), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, );
hipDeviceSynchronize();
}
/*A thread block size of 16x16 (256 threads), although arbitrary in this case, is a common choice. The grid is created with enough
blocks to have one thread per matrix element as before. For simplicity, this example assumes that the number of threads per
grid in each dimension is evenly divisible by the number of threads per block in that dimension, although that need not be
the case.
Thread blocks are required to execute independently: It must be possible to execute them in any order, in parallel or in series.
This independence requirement allows thread blocks to be scheduled in any order across any number of cores as illustrated by
Figure 5, enabling programmers to write code that scales with the number of cores.
Threads within a block can cooperate by sharing data through some shared memory and by synchronizing their execution to
coordinate memory accesses. More precisely, one can specify synchronization points in the kernel by calling the __syncthreads()
intrinsic function; __syncthreads() acts as a barrier at which all threads in the block must wait before any is allowed to
proceed. Shared Memory gives an example of using shared memory.
For efficient cooperation, the shared memory is expected to be a low-latency memory near each processor core
(much like an L1 cache) and __syncthreads() is expected to be lightweight.
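
As a minimal illustrative sketch (not part of the original example), a kernel that
stages one tile per block in shared memory and then reverses it within the block
could look like the following; it assumes blocks of at most 256 threads and is only
meant to show the load / __syncthreads() / use pattern:

    __global__ void ReverseTile(const int *in, int *out, int n)
    {
        __shared__ int tile[256];                  // one element per thread of the block
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n)
            tile[threadIdx.x] = in[i];             // stage this block's slice of the input
        __syncthreads();                           // all loads above are now visible block-wide
        int j = blockIdx.x * blockDim.x + (blockDim.x - 1 - threadIdx.x);
        if (i < n && j < n)
            out[i] = tile[blockDim.x - 1 - threadIdx.x]; // read a value loaded by another thread
    }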
*/ | 47fec1bd70406766654d0a607316d937e93ab78c.cu | /*The number of threads per block and the number of blocks per grid specified in the <<<...>>> syntax can be of type int or dim3.
Two-dimensional blocks or grids can be specified as in the example above.
Each block within the grid can be identified by a one-dimensional, two-dimensional, or three-dimensional index accessible
within the kernel through the built-in blockIdx variable. The dimension of the thread block is accessible within the kernel
through the built-in blockDim variable.
Extending the previous MatAdd() example to handle multiple blocks, the code becomes as follows.
*/
#include <stdio.h>
#define N 1024
__device__ int A[N][N];
__device__ int B[N][N];
__device__ int C[N][N];
__global__ void MatAdd()
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < N && j < N)
C[i][j] = A[i][j] + B[i][j];
}
int main()
{
// Kernel invocation
dim3 threadsPerBlock(16, 16);
dim3 numBlocks(N / threadsPerBlock.x, N / threadsPerBlock.y);
MatAdd<<<numBlocks, threadsPerBlock>>>();
cudaDeviceSynchronize();
}
/*A thread block size of 16x16 (256 threads), although arbitrary in this case, is a common choice. The grid is created with enough
blocks to have one thread per matrix element as before. For simplicity, this example assumes that the number of threads per
grid in each dimension is evenly divisible by the number of threads per block in that dimension, although that need not be
the case.
Thread blocks are required to execute independently: It must be possible to execute them in any order, in parallel or in series.
This independence requirement allows thread blocks to be scheduled in any order across any number of cores as illustrated by
Figure 5, enabling programmers to write code that scales with the number of cores.
Threads within a block can cooperate by sharing data through some shared memory and by synchronizing their execution to
coordinate memory accesses. More precisely, one can specify synchronization points in the kernel by calling the __syncthreads()
intrinsic function; __syncthreads() acts as a barrier at which all threads in the block must wait before any is allowed to
proceed. Shared Memory gives an example of using shared memory.
For efficient cooperation, the shared memory is expected to be a low-latency memory near each processor core
(much like an L1 cache) and __syncthreads() is expected to be lightweight.
*/ |
20cc14e8b06b3d618cf736d0991497a1072766ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <THH/THHAtomics.cuh>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
#define THREADS_PER_BLOCK 1024
inline int GET_BLOCKS(const int N) {
int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
int max_block_num = 65000;
return min(optimal_block_num, max_block_num);
}
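// MaskedIm2colForward: one thread per (masked pixel, channel) pair. For each
// masked location it copies the kernel_h x kernel_w patch whose top-left corner
// is (h_col - pad_h, w_col - pad_w) into one column of data_col, writing zeros
// where the patch falls outside the image.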
template <typename scalar_t>
__global__ void MaskedIm2colForward(const int n, const scalar_t *data_im,
const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int64_t *mask_h_idx,
const int64_t *mask_w_idx,
const int mask_cnt, scalar_t *data_col) {
// mask_cnt * channels
CUDA_1D_KERNEL_LOOP(index, n) {
const int m_index = index % mask_cnt;
const int h_col = mask_h_idx[m_index];
const int w_col = mask_w_idx[m_index];
const int c_im = index / mask_cnt;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col - pad_h;
const int w_offset = w_col - pad_w;
scalar_t *data_col_ptr = data_col + c_col * mask_cnt + m_index;
for (int i = 0; i < kernel_h; ++i) {
int h_im = h_offset + i;
for (int j = 0; j < kernel_w; ++j) {
int w_im = w_offset + j;
if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
*data_col_ptr =
(scalar_t)data_im[(c_im * height + h_im) * width + w_im];
} else {
*data_col_ptr = 0.0;
}
data_col_ptr += mask_cnt;
}
}
}
}
int MaskedIm2colForwardLaucher(const at::Tensor bottom_data, const int height,
const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const at::Tensor mask_h_idx,
const at::Tensor mask_w_idx, const int mask_cnt,
at::Tensor top_data) {
const int output_size = mask_cnt * channels;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
bottom_data.scalar_type(), "MaskedIm2colLaucherForward", ([&] {
const scalar_t *bottom_data_ = bottom_data.data<scalar_t>();
const int64_t *mask_h_idx_ = mask_h_idx.data<int64_t>();
const int64_t *mask_w_idx_ = mask_w_idx.data<int64_t>();
scalar_t *top_data_ = top_data.data<scalar_t>();
hipLaunchKernelGGL(( MaskedIm2colForward<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0,
output_size, bottom_data_, height, width, kernel_h, kernel_w,
pad_h, pad_w, mask_h_idx_, mask_w_idx_, mask_cnt, top_data_);
}));
THCudaCheck(hipGetLastError());
return 1;
}
template <typename scalar_t>
__global__ void MaskedCol2imForward(const int n, const scalar_t *data_col,
const int height, const int width,
const int channels,
const int64_t *mask_h_idx,
const int64_t *mask_w_idx,
const int mask_cnt, scalar_t *data_im) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int m_index = index % mask_cnt;
const int h_im = mask_h_idx[m_index];
const int w_im = mask_w_idx[m_index];
const int c_im = index / mask_cnt;
// compute the start and end of the output
data_im[(c_im * height + h_im) * width + w_im] = data_col[index];
}
}
int MaskedCol2imForwardLaucher(const at::Tensor bottom_data, const int height,
const int width, const int channels,
const at::Tensor mask_h_idx,
const at::Tensor mask_w_idx, const int mask_cnt,
at::Tensor top_data) {
const int output_size = mask_cnt * channels;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
bottom_data.scalar_type(), "MaskedCol2imLaucherForward", ([&] {
const scalar_t *bottom_data_ = bottom_data.data<scalar_t>();
const int64_t *mask_h_idx_ = mask_h_idx.data<int64_t>();
const int64_t *mask_w_idx_ = mask_w_idx.data<int64_t>();
scalar_t *top_data_ = top_data.data<scalar_t>();
hipLaunchKernelGGL(( MaskedCol2imForward<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0,
output_size, bottom_data_, height, width, channels, mask_h_idx_,
mask_w_idx_, mask_cnt, top_data_);
}));
THCudaCheck(hipGetLastError());
return 1;
}
| 20cc14e8b06b3d618cf736d0991497a1072766ca.cu | #include <ATen/ATen.h>
#include <THC/THCAtomics.cuh>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
#define THREADS_PER_BLOCK 1024
inline int GET_BLOCKS(const int N) {
int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
int max_block_num = 65000;
return min(optimal_block_num, max_block_num);
}
template <typename scalar_t>
__global__ void MaskedIm2colForward(const int n, const scalar_t *data_im,
const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int64_t *mask_h_idx,
const int64_t *mask_w_idx,
const int mask_cnt, scalar_t *data_col) {
// mask_cnt * channels
CUDA_1D_KERNEL_LOOP(index, n) {
const int m_index = index % mask_cnt;
const int h_col = mask_h_idx[m_index];
const int w_col = mask_w_idx[m_index];
const int c_im = index / mask_cnt;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col - pad_h;
const int w_offset = w_col - pad_w;
scalar_t *data_col_ptr = data_col + c_col * mask_cnt + m_index;
for (int i = 0; i < kernel_h; ++i) {
int h_im = h_offset + i;
for (int j = 0; j < kernel_w; ++j) {
int w_im = w_offset + j;
if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
*data_col_ptr =
(scalar_t)data_im[(c_im * height + h_im) * width + w_im];
} else {
*data_col_ptr = 0.0;
}
data_col_ptr += mask_cnt;
}
}
}
}
int MaskedIm2colForwardLaucher(const at::Tensor bottom_data, const int height,
const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const at::Tensor mask_h_idx,
const at::Tensor mask_w_idx, const int mask_cnt,
at::Tensor top_data) {
const int output_size = mask_cnt * channels;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
bottom_data.scalar_type(), "MaskedIm2colLaucherForward", ([&] {
const scalar_t *bottom_data_ = bottom_data.data<scalar_t>();
const int64_t *mask_h_idx_ = mask_h_idx.data<int64_t>();
const int64_t *mask_w_idx_ = mask_w_idx.data<int64_t>();
scalar_t *top_data_ = top_data.data<scalar_t>();
MaskedIm2colForward<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
output_size, bottom_data_, height, width, kernel_h, kernel_w,
pad_h, pad_w, mask_h_idx_, mask_w_idx_, mask_cnt, top_data_);
}));
THCudaCheck(cudaGetLastError());
return 1;
}
template <typename scalar_t>
__global__ void MaskedCol2imForward(const int n, const scalar_t *data_col,
const int height, const int width,
const int channels,
const int64_t *mask_h_idx,
const int64_t *mask_w_idx,
const int mask_cnt, scalar_t *data_im) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int m_index = index % mask_cnt;
const int h_im = mask_h_idx[m_index];
const int w_im = mask_w_idx[m_index];
const int c_im = index / mask_cnt;
// compute the start and end of the output
data_im[(c_im * height + h_im) * width + w_im] = data_col[index];
}
}
int MaskedCol2imForwardLaucher(const at::Tensor bottom_data, const int height,
const int width, const int channels,
const at::Tensor mask_h_idx,
const at::Tensor mask_w_idx, const int mask_cnt,
at::Tensor top_data) {
const int output_size = mask_cnt * channels;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
bottom_data.scalar_type(), "MaskedCol2imLaucherForward", ([&] {
const scalar_t *bottom_data_ = bottom_data.data<scalar_t>();
const int64_t *mask_h_idx_ = mask_h_idx.data<int64_t>();
const int64_t *mask_w_idx_ = mask_w_idx.data<int64_t>();
scalar_t *top_data_ = top_data.data<scalar_t>();
MaskedCol2imForward<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
output_size, bottom_data_, height, width, channels, mask_h_idx_,
mask_w_idx_, mask_cnt, top_data_);
}));
THCudaCheck(cudaGetLastError());
return 1;
}
|
7de8a67b15c2fa8fe7a70e4af756e347e345bb3f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <limits.h>
#include <float.h>
#include <math.h>
#include <stdio.h>
#include "utils.h"
__global__
void histogram_kernel(unsigned int* d_bins, const float* d_in, const int bin_count, const float lum_min, const float lum_max, const int size) {
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if(mid >= size)
return;
float lum_range = lum_max - lum_min;
    int bin = min((int)(((d_in[mid]-lum_min) / lum_range) * bin_count), bin_count - 1); // clamp so lum_max falls into the last bin
atomicAdd(&d_bins[bin], 1);
}
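// Hillis-Steele style inclusive scan over the histogram bins: on each step a
// thread adds the value `s` positions to its left. The two __syncthreads()
// calls only synchronize within a block, so the result is correct only when
// all bins are handled by a single thread block (the launch below uses
// 1024-thread blocks).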
__global__
void scan_kernel(unsigned int* d_bins, int size) {
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if(mid >= size)
return;
for(int s = 1; s <= size; s *= 2) {
int spot = mid - s;
unsigned int val = 0;
if(spot >= 0)
val = d_bins[spot];
__syncthreads();
if(spot >= 0)
d_bins[mid] += val;
__syncthreads();
}
}
__global__
void reduce_minmax_kernel(const float* const d_in, float* d_out, const size_t size, int minmax) {
extern __shared__ float shared[];
int mid = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
if(mid < size) {
shared[tid] = d_in[mid];
} else {
if(minmax == 0)
shared[tid] = FLT_MAX;
else
shared[tid] = -FLT_MAX;
}
__syncthreads();
if(mid >= size) {
if(tid == 0) {
if(minmax == 0)
d_out[blockIdx.x] = FLT_MAX;
else
d_out[blockIdx.x] = -FLT_MAX;
}
return;
}
for(unsigned int s = blockDim.x/2; s > 0; s /= 2) {
if(tid < s) {
if(minmax == 0) {
shared[tid] = min(shared[tid], shared[tid+s]);
} else {
shared[tid] = max(shared[tid], shared[tid+s]);
}
}
__syncthreads();
}
if(tid == 0) {
d_out[blockIdx.x] = shared[0];
}
}
int get_max_size(int n, int d) {
return (int)ceil( (float)n/(float)d ) + 1;
}
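// Iteratively reduces d_in to a single min or max (minmax == 0 selects min,
// otherwise max). Each pass launches one block per BLOCK_SIZE chunk, pads the
// tail with +/-FLT_MAX sentinels, writes one partial result per block, and
// feeds those partials back in; the final pass fits in one block and element 0
// of its output is returned.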
float reduce_minmax(const float* const d_in, const size_t size, int minmax) {
int BLOCK_SIZE = 32;
size_t curr_size = size;
float* d_curr_in;
checkCudaErrors(hipMalloc(&d_curr_in, sizeof(float) * size));
checkCudaErrors(hipMemcpy(d_curr_in, d_in, sizeof(float) * size, hipMemcpyDeviceToDevice));
float* d_curr_out;
dim3 thread_dim(BLOCK_SIZE);
const int shared_mem_size = sizeof(float)*BLOCK_SIZE;
while(1) {
checkCudaErrors(hipMalloc(&d_curr_out, sizeof(float) * get_max_size(curr_size, BLOCK_SIZE)));
        dim3 block_dim(get_max_size(curr_size, BLOCK_SIZE)); // one block per BLOCK_SIZE chunk of the values still being reduced
hipLaunchKernelGGL(( reduce_minmax_kernel), dim3(block_dim), dim3(thread_dim), shared_mem_size, 0,
d_curr_in,
d_curr_out,
curr_size,
minmax
);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(d_curr_in));
d_curr_in = d_curr_out;
if(curr_size < BLOCK_SIZE)
break;
curr_size = get_max_size(curr_size, BLOCK_SIZE);
}
float h_out;
hipMemcpy(&h_out, d_curr_out, sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_curr_out);
return h_out;
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
const size_t size = numRows*numCols;
min_logLum = reduce_minmax(d_logLuminance, size, 0);
max_logLum = reduce_minmax(d_logLuminance, size, 1);
printf("got min of %f\n", min_logLum);
printf("got max of %f\n", max_logLum);
printf("numBins %d\n", numBins);
unsigned int* d_bins;
size_t histo_size = sizeof(unsigned int)*numBins;
checkCudaErrors(hipMalloc(&d_bins, histo_size));
checkCudaErrors(hipMemset(d_bins, 0, histo_size));
dim3 thread_dim(1024);
dim3 hist_block_dim(get_max_size(size, thread_dim.x));
hipLaunchKernelGGL(( histogram_kernel), dim3(hist_block_dim), dim3(thread_dim), 0, 0, d_bins, d_logLuminance, numBins, min_logLum, max_logLum, size);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
unsigned int h_out[100];
hipMemcpy(&h_out, d_bins, sizeof(unsigned int)*100, hipMemcpyDeviceToHost);
for(int i = 0; i < 100; i++)
printf("hist out %d\n", h_out[i]);
dim3 scan_block_dim(get_max_size(numBins, thread_dim.x));
hipLaunchKernelGGL(( scan_kernel), dim3(scan_block_dim), dim3(thread_dim), 0, 0, d_bins, numBins);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipMemcpy(&h_out, d_bins, sizeof(unsigned int)*100, hipMemcpyDeviceToHost);
for(int i = 0; i < 100; i++)
printf("cdf out %d\n", h_out[i]);
hipMemcpy(d_cdf, d_bins, histo_size, hipMemcpyDeviceToDevice);
checkCudaErrors(hipFree(d_bins));
}
| 7de8a67b15c2fa8fe7a70e4af756e347e345bb3f.cu | #include <limits.h>
#include <float.h>
#include <math.h>
#include <stdio.h>
#include "utils.h"
__global__
void histogram_kernel(unsigned int* d_bins, const float* d_in, const int bin_count, const float lum_min, const float lum_max, const int size) {
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if(mid >= size)
return;
float lum_range = lum_max - lum_min;
    int bin = min((int)(((d_in[mid]-lum_min) / lum_range) * bin_count), bin_count - 1); // clamp so lum_max falls into the last bin
atomicAdd(&d_bins[bin], 1);
}
__global__
void scan_kernel(unsigned int* d_bins, int size) {
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if(mid >= size)
return;
for(int s = 1; s <= size; s *= 2) {
int spot = mid - s;
unsigned int val = 0;
if(spot >= 0)
val = d_bins[spot];
__syncthreads();
if(spot >= 0)
d_bins[mid] += val;
__syncthreads();
}
}
__global__
void reduce_minmax_kernel(const float* const d_in, float* d_out, const size_t size, int minmax) {
extern __shared__ float shared[];
int mid = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
if(mid < size) {
shared[tid] = d_in[mid];
} else {
if(minmax == 0)
shared[tid] = FLT_MAX;
else
shared[tid] = -FLT_MAX;
}
__syncthreads();
if(mid >= size) {
if(tid == 0) {
if(minmax == 0)
d_out[blockIdx.x] = FLT_MAX;
else
d_out[blockIdx.x] = -FLT_MAX;
}
return;
}
for(unsigned int s = blockDim.x/2; s > 0; s /= 2) {
if(tid < s) {
if(minmax == 0) {
shared[tid] = min(shared[tid], shared[tid+s]);
} else {
shared[tid] = max(shared[tid], shared[tid+s]);
}
}
__syncthreads();
}
if(tid == 0) {
d_out[blockIdx.x] = shared[0];
}
}
int get_max_size(int n, int d) {
return (int)ceil( (float)n/(float)d ) + 1;
}
float reduce_minmax(const float* const d_in, const size_t size, int minmax) {
int BLOCK_SIZE = 32;
size_t curr_size = size;
float* d_curr_in;
checkCudaErrors(cudaMalloc(&d_curr_in, sizeof(float) * size));
checkCudaErrors(cudaMemcpy(d_curr_in, d_in, sizeof(float) * size, cudaMemcpyDeviceToDevice));
float* d_curr_out;
dim3 thread_dim(BLOCK_SIZE);
const int shared_mem_size = sizeof(float)*BLOCK_SIZE;
while(1) {
checkCudaErrors(cudaMalloc(&d_curr_out, sizeof(float) * get_max_size(curr_size, BLOCK_SIZE)));
        dim3 block_dim(get_max_size(curr_size, BLOCK_SIZE)); // one block per BLOCK_SIZE chunk of the values still being reduced
reduce_minmax_kernel<<<block_dim, thread_dim, shared_mem_size>>>(
d_curr_in,
d_curr_out,
curr_size,
minmax
);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_curr_in));
d_curr_in = d_curr_out;
if(curr_size < BLOCK_SIZE)
break;
curr_size = get_max_size(curr_size, BLOCK_SIZE);
}
float h_out;
cudaMemcpy(&h_out, d_curr_out, sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_curr_out);
return h_out;
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
const size_t size = numRows*numCols;
min_logLum = reduce_minmax(d_logLuminance, size, 0);
max_logLum = reduce_minmax(d_logLuminance, size, 1);
printf("got min of %f\n", min_logLum);
printf("got max of %f\n", max_logLum);
printf("numBins %d\n", numBins);
unsigned int* d_bins;
size_t histo_size = sizeof(unsigned int)*numBins;
checkCudaErrors(cudaMalloc(&d_bins, histo_size));
checkCudaErrors(cudaMemset(d_bins, 0, histo_size));
dim3 thread_dim(1024);
dim3 hist_block_dim(get_max_size(size, thread_dim.x));
histogram_kernel<<<hist_block_dim, thread_dim>>>(d_bins, d_logLuminance, numBins, min_logLum, max_logLum, size);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
unsigned int h_out[100];
cudaMemcpy(&h_out, d_bins, sizeof(unsigned int)*100, cudaMemcpyDeviceToHost);
for(int i = 0; i < 100; i++)
printf("hist out %d\n", h_out[i]);
dim3 scan_block_dim(get_max_size(numBins, thread_dim.x));
scan_kernel<<<scan_block_dim, thread_dim>>>(d_bins, numBins);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
cudaMemcpy(&h_out, d_bins, sizeof(unsigned int)*100, cudaMemcpyDeviceToHost);
for(int i = 0; i < 100; i++)
printf("cdf out %d\n", h_out[i]);
cudaMemcpy(d_cdf, d_bins, histo_size, cudaMemcpyDeviceToDevice);
checkCudaErrors(cudaFree(d_bins));
}
|
b4749a6ab55da7b28f16f59cf36ab1bd5cf821a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*author: Zeke Elkins
*date: 3/27/14
*description: a CUDA program to add two numbers
*/
#include <iostream>
using namespace std;
//DEVICE code
__global__ void add(int *a, int *b, int *c) {
*c = *a + *b;
}
int main(void) {
int a, b, c; //host copies of a, b, c
int *d_a, *d_b, *d_c; //device copies of a, b, c
int size = sizeof(int);
//allocate space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
//setup input values
a = 2;
b = 7;
// copy inputs to device
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
//launch add() kernel on GPU
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c);
//copy result back to host
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
//output result
cout << a << " plus " << b << " equals " << c << endl;
//cleanup
hipFree(d_a); hipFree(d_b); hipFree(d_c);
return 0;
} | b4749a6ab55da7b28f16f59cf36ab1bd5cf821a1.cu | /*author: Zeke Elkins
*date: 3/27/14
*description: a CUDA program to add two numbers
*/
#include <iostream>
using namespace std;
//DEVICE code
__global__ void add(int *a, int *b, int *c) {
*c = *a + *b;
}
int main(void) {
int a, b, c; //host copies of a, b, c
int *d_a, *d_b, *d_c; //device copies of a, b, c
int size = sizeof(int);
//allocate space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
//setup input values
a = 2;
b = 7;
// copy inputs to device
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
//launch add() kernel on GPU
add<<<1,1>>>(d_a, d_b, d_c);
//copy result back to host
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
//output result
cout << a << " plus " << b << " equals " << c << endl;
//cleanup
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
} |
a7f9492a6b70997332b6bd1b9f2b0da4f85d0a5e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define B 64
#define threadNum 32
const int INF = 1000000000;
void input(char *inFileName);
void output(char *outFileName);
void block_FW();
int ceil(int a, int b);
__global__ void phase1(int* dist, int Round, int n, size_t pitch);
__global__ void phase2(int* dist, int Round, int n, size_t pitch);
__global__ void phase3(int* dist, int Round, int n, size_t pitch);
int n, m;
int *Dist = NULL;
int *device_Dist = NULL;
size_t pitch;
int main(int argc, char* argv[]) {
input(argv[1]);
block_FW();
output(argv[2]);
hipHostFree(Dist);
return 0;
}
void input(char* infile) {
FILE* file = fopen(infile, "rb");
fread(&n, sizeof(int), 1, file);
fread(&m, sizeof(int), 1, file);
hipHostMalloc(&Dist, (size_t)n*n*sizeof(int));
for (int i = 0; i < n; ++ i) {
for (int j = 0; j < n; ++ j) {
Dist[i*n+j] = (i==j) ? 0 : INF;
}
}
int pair[3];
for (int i = 0; i < m; ++ i) {
fread(pair, sizeof(int), 3, file);
Dist[pair[0]*n+pair[1]] = pair[2];
}
fclose(file);
}
void output(char *outFileName) {
FILE *outfile = fopen(outFileName, "wb");
fwrite(Dist, sizeof(int), n*n, outfile);
fclose(outfile);
}
int ceil(int a, int b) {
return (a + b - 1) / b;
}
void block_FW() {
unsigned int round = ceil(n, B);
dim3 block_p1 = {1, 1};
dim3 block_p2 = {2, round};
dim3 block_p3 = {round, round};
dim3 threads = {threadNum, threadNum};
hipMallocPitch(&device_Dist, &pitch, (size_t)n*sizeof(int), (size_t)n);
hipMemcpy2D(device_Dist, pitch, Dist, (size_t)n*sizeof(int), (size_t)n*sizeof(int), (size_t)n, hipMemcpyHostToDevice);
for (unsigned int r = 0; r < round; ++r) {
hipLaunchKernelGGL(( phase1), dim3(block_p1), dim3(threads), 0, 0, device_Dist, r, n, pitch);
hipLaunchKernelGGL(( phase2), dim3(block_p2), dim3(threads), 0, 0, device_Dist, r, n, pitch);
hipLaunchKernelGGL(( phase3), dim3(block_p3), dim3(threads), 0, 0, device_Dist, r, n, pitch);
}
hipMemcpy2D(Dist, (size_t)n*sizeof(int), device_Dist, pitch, (size_t)n*sizeof(int), (size_t)n, hipMemcpyDeviceToHost);
hipFree(device_Dist);
}
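// Blocked Floyd-Warshall kernels. In round r, phase1 relaxes the pivot block
// (r,r) entirely in shared memory, phase2 relaxes the blocks in the pivot row
// and pivot column, and phase3 relaxes all remaining blocks against the pivot
// row/column tiles. Each 32x32 thread block covers a 64x64 tile, so every
// thread handles a 2x2 sub-tile (shift = B / threadNum).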
__global__ void phase1(int* dist, int Round, int n, size_t pitch){
int base = Round*B;
int shift = B/threadNum;
int i_st = base + threadIdx.x*shift, i_ed = i_st + shift;
int j_st = base + threadIdx.y*shift, j_ed = j_st + shift;
if(i_ed > n){
i_ed = n;
}
if(j_ed > n){
j_ed = n;
}
__shared__ int sm[B][B];
#pragma unroll
for(int i=i_st ; i<i_ed ; ++i){
#pragma unroll
for(int j=j_st ; j<j_ed ; ++j){
int *dij = (int*)((char*)dist+pitch*i)+j;
sm[i-base][j-base] = *dij;
}
}
__syncthreads();
int len = ((Round+1)*B < n) ? B : n - (Round)*B;
#pragma unroll
for (int k = 0; k < len; ++k) {
#pragma unroll
for(int i = i_st; i<i_ed ; ++i){
#pragma unroll
for(int j = j_st ; j<j_ed ; ++j){
int relax = sm[i-base][k] + sm[k][j-base];
if(relax < sm[i-base][j-base]){
sm[i-base][j-base] = relax;
}
}
}
__syncthreads();
}
#pragma unroll
for(int i=i_st ; i<i_ed ; ++i){
#pragma unroll
for(int j=j_st ; j<j_ed ; ++j){
int *dij = (int*)((char*)dist+pitch*i)+j;
*dij = sm[i-base][j-base];
}
}
}
__global__ void phase2(int* dist, int Round, int n, size_t pitch){
if(blockIdx.y==Round)
return;
__shared__ int sm[2][B][B];
int base_i = (1-blockIdx.x)*Round*B + blockIdx.x*blockIdx.y*B;
int base_j = blockIdx.x*Round*B + (1-blockIdx.x)*blockIdx.y*B;
int shift = B/threadNum;
int i_st = base_i + threadIdx.x*shift, i_ed = i_st + shift;
int j_st = base_j + threadIdx.y*shift, j_ed = j_st + shift;
#pragma unroll
for(int i=i_st ; i<i_ed ; ++i){
#pragma unroll
for(int j=j_st ; j<j_ed ; ++j){
if(i<n && j<n){
int *dij = (int*)((char*)dist+pitch*i)+j;
sm[0][i-base_i][j-base_j] = *dij;
}
if(Round*B+(i-base_i)<n && Round*B+(j-base_j)<n){
int *dkk = (int*)((char*)dist+pitch*(Round*B+(i-base_i))) + Round*B+(j-base_j);
sm[1][i-base_i][j-base_j] = *dkk;
}
}
}
__syncthreads();
if(i_ed > n){
i_ed = n;
}
if(j_ed > n){
j_ed = n;
}
int len = ((Round+1)*B < n) ? B : n - (Round)*B;
int i_offset = i_st-base_i, i_len = i_ed - i_st;
int j_offset = j_st-base_j, j_len = j_ed - j_st;
#pragma unroll
for(int i=i_offset ; i<i_offset+i_len ; ++i){
#pragma unroll
for(int j=j_offset ; j<j_offset+j_len ; ++j){
#pragma unroll
for (int k = 0; k < len; ++k) {
int relax = sm[1-blockIdx.x][i][k] + sm[blockIdx.x][k][j];
if(relax < sm[0][i][j]){
sm[0][i][j] = relax;
}
}
int *dij = (int*)((char*)dist+pitch*(base_i+i))+base_j+j;
*dij = sm[0][i][j];
}
}
}
__global__ void phase3(int* dist, int Round, int n, size_t pitch){
if(blockIdx.x==Round || blockIdx.y==Round)
return;
__shared__ int sm[2][B][B];
int base_i = blockIdx.x*B;
int base_j = blockIdx.y*B;
int shift = B/threadNum;
int i_st = base_i + threadIdx.x*shift, i_ed = i_st + shift;
int j_st = base_j + threadIdx.y*shift, j_ed = j_st + shift;
#pragma unroll
for(int i=i_st ; i<i_ed ; ++i){
#pragma unroll
for(int j=j_st ; j<j_ed ; ++j){
if(i<n && Round*B+(j-base_j)<n){
int *dik = (int*)((char*)dist+pitch*i)+Round*B+(j-base_j);
sm[0][j-base_j][i-base_i] = *dik;
}
if(Round*B+(i-base_i)<n && j<n){
int *dkj = (int*)((char*)dist+pitch*(Round*B+(i-base_i)))+j;
sm[1][i-base_i][j-base_j] = *dkj;
}
}
}
__syncthreads();
if(i_ed > n){
i_ed = n;
}
if(j_ed > n){
j_ed = n;
}
int len = ((Round+1)*B < n) ? B : n - (Round)*B;
int i_offset = i_st-base_i, i_len = i_ed - i_st;
int j_offset = j_st-base_j, j_len = j_ed - j_st;
#pragma unroll
for(int i = 0 ; i < i_len ; ++i){
#pragma unroll
for(int j= 0 ; j < j_len ; ++j){
int *dij = (int*)((char*)dist+pitch*(i_st+i))+j_st+j;
int ans = *dij;
#pragma unroll
for (int k = 0; k < len; ++k) {
int relax = sm[0][k][i_offset+i] + sm[1][k][j_offset+j];
if(relax < ans){
ans = relax;
}
}
*dij = ans;
}
}
} | a7f9492a6b70997332b6bd1b9f2b0da4f85d0a5e.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define B 64
#define threadNum 32
const int INF = 1000000000;
void input(char *inFileName);
void output(char *outFileName);
void block_FW();
int ceil(int a, int b);
__global__ void phase1(int* dist, int Round, int n, size_t pitch);
__global__ void phase2(int* dist, int Round, int n, size_t pitch);
__global__ void phase3(int* dist, int Round, int n, size_t pitch);
int n, m;
int *Dist = NULL;
int *device_Dist = NULL;
size_t pitch;
int main(int argc, char* argv[]) {
input(argv[1]);
block_FW();
output(argv[2]);
cudaFreeHost(Dist);
return 0;
}
void input(char* infile) {
FILE* file = fopen(infile, "rb");
fread(&n, sizeof(int), 1, file);
fread(&m, sizeof(int), 1, file);
cudaMallocHost(&Dist, (size_t)n*n*sizeof(int));
for (int i = 0; i < n; ++ i) {
for (int j = 0; j < n; ++ j) {
Dist[i*n+j] = (i==j) ? 0 : INF;
}
}
int pair[3];
for (int i = 0; i < m; ++ i) {
fread(pair, sizeof(int), 3, file);
Dist[pair[0]*n+pair[1]] = pair[2];
}
fclose(file);
}
void output(char *outFileName) {
FILE *outfile = fopen(outFileName, "wb");
fwrite(Dist, sizeof(int), n*n, outfile);
fclose(outfile);
}
int ceil(int a, int b) {
return (a + b - 1) / b;
}
void block_FW() {
unsigned int round = ceil(n, B);
dim3 block_p1 = {1, 1};
dim3 block_p2 = {2, round};
dim3 block_p3 = {round, round};
dim3 threads = {threadNum, threadNum};
cudaMallocPitch(&device_Dist, &pitch, (size_t)n*sizeof(int), (size_t)n);
cudaMemcpy2D(device_Dist, pitch, Dist, (size_t)n*sizeof(int), (size_t)n*sizeof(int), (size_t)n, cudaMemcpyHostToDevice);
for (unsigned int r = 0; r < round; ++r) {
phase1<<<block_p1, threads>>>(device_Dist, r, n, pitch);
phase2<<<block_p2, threads>>>(device_Dist, r, n, pitch);
phase3<<<block_p3, threads>>>(device_Dist, r, n, pitch);
}
cudaMemcpy2D(Dist, (size_t)n*sizeof(int), device_Dist, pitch, (size_t)n*sizeof(int), (size_t)n, cudaMemcpyDeviceToHost);
cudaFree(device_Dist);
}
__global__ void phase1(int* dist, int Round, int n, size_t pitch){
int base = Round*B;
int shift = B/threadNum;
int i_st = base + threadIdx.x*shift, i_ed = i_st + shift;
int j_st = base + threadIdx.y*shift, j_ed = j_st + shift;
if(i_ed > n){
i_ed = n;
}
if(j_ed > n){
j_ed = n;
}
__shared__ int sm[B][B];
#pragma unroll
for(int i=i_st ; i<i_ed ; ++i){
#pragma unroll
for(int j=j_st ; j<j_ed ; ++j){
int *dij = (int*)((char*)dist+pitch*i)+j;
sm[i-base][j-base] = *dij;
}
}
__syncthreads();
int len = ((Round+1)*B < n) ? B : n - (Round)*B;
#pragma unroll
for (int k = 0; k < len; ++k) {
#pragma unroll
for(int i = i_st; i<i_ed ; ++i){
#pragma unroll
for(int j = j_st ; j<j_ed ; ++j){
int relax = sm[i-base][k] + sm[k][j-base];
if(relax < sm[i-base][j-base]){
sm[i-base][j-base] = relax;
}
}
}
__syncthreads();
}
#pragma unroll
for(int i=i_st ; i<i_ed ; ++i){
#pragma unroll
for(int j=j_st ; j<j_ed ; ++j){
int *dij = (int*)((char*)dist+pitch*i)+j;
*dij = sm[i-base][j-base];
}
}
}
__global__ void phase2(int* dist, int Round, int n, size_t pitch){
if(blockIdx.y==Round)
return;
__shared__ int sm[2][B][B];
int base_i = (1-blockIdx.x)*Round*B + blockIdx.x*blockIdx.y*B;
int base_j = blockIdx.x*Round*B + (1-blockIdx.x)*blockIdx.y*B;
int shift = B/threadNum;
int i_st = base_i + threadIdx.x*shift, i_ed = i_st + shift;
int j_st = base_j + threadIdx.y*shift, j_ed = j_st + shift;
#pragma unroll
for(int i=i_st ; i<i_ed ; ++i){
#pragma unroll
for(int j=j_st ; j<j_ed ; ++j){
if(i<n && j<n){
int *dij = (int*)((char*)dist+pitch*i)+j;
sm[0][i-base_i][j-base_j] = *dij;
}
if(Round*B+(i-base_i)<n && Round*B+(j-base_j)<n){
int *dkk = (int*)((char*)dist+pitch*(Round*B+(i-base_i))) + Round*B+(j-base_j);
sm[1][i-base_i][j-base_j] = *dkk;
}
}
}
__syncthreads();
if(i_ed > n){
i_ed = n;
}
if(j_ed > n){
j_ed = n;
}
int len = ((Round+1)*B < n) ? B : n - (Round)*B;
int i_offset = i_st-base_i, i_len = i_ed - i_st;
int j_offset = j_st-base_j, j_len = j_ed - j_st;
#pragma unroll
for(int i=i_offset ; i<i_offset+i_len ; ++i){
#pragma unroll
for(int j=j_offset ; j<j_offset+j_len ; ++j){
#pragma unroll
for (int k = 0; k < len; ++k) {
int relax = sm[1-blockIdx.x][i][k] + sm[blockIdx.x][k][j];
if(relax < sm[0][i][j]){
sm[0][i][j] = relax;
}
}
int *dij = (int*)((char*)dist+pitch*(base_i+i))+base_j+j;
*dij = sm[0][i][j];
}
}
}
__global__ void phase3(int* dist, int Round, int n, size_t pitch){
if(blockIdx.x==Round || blockIdx.y==Round)
return;
__shared__ int sm[2][B][B];
int base_i = blockIdx.x*B;
int base_j = blockIdx.y*B;
int shift = B/threadNum;
int i_st = base_i + threadIdx.x*shift, i_ed = i_st + shift;
int j_st = base_j + threadIdx.y*shift, j_ed = j_st + shift;
#pragma unroll
for(int i=i_st ; i<i_ed ; ++i){
#pragma unroll
for(int j=j_st ; j<j_ed ; ++j){
if(i<n && Round*B+(j-base_j)<n){
int *dik = (int*)((char*)dist+pitch*i)+Round*B+(j-base_j);
sm[0][j-base_j][i-base_i] = *dik;
}
if(Round*B+(i-base_i)<n && j<n){
int *dkj = (int*)((char*)dist+pitch*(Round*B+(i-base_i)))+j;
sm[1][i-base_i][j-base_j] = *dkj;
}
}
}
__syncthreads();
if(i_ed > n){
i_ed = n;
}
if(j_ed > n){
j_ed = n;
}
int len = ((Round+1)*B < n) ? B : n - (Round)*B;
int i_offset = i_st-base_i, i_len = i_ed - i_st;
int j_offset = j_st-base_j, j_len = j_ed - j_st;
#pragma unroll
for(int i = 0 ; i < i_len ; ++i){
#pragma unroll
for(int j= 0 ; j < j_len ; ++j){
int *dij = (int*)((char*)dist+pitch*(i_st+i))+j_st+j;
int ans = *dij;
#pragma unroll
for (int k = 0; k < len; ++k) {
int relax = sm[0][k][i_offset+i] + sm[1][k][j_offset+j];
if(relax < ans){
ans = relax;
}
}
*dij = ans;
}
}
} |
53c2e11fedc3e04ad269bdb903a4f4da120dfd86.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
__device__ int ptr=0;
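// Micro-test kernel: builds a fully unrolled chain of dependent adds in a local
// array, seeded from the global `ptr` and published back through it so the
// computation cannot be removed entirely by the compiler.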
__global__ void a()
{
    int b[200]; // sized for the fully unrolled 200-step chain below
//atomicAdd(&ptr,1);
b[0]=ptr;
#pragma unroll
for(int i=1; i<200; i++)
{
// for(int j=1;j<90;j++)
{
//b[i][j]=b[i-1][j-1]+1;
b[i] = b[i-1]+1;
}
}
ptr=b[7]+1;
}
int main()
{
hipLaunchKernelGGL(( a), dim3(1),dim3(1), 0, 0, );
return 0;
}
| 53c2e11fedc3e04ad269bdb903a4f4da120dfd86.cu | #include <cuda_runtime.h>
#include <cuda.h>
__device__ int ptr=0;
__global__ void a()
{
    int b[200]; // sized for the fully unrolled 200-step chain below
//atomicAdd(&ptr,1);
b[0]=ptr;
#pragma unroll
for(int i=1; i<200; i++)
{
// for(int j=1;j<90;j++)
{
//b[i][j]=b[i-1][j-1]+1;
b[i] = b[i-1]+1;
}
}
ptr=b[7]+1;
}
int main()
{
a<<<1,1>>>();
return 0;
}
|
4283d613f93fab12d48a7f7ea02d50232bc4200c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2008-2013 NVIDIA Corporation
* Modifications Copyright 2019 Advanced Micro Devices, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <unittest/unittest.h>
#include <thrust/gather.h>
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/retag.h>
#include <thrust/sequence.h>
#include <algorithm>
THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN
template <class Vector>
void TestGatherSimple(void)
{
Vector map(5); // gather indices
Vector src(8); // source vector
Vector dst(5); // destination vector
map[0] = 6; map[1] = 2; map[2] = 1; map[3] = 7; map[4] = 2;
src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; src[5] = 5; src[6] = 6; src[7] = 7;
dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0;
thrust::gather(map.begin(), map.end(), src.begin(), dst.begin());
ASSERT_EQUAL(dst[0], 6);
ASSERT_EQUAL(dst[1], 2);
ASSERT_EQUAL(dst[2], 1);
ASSERT_EQUAL(dst[3], 7);
ASSERT_EQUAL(dst[4], 2);
}
DECLARE_INTEGRAL_VECTOR_UNITTEST(TestGatherSimple);
template<typename InputIterator, typename RandomAccessIterator, typename OutputIterator>
OutputIterator gather(my_system &system, InputIterator, InputIterator, RandomAccessIterator, OutputIterator result)
{
system.validate_dispatch();
return result;
}
void TestGatherDispatchExplicit()
{
thrust::device_vector<int> vec(1);
my_system sys(0);
thrust::gather(sys,
vec.begin(),
vec.end(),
vec.begin(),
vec.begin());
ASSERT_EQUAL(true, sys.is_valid());
}
DECLARE_UNITTEST(TestGatherDispatchExplicit);
template<typename InputIterator, typename RandomAccessIterator, typename OutputIterator>
OutputIterator gather(my_tag, InputIterator, InputIterator, RandomAccessIterator, OutputIterator result)
{
*result = 13;
return result;
}
void TestGatherDispatchImplicit()
{
thrust::device_vector<int> vec(1);
thrust::gather(thrust::retag<my_tag>(vec.begin()),
thrust::retag<my_tag>(vec.end()),
thrust::retag<my_tag>(vec.begin()),
thrust::retag<my_tag>(vec.begin()));
ASSERT_EQUAL(13, vec.front());
}
DECLARE_UNITTEST(TestGatherDispatchImplicit);
template <typename T>
void TestGather(const size_t n)
{
const size_t source_size = ::min((size_t) 10, 2 * n);
// source vectors to gather from
thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size);
thrust::device_vector<T> d_source = h_source;
// gather indices
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_map[i] = h_map[i] % source_size;
thrust::device_vector<unsigned int> d_map = h_map;
// gather destination
thrust::host_vector<T> h_output(n);
thrust::device_vector<T> d_output(n);
thrust::gather(h_map.begin(), h_map.end(), h_source.begin(), h_output.begin());
thrust::gather(d_map.begin(), d_map.end(), d_source.begin(), d_output.begin());
ASSERT_EQUAL(h_output, d_output);
}
DECLARE_VARIABLE_UNITTEST(TestGather);
template <typename T>
void TestGatherToDiscardIterator(const size_t n)
{
const size_t source_size = ::min((size_t) 10, 2 * n);
// source vectors to gather from
thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size);
thrust::device_vector<T> d_source = h_source;
// gather indices
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_map[i] = h_map[i] % source_size;
thrust::device_vector<unsigned int> d_map = h_map;
thrust::discard_iterator<> h_result =
thrust::gather(h_map.begin(), h_map.end(), h_source.begin(), thrust::make_discard_iterator());
thrust::discard_iterator<> d_result =
thrust::gather(d_map.begin(), d_map.end(), d_source.begin(), thrust::make_discard_iterator());
thrust::discard_iterator<> reference(n);
ASSERT_EQUAL_QUIET(reference, h_result);
ASSERT_EQUAL_QUIET(reference, d_result);
}
DECLARE_VARIABLE_UNITTEST(TestGatherToDiscardIterator);
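// gather_if copies src[map[i]] into dst[i] only where the stencil entry
// satisfies the predicate (by default, where stencil[i] converts to true);
// all other destination elements are left untouched.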
template <class Vector>
void TestGatherIfSimple(void)
{
Vector flg(5); // predicate array
Vector map(5); // gather indices
Vector src(8); // source vector
Vector dst(5); // destination vector
flg[0] = 0; flg[1] = 1; flg[2] = 0; flg[3] = 1; flg[4] = 0;
map[0] = 6; map[1] = 2; map[2] = 1; map[3] = 7; map[4] = 2;
src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; src[5] = 5; src[6] = 6; src[7] = 7;
dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0;
thrust::gather_if(map.begin(), map.end(), flg.begin(), src.begin(), dst.begin());
ASSERT_EQUAL(dst[0], 0);
ASSERT_EQUAL(dst[1], 2);
ASSERT_EQUAL(dst[2], 0);
ASSERT_EQUAL(dst[3], 7);
ASSERT_EQUAL(dst[4], 0);
}
DECLARE_INTEGRAL_VECTOR_UNITTEST(TestGatherIfSimple);
template <typename T>
struct is_even_gather_if
{
__host__ __device__
bool operator()(const T i) const
{
return (i % 2) == 0;
}
};
template<typename InputIterator1,
typename InputIterator2,
typename RandomAccessIterator,
typename OutputIterator>
OutputIterator gather_if(my_system &system,
InputIterator1,
InputIterator1,
InputIterator2,
RandomAccessIterator,
OutputIterator result)
{
system.validate_dispatch();
return result;
}
void TestGatherIfDispatchExplicit()
{
thrust::device_vector<int> vec(1);
my_system sys(0);
thrust::gather_if(sys,
vec.begin(),
vec.end(),
vec.begin(),
vec.begin(),
vec.begin());
ASSERT_EQUAL(true, sys.is_valid());
}
DECLARE_UNITTEST(TestGatherIfDispatchExplicit);
template<typename InputIterator1,
typename InputIterator2,
typename RandomAccessIterator,
typename OutputIterator>
OutputIterator gather_if(my_tag,
InputIterator1,
InputIterator1,
InputIterator2,
RandomAccessIterator,
OutputIterator result)
{
*result = 13;
return result;
}
void TestGatherIfDispatchImplicit()
{
thrust::device_vector<int> vec(1);
thrust::gather_if(thrust::retag<my_tag>(vec.begin()),
thrust::retag<my_tag>(vec.end()),
thrust::retag<my_tag>(vec.begin()),
thrust::retag<my_tag>(vec.begin()),
thrust::retag<my_tag>(vec.begin()));
ASSERT_EQUAL(13, vec.front());
}
DECLARE_UNITTEST(TestGatherIfDispatchImplicit);
template <typename T>
void TestGatherIf(const size_t n)
{
const size_t source_size = ::min((size_t) 10, 2 * n);
// source vectors to gather from
thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size);
thrust::device_vector<T> d_source = h_source;
// gather indices
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_map[i] = h_map[i] % source_size;
thrust::device_vector<unsigned int> d_map = h_map;
// gather stencil
thrust::host_vector<unsigned int> h_stencil = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_stencil[i] = h_stencil[i] % 2;
thrust::device_vector<unsigned int> d_stencil = h_stencil;
// gather destination
thrust::host_vector<T> h_output(n);
thrust::device_vector<T> d_output(n);
thrust::gather_if(h_map.begin(), h_map.end(), h_stencil.begin(), h_source.begin(), h_output.begin(), is_even_gather_if<unsigned int>());
thrust::gather_if(d_map.begin(), d_map.end(), d_stencil.begin(), d_source.begin(), d_output.begin(), is_even_gather_if<unsigned int>());
ASSERT_EQUAL(h_output, d_output);
}
DECLARE_VARIABLE_UNITTEST(TestGatherIf);
template <typename T>
void TestGatherIfToDiscardIterator(const size_t n)
{
const size_t source_size = ::min((size_t) 10, 2 * n);
// source vectors to gather from
thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size);
thrust::device_vector<T> d_source = h_source;
// gather indices
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_map[i] = h_map[i] % source_size;
thrust::device_vector<unsigned int> d_map = h_map;
// gather stencil
thrust::host_vector<unsigned int> h_stencil = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_stencil[i] = h_stencil[i] % 2;
thrust::device_vector<unsigned int> d_stencil = h_stencil;
thrust::discard_iterator<> h_result =
thrust::gather_if(h_map.begin(), h_map.end(), h_stencil.begin(), h_source.begin(), thrust::make_discard_iterator(), is_even_gather_if<unsigned int>());
thrust::discard_iterator<> d_result =
thrust::gather_if(d_map.begin(), d_map.end(), d_stencil.begin(), d_source.begin(), thrust::make_discard_iterator(), is_even_gather_if<unsigned int>());
thrust::discard_iterator<> reference(n);
ASSERT_EQUAL_QUIET(reference, h_result);
ASSERT_EQUAL_QUIET(reference, d_result);
}
DECLARE_VARIABLE_UNITTEST(TestGatherIfToDiscardIterator);
template <typename Vector>
void TestGatherCountingIterator(void)
{
Vector source(10);
thrust::sequence(source.begin(), source.end(), 0);
Vector map(10);
thrust::sequence(map.begin(), map.end(), 0);
Vector output(10);
// source has any_system_tag
thrust::fill(output.begin(), output.end(), 0);
thrust::gather(map.begin(),
map.end(),
thrust::make_counting_iterator(0),
output.begin());
ASSERT_EQUAL(output, map);
// map has any_system_tag
thrust::fill(output.begin(), output.end(), 0);
thrust::gather(thrust::make_counting_iterator(0),
thrust::make_counting_iterator((int)source.size()),
source.begin(),
output.begin());
ASSERT_EQUAL(output, map);
// source and map have any_system_tag
thrust::fill(output.begin(), output.end(), 0);
thrust::gather(thrust::make_counting_iterator(0),
thrust::make_counting_iterator((int)output.size()),
thrust::make_counting_iterator(0),
output.begin());
ASSERT_EQUAL(output, map);
}
DECLARE_INTEGRAL_VECTOR_UNITTEST(TestGatherCountingIterator);
THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END
| 4283d613f93fab12d48a7f7ea02d50232bc4200c.cu | /*
* Copyright 2008-2013 NVIDIA Corporation
* Modifications Copyright© 2019 Advanced Micro Devices, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <unittest/unittest.h>
#include <thrust/gather.h>
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/retag.h>
#include <thrust/sequence.h>
#include <algorithm>
THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN
template <class Vector>
void TestGatherSimple(void)
{
Vector map(5); // gather indices
Vector src(8); // source vector
Vector dst(5); // destination vector
map[0] = 6; map[1] = 2; map[2] = 1; map[3] = 7; map[4] = 2;
src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; src[5] = 5; src[6] = 6; src[7] = 7;
dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0;
thrust::gather(map.begin(), map.end(), src.begin(), dst.begin());
ASSERT_EQUAL(dst[0], 6);
ASSERT_EQUAL(dst[1], 2);
ASSERT_EQUAL(dst[2], 1);
ASSERT_EQUAL(dst[3], 7);
ASSERT_EQUAL(dst[4], 2);
}
DECLARE_INTEGRAL_VECTOR_UNITTEST(TestGatherSimple);
template<typename InputIterator, typename RandomAccessIterator, typename OutputIterator>
OutputIterator gather(my_system &system, InputIterator, InputIterator, RandomAccessIterator, OutputIterator result)
{
system.validate_dispatch();
return result;
}
void TestGatherDispatchExplicit()
{
thrust::device_vector<int> vec(1);
my_system sys(0);
thrust::gather(sys,
vec.begin(),
vec.end(),
vec.begin(),
vec.begin());
ASSERT_EQUAL(true, sys.is_valid());
}
DECLARE_UNITTEST(TestGatherDispatchExplicit);
template<typename InputIterator, typename RandomAccessIterator, typename OutputIterator>
OutputIterator gather(my_tag, InputIterator, InputIterator, RandomAccessIterator, OutputIterator result)
{
*result = 13;
return result;
}
void TestGatherDispatchImplicit()
{
thrust::device_vector<int> vec(1);
thrust::gather(thrust::retag<my_tag>(vec.begin()),
thrust::retag<my_tag>(vec.end()),
thrust::retag<my_tag>(vec.begin()),
thrust::retag<my_tag>(vec.begin()));
ASSERT_EQUAL(13, vec.front());
}
DECLARE_UNITTEST(TestGatherDispatchImplicit);
template <typename T>
void TestGather(const size_t n)
{
const size_t source_size = std::min((size_t) 10, 2 * n);
// source vectors to gather from
thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size);
thrust::device_vector<T> d_source = h_source;
// gather indices
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_map[i] = h_map[i] % source_size;
thrust::device_vector<unsigned int> d_map = h_map;
// gather destination
thrust::host_vector<T> h_output(n);
thrust::device_vector<T> d_output(n);
thrust::gather(h_map.begin(), h_map.end(), h_source.begin(), h_output.begin());
thrust::gather(d_map.begin(), d_map.end(), d_source.begin(), d_output.begin());
ASSERT_EQUAL(h_output, d_output);
}
DECLARE_VARIABLE_UNITTEST(TestGather);
template <typename T>
void TestGatherToDiscardIterator(const size_t n)
{
const size_t source_size = std::min((size_t) 10, 2 * n);
// source vectors to gather from
thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size);
thrust::device_vector<T> d_source = h_source;
// gather indices
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_map[i] = h_map[i] % source_size;
thrust::device_vector<unsigned int> d_map = h_map;
thrust::discard_iterator<> h_result =
thrust::gather(h_map.begin(), h_map.end(), h_source.begin(), thrust::make_discard_iterator());
thrust::discard_iterator<> d_result =
thrust::gather(d_map.begin(), d_map.end(), d_source.begin(), thrust::make_discard_iterator());
thrust::discard_iterator<> reference(n);
ASSERT_EQUAL_QUIET(reference, h_result);
ASSERT_EQUAL_QUIET(reference, d_result);
}
DECLARE_VARIABLE_UNITTEST(TestGatherToDiscardIterator);
template <class Vector>
void TestGatherIfSimple(void)
{
Vector flg(5); // predicate array
Vector map(5); // gather indices
Vector src(8); // source vector
Vector dst(5); // destination vector
flg[0] = 0; flg[1] = 1; flg[2] = 0; flg[3] = 1; flg[4] = 0;
map[0] = 6; map[1] = 2; map[2] = 1; map[3] = 7; map[4] = 2;
src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; src[5] = 5; src[6] = 6; src[7] = 7;
dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0;
thrust::gather_if(map.begin(), map.end(), flg.begin(), src.begin(), dst.begin());
ASSERT_EQUAL(dst[0], 0);
ASSERT_EQUAL(dst[1], 2);
ASSERT_EQUAL(dst[2], 0);
ASSERT_EQUAL(dst[3], 7);
ASSERT_EQUAL(dst[4], 0);
}
DECLARE_INTEGRAL_VECTOR_UNITTEST(TestGatherIfSimple);
template <typename T>
struct is_even_gather_if
{
__host__ __device__
bool operator()(const T i) const
{
return (i % 2) == 0;
}
};
template<typename InputIterator1,
typename InputIterator2,
typename RandomAccessIterator,
typename OutputIterator>
OutputIterator gather_if(my_system &system,
InputIterator1,
InputIterator1,
InputIterator2,
RandomAccessIterator,
OutputIterator result)
{
system.validate_dispatch();
return result;
}
void TestGatherIfDispatchExplicit()
{
thrust::device_vector<int> vec(1);
my_system sys(0);
thrust::gather_if(sys,
vec.begin(),
vec.end(),
vec.begin(),
vec.begin(),
vec.begin());
ASSERT_EQUAL(true, sys.is_valid());
}
DECLARE_UNITTEST(TestGatherIfDispatchExplicit);
template<typename InputIterator1,
typename InputIterator2,
typename RandomAccessIterator,
typename OutputIterator>
OutputIterator gather_if(my_tag,
InputIterator1,
InputIterator1,
InputIterator2,
RandomAccessIterator,
OutputIterator result)
{
*result = 13;
return result;
}
void TestGatherIfDispatchImplicit()
{
thrust::device_vector<int> vec(1);
thrust::gather_if(thrust::retag<my_tag>(vec.begin()),
thrust::retag<my_tag>(vec.end()),
thrust::retag<my_tag>(vec.begin()),
thrust::retag<my_tag>(vec.begin()),
thrust::retag<my_tag>(vec.begin()));
ASSERT_EQUAL(13, vec.front());
}
DECLARE_UNITTEST(TestGatherIfDispatchImplicit);
template <typename T>
void TestGatherIf(const size_t n)
{
const size_t source_size = std::min((size_t) 10, 2 * n);
// source vectors to gather from
thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size);
thrust::device_vector<T> d_source = h_source;
// gather indices
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_map[i] = h_map[i] % source_size;
thrust::device_vector<unsigned int> d_map = h_map;
// gather stencil
thrust::host_vector<unsigned int> h_stencil = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_stencil[i] = h_stencil[i] % 2;
thrust::device_vector<unsigned int> d_stencil = h_stencil;
// gather destination
thrust::host_vector<T> h_output(n);
thrust::device_vector<T> d_output(n);
thrust::gather_if(h_map.begin(), h_map.end(), h_stencil.begin(), h_source.begin(), h_output.begin(), is_even_gather_if<unsigned int>());
thrust::gather_if(d_map.begin(), d_map.end(), d_stencil.begin(), d_source.begin(), d_output.begin(), is_even_gather_if<unsigned int>());
ASSERT_EQUAL(h_output, d_output);
}
DECLARE_VARIABLE_UNITTEST(TestGatherIf);
template <typename T>
void TestGatherIfToDiscardIterator(const size_t n)
{
const size_t source_size = std::min((size_t) 10, 2 * n);
// source vectors to gather from
thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size);
thrust::device_vector<T> d_source = h_source;
// gather indices
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_map[i] = h_map[i] % source_size;
thrust::device_vector<unsigned int> d_map = h_map;
// gather stencil
thrust::host_vector<unsigned int> h_stencil = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_stencil[i] = h_stencil[i] % 2;
thrust::device_vector<unsigned int> d_stencil = h_stencil;
thrust::discard_iterator<> h_result =
thrust::gather_if(h_map.begin(), h_map.end(), h_stencil.begin(), h_source.begin(), thrust::make_discard_iterator(), is_even_gather_if<unsigned int>());
thrust::discard_iterator<> d_result =
thrust::gather_if(d_map.begin(), d_map.end(), d_stencil.begin(), d_source.begin(), thrust::make_discard_iterator(), is_even_gather_if<unsigned int>());
thrust::discard_iterator<> reference(n);
ASSERT_EQUAL_QUIET(reference, h_result);
ASSERT_EQUAL_QUIET(reference, d_result);
}
DECLARE_VARIABLE_UNITTEST(TestGatherIfToDiscardIterator);
template <typename Vector>
void TestGatherCountingIterator(void)
{
Vector source(10);
thrust::sequence(source.begin(), source.end(), 0);
Vector map(10);
thrust::sequence(map.begin(), map.end(), 0);
Vector output(10);
// source has any_system_tag
thrust::fill(output.begin(), output.end(), 0);
thrust::gather(map.begin(),
map.end(),
thrust::make_counting_iterator(0),
output.begin());
ASSERT_EQUAL(output, map);
// map has any_system_tag
thrust::fill(output.begin(), output.end(), 0);
thrust::gather(thrust::make_counting_iterator(0),
thrust::make_counting_iterator((int)source.size()),
source.begin(),
output.begin());
ASSERT_EQUAL(output, map);
// source and map have any_system_tag
thrust::fill(output.begin(), output.end(), 0);
thrust::gather(thrust::make_counting_iterator(0),
thrust::make_counting_iterator((int)output.size()),
thrust::make_counting_iterator(0),
output.begin());
ASSERT_EQUAL(output, map);
}
DECLARE_INTEGRAL_VECTOR_UNITTEST(TestGatherCountingIterator);
THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END
|
7650548dc22ed95888c4c3ebc78ad30d31a3f227.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdlib.h"
#include "cudpp.h"
#include "math.h"
#include "stdio.h"
#include "libconfig.h"
#include "hiprand/hiprand.h"
#include "hiprand/hiprand_kernel.h"
#include "linux/limits.h"
#include "limits.h"
#include "hipfft.h"
#include "omp.h"
#define PI (3.14159265)
#define EV_IN_ERGS (1.60217646e-12)
#define CURAND_CALL(x) { if((x) != HIPRAND_STATUS_SUCCESS) { \
printf("Error %d at %s:%d\n", x, __FILE__,__LINE__); \
exit(EXIT_FAILURE);}}
#define CUDA_CALL(x) { if((x) != hipSuccess) { \
printf("Cuda error %d at %s:%d: ", x, __FILE__,__LINE__); \
printf("%s\n", hipGetErrorString(x)); \
exit(EXIT_FAILURE);}}
typedef struct pulse_cfg_str
{
float A0;
float Am;
float T;
float fm;
float fce;
float z0;
float dz;
float pulse_ratio;
}pulse_t;
typedef struct simulation_str
{
float tstart;
float tstop;
unsigned int nh;
float dt;
unsigned int nelectrons;
unsigned int ncells;
float z1;
float z2;
float plasma_density;
int static_ez;
}simulation_t;
typedef struct electron_str
{
float T;
float m;
float q;
}
electron_t;
typedef struct global_settings_str
{
char* msavedir;
char* vsavedir;
char* msavefile;
char* vsavefile;
unsigned int max_gpu_threads;
unsigned int rseed;
unsigned int save_every_n;
}global_setting_t;
__device__ float diff(float k, float dx)
{
float a = k * dx;
return sin(a)/a;
}
__device__ float diff2(float k, float dx)
{
float a = diff(k, dx);
return a*a;
}
void set_speed_maxwell_cuda(hiprandGenerator_t cuda_r, float* d_v, float sigma, unsigned int nelectrons)
{
CURAND_CALL(hiprandGenerateNormal(cuda_r, d_v, nelectrons, 0, sigma));
}
__global__ void transform_uniform_distribution(float* d_p, float min, float max)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
d_p[n] = (max - min)*d_p[n] + min;
}
void set_pos_uniform_cuda(hiprandGenerator_t cuda_r, float* d_r, float z1, float z2, unsigned int nelectrons, unsigned int max_threads)
{
CURAND_CALL(hiprandGenerateUniform(cuda_r, d_r, nelectrons));
hipLaunchKernelGGL(( transform_uniform_distribution), dim3(nelectrons/max_threads), dim3(max_threads), 0, 0, d_r, z1, z2);
}
void calculate_v2(float* vx, float* vy, unsigned int ntpoints, float* v)
{
unsigned int i;
for (i = 0; i < ntpoints; ++i)
{
v[i] = vx[i]*vx[i] + vy[i]*vy[i];
}
}
int get_pulse_config(config_t* config, pulse_t* pulse)
{
config_setting_t* setting = config_lookup(config, "pulse.A0");
pulse->A0 = (float)config_setting_get_float(setting);
setting = config_lookup(config, "pulse.Am");
pulse->Am = (float)config_setting_get_float(setting);
setting = config_lookup(config, "pulse.fce");
pulse->fce = (float)config_setting_get_float(setting);
setting = config_lookup(config, "pulse.fm");
pulse->fm = (float)config_setting_get_float(setting);
setting = config_lookup(config, "pulse.T");
pulse->T = (float)config_setting_get_float(setting);
setting = config_lookup(config, "pulse.z0");
pulse->z0 = (float)config_setting_get_float(setting);
setting = config_lookup(config, "pulse.dz");
pulse->dz = (float)config_setting_get_float(setting);
setting = config_lookup(config, "pulse.pulse_ratio");
pulse->pulse_ratio = (float)config_setting_get_float(setting);
return 0;
}
int get_global_config(config_t* config, global_setting_t* global_settings)
{
config_setting_t* setting = config_lookup(config, "global.msavefile");
global_settings->msavefile = (char*)config_setting_get_string(setting);
setting = config_lookup(config, "global.msavedir");
global_settings->msavedir = (char*)config_setting_get_string(setting);
setting = config_lookup(config, "global.vsavefile");
global_settings->vsavefile = (char*)config_setting_get_string(setting);
setting = config_lookup(config, "global.vsavedir");
global_settings->vsavedir = (char*)config_setting_get_string(setting);
setting = config_lookup(config, "global.max_gpu_threads");
global_settings->max_gpu_threads = (unsigned int)config_setting_get_int64(setting);
setting = config_lookup(config, "global.rseed");
global_settings->rseed = (unsigned int)config_setting_get_int64(setting);
setting = config_lookup(config, "global.save_every_n");
global_settings->save_every_n = config_setting_get_int(setting);
return 0;
}
int get_simulation_config(config_t* config, simulation_t* simulation)
{
config_setting_t* setting = config_lookup(config, "simulation.tstart");
simulation->tstart = (float)config_setting_get_float(setting);
setting = config_lookup(config, "simulation.tstop");
simulation->tstop = (float)config_setting_get_float(setting);
setting = config_lookup(config, "simulation.dt");
simulation->dt = (float)config_setting_get_float(setting);
setting = config_lookup(config, "simulation.nharm");
simulation->nh = (unsigned int)config_setting_get_int64(setting);
setting = config_lookup(config, "simulation.nelectrons");
simulation->nelectrons = (unsigned int)config_setting_get_int64(setting);
setting = config_lookup(config, "simulation.ncells");
simulation->ncells = (unsigned int)config_setting_get_int64(setting);
setting = config_lookup(config, "simulation.z1");
simulation->z1 = (float)config_setting_get_float(setting);
setting = config_lookup(config, "simulation.z2");
simulation->z2 = (float)config_setting_get_float(setting);
setting = config_lookup(config, "simulation.plasma_density");
simulation->plasma_density = (float)config_setting_get_float(setting);
setting = config_lookup(config, "simulation.static_ez");
simulation->static_ez = config_setting_get_bool(setting);
return 0;
}
int get_electron_config(config_t* config, electron_t* electron)
{
config_setting_t* setting = config_lookup(config, "electron.T");
electron->T = (float)config_setting_get_float(setting);
setting = config_lookup(config, "electron.m");
electron->m = (float)config_setting_get_float(setting);
setting = config_lookup(config, "electron.q");
electron->q = (float)config_setting_get_float(setting);
return 0;
}
__global__ void kernel_generate_ambmfm(float am, float fm, float pulse_ratio, float nh, float* d_fm, float* d_am, float* d_bm)
{
float tm = 2*PI/fm;
float tm1 = tm/pulse_ratio;
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i==0)
{
d_bm[i] = am*(2*tm1-tm)/tm;
d_am[i] = 0;
}
else
{
d_am[i] = 2*am*(1-cos(i*fm*tm1))/(i*PI);
d_bm[i] = 2*am*sin(i*fm*tm1)/(i*PI);
}
d_fm[i] = fm*i;
}
__global__ void generate_ameandr(float* d_am, float* d_bm, float* d_fm, float t, unsigned int nh, float* d_a)
{
unsigned int i;
float a = 0;
for (i = 0; i < nh; ++i)
{
a += d_am[i]*sin(d_fm[i]*t) + d_bm[i]*cos(d_fm[i]*t);
}
*d_a = a;
}
int my_read_config_file(char* file, config_t* config)
{
config_init(config);
return config_read_file(config, file);
}
__global__ void global_associate_electrons_with_cells(float* d_z, float cellsize, unsigned int* d_association)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int ncell = floor(d_z[n]/cellsize);
d_association[n] = ncell;
}
__device__ float get_ex_field(float A0, float pulse_duration,
float z, unsigned int nt,
float z0, float dz,
float* am, float* tn)
{
float az = (fabs(z - z0) <= dz) ? 1 : 0;
float at = (tn[nt] - pulse_duration) < 0 ? 1 : 0;
return 0.5*az*at*(am[nt] + A0);
}
__global__ void update_ex_field(float A0, float* d_am, float fce, float dt,
unsigned int nt, float pulse_duration, float z0, float pulse_dz,
float cellsize, float* d_ex, float q, float m)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
float z = (n + 0.5)*cellsize;
float t = nt*dt;
float sfce = sin(fce*t);
float az = (fabs(z - z0) <= pulse_dz) ? 1 : 0;
float at = (t - pulse_duration) < 0 ? 1 : 0;
//d_ex[n] = get_ex_field(A0, pulse_duration, z, nt, z0, dz, d_am, tn)*sfce*q/m;
d_ex[n] = at*az*0.5*(A0 + *d_am)*sfce*q/m;
}
__global__ void kernel_generate_tn(float tstart, float dt, float* d_tn)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
d_tn[n] = tstart + n*dt;
}
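// Boris-style velocity update in the plane perpendicular to B: apply half of the
// electric-field kick, rotate by the cyclotron angle fce*delta, then apply the second half-kick.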
__device__ void rotate_particle(float *vx, float* vy, float E, float fce, float delta)
{
float vx_m;
float vy_m;
float S = sin(fce*delta);
float C = cos(fce*delta);
vx_m = *vx + delta*E/2.;
vy_m = *vy;
*vx = (vx_m*C + vy_m*S) + delta*E/2.;
*vy = (-vx_m*S + vy_m*C);
}
__global__ void trace_electrons_single_step(float* d_vx, float* d_vy, float* d_vz,
float* d_z, float* d_fx, float* d_fz,
unsigned int* d_associate,
float fce, float delta)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
float vxt = d_vx[n];
float vyt = d_vy[n];
float dt = delta;
float tfce = fce;
unsigned int ncell = d_associate[n];
rotate_particle(&vxt, &vyt, d_fx[ncell], tfce, dt);
d_vx[n] = vxt;
d_vy[n] = vyt;
d_vz[n] += d_fz[ncell]*delta;
d_z[n] += d_vz[n]*dt;
}
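// Periodic boundaries in z: particles leaving [zmin, zmax] are wrapped to the opposite side
// and their perpendicular velocities are re-drawn from the Maxwellian with width sigma.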
__global__ void postproc(float* d_z, float* d_vx, float* d_vy,
float zmin, float zmax, float sigma, hiprandState_t* d_states)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int nr = threadIdx.x;
float L = zmax - zmin;
hiprandState_t rstate = d_states[nr];
if(d_z[n] > zmax)
{
d_z[n] -= L;
d_vx[n] = sigma*hiprand_normal(&rstate);
d_vy[n] = sigma*hiprand_normal(&rstate);
}
if(d_z[n] < zmin)
{
d_z[n] += L;
d_vx[n] = sigma*hiprand_normal(&rstate);
d_vy[n] = sigma*hiprand_normal(&rstate);
}
d_states[nr]=rstate;
}
__global__ void calculate_momentum_zdistribution(float* d_vx, float* d_vy,
unsigned int* d_associate,
float* d_m, unsigned int* d_n,
unsigned int nelectrons, float mass)
{
unsigned int ncell = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int i;
unsigned int ne = 0;
float m = 0;
for (i = 0; i < nelectrons; ++i)
{
if (d_associate[i] == ncell)
{
float vx = d_vx[i];
float vy = d_vy[i];
ne++;
m += vx*vx + vy*vy;
}
}
d_n[ncell] = ne;
d_m[ncell] = 0.5*mass*m/ne;
}
void dump(char* savedir, char* filename, unsigned int n, float* d_vx, float* d_vy,
unsigned int* d_cell_electron_association, float* d_m, unsigned int* d_n,
unsigned int nelectrons, unsigned int ncells, float m, float cellsize, float z1, float dt,
unsigned int max_threads)
{
FILE* to;
float* momentum = (float*)malloc(sizeof(float)*ncells);
unsigned int i;
char filenamei[PATH_MAX];
sprintf(filenamei, "%s/%s_%d.dat", savedir, filename, n);
to = fopen(filenamei, "w");
hipLaunchKernelGGL(( calculate_momentum_zdistribution), dim3(ncells/max_threads), dim3(max_threads), 0, 0, d_vx, d_vy, d_cell_electron_association, d_m, d_n, nelectrons, m);
hipMemcpy(momentum, d_m, sizeof(float)*ncells, hipMemcpyDeviceToHost);
for (i = 0; i < ncells; ++i)
{
fprintf(to, "%e\t%e\n", z1 + (i+0.5)*cellsize, momentum[i]);
}
free(momentum);
fclose(to);
printf("file %d dumped\n", n);
}
__global__ void setup_rstates(hiprandState_t* states, unsigned long rseed)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
hiprand_init(rseed, n, 0, &states[n]);
}
__global__ void do_calculate_rho_cuda(unsigned int* d_associate,
unsigned int* d_rho)
{
unsigned int nelectron = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int ncell = d_associate[nelectron];
atomicInc(&d_rho[ncell], UINT_MAX);
}
__global__ void generate_kn(float k0, float dx, float* d_k)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
float k = k0*(n+1);
d_k[n] = k*k*diff2(k,dx/2);
}
__global__ void copy_rho_to_cufft(unsigned int* d_rho, hipfftComplex* data, float coeff)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
data[n] = make_cuFloatComplex (coeff*(float)d_rho[n],0);
}
__global__ void copy_cufft_to_phi(float* d_phi, hipfftComplex* data)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
hipfftComplex thisd = data[n];
d_phi[n] = cuCrealf(thisd);
}
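// Poisson solve in Fourier space: phi_k = 4*PI*rho_k / k^2, where d_k holds k^2 with the
// finite-grid (sinc^2) correction from generate_kn; the upper half of the spectrum is filled
// with complex conjugates so the inverse transform yields a real potential.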
__global__ void poisson_harmonics_transform(float* d_k, hipfftComplex* data, unsigned int nharm)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
hipfftComplex thisd = data[n+1];
float r;
float i;
r = 4*PI*cuCrealf(thisd)/d_k[n];
i = 4*PI*cuCimagf(thisd)/d_k[n];
data[n+1] = make_cuFloatComplex (r,i);
data[nharm/2 + n] = cuConjf(data[nharm/2 - n]);
}
__global__ void zero_harm_hack_for_fftdata(hipfftComplex* data)
{
data[0] = make_cuFloatComplex(0,0);
}
__global__ void calculate_ez_cuda(float* d_phi, float* d_ez, unsigned int ncells, float q, float m, float cellsize)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int nl = n - 1;
unsigned int nr = n + 1;
float qm = q/m;
if (n==0)
{
nl = ncells-1;
nr = 1;
}
if (n==ncells-1)
{
nl = ncells-2;
nr = 0;
}
d_ez[n]= -qm*(d_phi[nr] - d_phi[nl])/2/cellsize/ncells;
}
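// Electrostatic field update: deposit rho into the FFT buffer, forward transform, scale each
// harmonic by 4*PI/k^2 (zeroing the DC term), inverse transform to the potential, then take a
// centered difference for Ez (scaled by q/m, as used directly in the particle push); the
// 1/ncells factor in calculate_ez_cuda normalizes the unnormalized inverse FFT.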
void update_ez_cuda(hipfftHandle plan, unsigned int* d_rho, float* d_k, float* d_ez, float* d_phi,
hipfftComplex* data, unsigned int ncells, unsigned int max_threads,
float dens_coeff, float q, float m, float cellsize)
{
hipLaunchKernelGGL(( copy_rho_to_cufft), dim3(ncells/max_threads), dim3(max_threads), 0, 0, d_rho, data, dens_coeff);
hipfftExecC2C(plan, data, data, HIPFFT_FORWARD);
hipLaunchKernelGGL(( poisson_harmonics_transform), dim3(ncells/max_threads/2), dim3(max_threads), 0, 0, d_k, data, ncells);
hipLaunchKernelGGL(( zero_harm_hack_for_fftdata), dim3(1),dim3(1), 0, 0, data);
hipfftExecC2C(plan, data, data, HIPFFT_BACKWARD);
hipLaunchKernelGGL(( copy_cufft_to_phi), dim3(ncells/max_threads), dim3(max_threads), 0, 0, d_phi, data);
hipLaunchKernelGGL(( calculate_ez_cuda), dim3(ncells/max_threads), dim3(max_threads), 0, 0, d_phi, d_ez, ncells, q,m, cellsize);
}
void dump_vperp(char* savedir, char* savefile, unsigned int n,
float* d_vx, float* d_vy, float* vx, float* vy,
unsigned int* d_associate, unsigned int* associate,
unsigned int nelectrons, unsigned int ncells, float m)
{
unsigned int i;
char fullpath[PATH_MAX];
CUDA_CALL(hipMemcpy(vx, d_vx, sizeof(float)*nelectrons, hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(vy, d_vy, sizeof(float)*nelectrons, hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(associate, d_associate, sizeof(unsigned int)*nelectrons, hipMemcpyDeviceToHost));
sprintf(fullpath, "%s/%s_%d.dat", savedir, savefile, n);
FILE* to;
#pragma omp parallel for
for (i=0; i < nelectrons; ++i)
{
float vxt = vx[i];
vx[i] = 0.5*m*(vxt*vxt+vy[i]*vy[i]);
}
sprintf(fullpath, "%s/%s_%d_vperp.dat", savedir, savefile, n);
to = fopen(fullpath, "w");
fwrite(vx, sizeof(float), nelectrons, to);
fclose(to);
sprintf(fullpath, "%s/%s_%d_association.dat", savedir, savefile, n);
to = fopen(fullpath, "w");
fwrite(associate, sizeof(unsigned int), nelectrons, to);
fclose(to);
printf("Particle speed and distribution dumped: %d\n", n);
}
int main(int argc, char** argv)
{
char* config_file = argv[1];
printf("Read the configuration in %s\n", config_file);
config_t configuration;
pulse_t pulse;
simulation_t simulation;
global_setting_t global_settings;
electron_t electron;
my_read_config_file(config_file, &configuration);
get_pulse_config(&configuration, &pulse);
get_simulation_config(&configuration, &simulation);
get_global_config(&configuration, &global_settings);
get_electron_config(&configuration, &electron);
float fce = 2*PI*pulse.fce;
float fm = 2*PI*pulse.fm;
float Am = pulse.Am;
float A0 = pulse.A0;
float pulse_dz = pulse.dz;
float pulse_z0 = pulse.z0;
unsigned int nh = simulation.nh;
float tstart = simulation.tstart;
float tstop = simulation.tstop;
float dt = simulation.dt;
unsigned int ntpoints = ceil((tstop-tstart)/dt);
float pulse_duration = pulse.T;
float pulse_ratio = pulse.pulse_ratio;
unsigned int ncells = simulation.ncells;
unsigned int nelectrons = simulation.nelectrons;
unsigned int max_threads = global_settings.max_gpu_threads;
unsigned int rseed = global_settings.rseed;
float z1 = simulation.z1;
float z2 = simulation.z2;
float dz = (z2-z1)/ncells;
float q = electron.q;
float m = electron.m;
float Te = electron.T;
float sigma = sqrt(EV_IN_ERGS*Te/m);
float plasma_density = simulation.plasma_density;
hiprandGenerator_t cuda_r;
char* msavefile = global_settings.msavefile;
char* msavedir = global_settings.msavedir;
char* vsavefile = global_settings.vsavefile;
char* vsavedir = global_settings.vsavedir;
int static_ez = simulation.static_ez;
unsigned int save_every_n = global_settings.save_every_n;
printf("Allocate memory\n");
hiprandState_t* d_rstates;
CUDA_CALL(hipMalloc(&d_rstates, sizeof(hiprandState_t)*max_threads));
hipLaunchKernelGGL(( setup_rstates), dim3(1),dim3(max_threads), 0, 0, d_rstates, rseed);
CURAND_CALL(hiprandCreateGenerator(&cuda_r, HIPRAND_RNG_PSEUDO_DEFAULT));
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(cuda_r, rseed));
float* d_vx = NULL;
float* d_vy = NULL;
float* d_vz = NULL;
float* d_z = NULL;
float* d_ex = NULL;
unsigned int* d_association = NULL;
unsigned int* d_buffer = NULL;
unsigned int* n = (unsigned int*)malloc(sizeof(unsigned int)*ncells);
unsigned int* association = (unsigned int*)malloc(sizeof(unsigned int)*nelectrons);
float* vx = (float*)malloc(sizeof(float)*nelectrons);
float* vy = (float*)malloc(sizeof(float)*nelectrons);
float* d_am = NULL;
float* d_bm;
float* d_fm = NULL;
float* d_a = NULL;
float* d_m = NULL;
unsigned int* d_n = NULL;
unsigned int* d_rho;
float* ez = (float*)malloc(sizeof(float)*ncells);
float* d_ez;
float* d_phi;
float density_simulation_coeff = q*plasma_density*(z2 - z1)/nelectrons/dz;
float* d_k;
hipfftComplex *d_fft_data;
hipfftHandle plan;
CUDA_CALL(hipMalloc((void**)&d_fft_data, sizeof(hipfftComplex)*ncells));
hipfftPlan1d(&plan, ncells, HIPFFT_C2C, 1);
CUDA_CALL(hipMalloc(&d_vx, sizeof(float)*nelectrons));
CUDA_CALL(hipMalloc(&d_vy, sizeof(float)*nelectrons));
CUDA_CALL(hipMalloc(&d_vz, sizeof(float)*nelectrons));
CUDA_CALL(hipMalloc(&d_z, sizeof(float)*nelectrons));
CUDA_CALL(hipMalloc(&d_ex, sizeof(float)*ncells));
CUDA_CALL(hipMalloc(&d_am, sizeof(float)*nh));
CUDA_CALL(hipMalloc(&d_bm, sizeof(float)*nh));
CUDA_CALL(hipMalloc(&d_fm, sizeof(float)*nh));
CUDA_CALL(hipMalloc(&d_a, sizeof(float)));
CUDA_CALL(hipMalloc(&d_m, sizeof(float)*ncells));
CUDA_CALL(hipMalloc(&d_n, sizeof(unsigned int)*ncells));
CUDA_CALL(hipMalloc(&d_rho, sizeof(unsigned int)*ncells));
CUDA_CALL(hipMalloc(&d_ez, sizeof(float)*ncells));
CUDA_CALL(hipMalloc(&d_phi, sizeof(float)*ncells));
CUDA_CALL(hipMalloc(&d_k, sizeof(float)*ncells/2));
CUDA_CALL(hipMalloc(&d_association, sizeof(unsigned int)*nelectrons));
CUDA_CALL(hipMalloc(&d_buffer, sizeof(unsigned int)*nelectrons));
printf("Preparing the initial data\n");
CUDA_CALL(hipMemset(d_n, 0, sizeof(float)*ncells));
CUDA_CALL(hipMemset(d_m, 0, sizeof(float)*ncells));
CUDA_CALL(hipMemset(d_ez, 0, sizeof(float)*ncells));
hipLaunchKernelGGL(( kernel_generate_ambmfm), dim3(1),dim3(nh), 0, 0, Am, fm, pulse_ratio, nh, d_fm, d_am, d_bm);
set_speed_maxwell_cuda(cuda_r, d_vx, sigma, nelectrons);
set_speed_maxwell_cuda(cuda_r, d_vy, sigma, nelectrons);
set_speed_maxwell_cuda(cuda_r, d_vz, sigma, nelectrons);
set_pos_uniform_cuda(cuda_r, d_z, z1, z2, nelectrons, max_threads);
hipLaunchKernelGGL(( generate_kn), dim3(ncells/max_threads/2), dim3(max_threads), 0, 0, 2*PI/(z2-z1), dz, d_k);
unsigned int i = 0;
printf("Start the calculations!\n");
for(i = 0; i < ntpoints; ++i)
{
//printf("cycle number: %i\n", i);
hipLaunchKernelGGL(( generate_ameandr), dim3(1), dim3(1), 0, 0, d_am, d_bm, d_fm, i*dt, nh, d_a);
hipLaunchKernelGGL(( update_ex_field), dim3(ncells/max_threads), dim3(max_threads), 0, 0, A0, d_a, fce, dt, i,
pulse_duration,
pulse_z0, pulse_dz, dz,
d_ex, q, m);
hipLaunchKernelGGL(( postproc), dim3(nelectrons/max_threads), dim3(max_threads), 0, 0, d_z, d_vx, d_vy, z1, z2, sigma, d_rstates);
hipLaunchKernelGGL(( global_associate_electrons_with_cells), dim3(nelectrons/max_threads), dim3(max_threads), 0, 0, d_z,
dz,
d_association);
if(static_ez)
{
CUDA_CALL(hipMemset(d_rho, 0, sizeof(unsigned int)*ncells));
hipLaunchKernelGGL(( do_calculate_rho_cuda), dim3(nelectrons/max_threads), dim3(max_threads), 0, 0, d_association, d_rho);
update_ez_cuda(plan, d_rho, d_k, d_ez, d_phi, d_fft_data, ncells,
max_threads, density_simulation_coeff,q,m,dz );
}
hipLaunchKernelGGL(( trace_electrons_single_step), dim3(nelectrons/max_threads), dim3(max_threads), 0, 0, d_vx, d_vy, d_vz,
d_z, d_ex, d_ez,
d_association,
fce, dt);
if ((i%save_every_n) == 0)
{
dump(msavedir, msavefile, i/save_every_n, d_vx, d_vy, d_association, d_m,
d_n, nelectrons, ncells, m, dz, z1, dt, max_threads);
dump_vperp(vsavedir, vsavefile, i/save_every_n,
d_vx, d_vy, vx, vy,
d_association, association,
nelectrons, ncells, m);
}
}
printf("The calculations are done!\n");
printf("Free the memory\n");
CUDA_CALL(hipFree(d_vx));
CUDA_CALL(hipFree(d_vy));
CUDA_CALL(hipFree(d_vz));
CUDA_CALL(hipFree(d_z));
CUDA_CALL(hipFree(d_ex));
CUDA_CALL(hipFree(d_am));
CUDA_CALL(hipFree(d_bm));
CUDA_CALL(hipFree(d_fm));
CUDA_CALL(hipFree(d_a));
CUDA_CALL(hipFree(d_association));
CUDA_CALL(hipFree(d_m));
CUDA_CALL(hipFree(d_n));
CUDA_CALL(hipFree(d_rho));
CUDA_CALL(hipFree(d_ez));
CUDA_CALL(hipFree(d_rstates));
CUDA_CALL(hipFree(d_fft_data));
CUDA_CALL(hipFree(d_phi));
CUDA_CALL(hipFree(d_buffer));
free(n);
free(ez);
free(association);
free(vx);
free(vy);
hipfftDestroy(plan);
CURAND_CALL(hiprandDestroyGenerator(cuda_r));
config_destroy (&configuration);
printf("The programm is done!\n");
}
| 7650548dc22ed95888c4c3ebc78ad30d31a3f227.cu | #include "stdlib.h"
#include "cudpp.h"
#include "math.h"
#include "stdio.h"
#include "libconfig.h"
#include "curand.h"
#include "curand_kernel.h"
#include "linux/limits.h"
#include "limits.h"
#include "cufft.h"
#include "omp.h"
#define PI (3.14159265)
#define EV_IN_ERGS (1.60217646e-12)
#define CURAND_CALL(x) { if((x) != CURAND_STATUS_SUCCESS) { \
printf("Error %d at %s:%d\n", x, __FILE__,__LINE__); \
exit(EXIT_FAILURE);}}
#define CUDA_CALL(x) { if((x) != cudaSuccess) { \
printf("Cuda error %d at %s:%d: ", x, __FILE__,__LINE__); \
printf("%s\n", cudaGetErrorString(x)); \
exit(EXIT_FAILURE);}}
typedef struct pulse_cfg_str
{
float A0;
float Am;
float T;
float fm;
float fce;
float z0;
float dz;
float pulse_ratio;
}pulse_t;
typedef struct simulation_str
{
float tstart;
float tstop;
unsigned int nh;
float dt;
unsigned int nelectrons;
unsigned int ncells;
float z1;
float z2;
float plasma_density;
int static_ez;
}simulation_t;
typedef struct electron_str
{
float T;
float m;
float q;
}
electron_t;
typedef struct global_settings_str
{
char* msavedir;
char* vsavedir;
char* msavefile;
char* vsavefile;
unsigned int max_gpu_threads;
unsigned int rseed;
unsigned int save_every_n;
}global_setting_t;
__device__ float diff(float k, float dx)
{
float a = k * dx;
return sin(a)/a;
}
__device__ float diff2(float k, float dx)
{
float a = diff(k, dx);
return a*a;
}
void set_speed_maxwell_cuda(curandGenerator_t cuda_r, float* d_v, float sigma, unsigned int nelectrons)
{
CURAND_CALL(curandGenerateNormal(cuda_r, d_v, nelectrons, 0, sigma));
}
__global__ void transform_uniform_distribution(float* d_p, float min, float max)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
d_p[n] = (max - min)*d_p[n] + min;
}
void set_pos_uniform_cuda(curandGenerator_t cuda_r, float* d_r, float z1, float z2, unsigned int nelectrons, unsigned int max_threads)
{
CURAND_CALL(curandGenerateUniform(cuda_r, d_r, nelectrons));
transform_uniform_distribution<<<nelectrons/max_threads, max_threads>>>(d_r, z1, z2);
}
void calculate_v2(float* vx, float* vy, unsigned int ntpoints, float* v)
{
unsigned int i;
for (i = 0; i < ntpoints; ++i)
{
v[i] = vx[i]*vx[i] + vy[i]*vy[i];
}
}
int get_pulse_config(config_t* config, pulse_t* pulse)
{
config_setting_t* setting = config_lookup(config, "pulse.A0");
pulse->A0 = (float)config_setting_get_float(setting);
setting = config_lookup(config, "pulse.Am");
pulse->Am = (float)config_setting_get_float(setting);
setting = config_lookup(config, "pulse.fce");
pulse->fce = (float)config_setting_get_float(setting);
setting = config_lookup(config, "pulse.fm");
pulse->fm = (float)config_setting_get_float(setting);
setting = config_lookup(config, "pulse.T");
pulse->T = (float)config_setting_get_float(setting);
setting = config_lookup(config, "pulse.z0");
pulse->z0 = (float)config_setting_get_float(setting);
setting = config_lookup(config, "pulse.dz");
pulse->dz = (float)config_setting_get_float(setting);
setting = config_lookup(config, "pulse.pulse_ratio");
pulse->pulse_ratio = (float)config_setting_get_float(setting);
return 0;
}
int get_global_config(config_t* config, global_setting_t* global_settings)
{
config_setting_t* setting = config_lookup(config, "global.msavefile");
global_settings->msavefile = (char*)config_setting_get_string(setting);
setting = config_lookup(config, "global.msavedir");
global_settings->msavedir = (char*)config_setting_get_string(setting);
setting = config_lookup(config, "global.vsavefile");
global_settings->vsavefile = (char*)config_setting_get_string(setting);
setting = config_lookup(config, "global.vsavedir");
global_settings->vsavedir = (char*)config_setting_get_string(setting);
setting = config_lookup(config, "global.max_gpu_threads");
global_settings->max_gpu_threads = (unsigned int)config_setting_get_int64(setting);
setting = config_lookup(config, "global.rseed");
global_settings->rseed = (unsigned int)config_setting_get_int64(setting);
setting = config_lookup(config, "global.save_every_n");
global_settings->save_every_n = config_setting_get_int(setting);
return 0;
}
int get_simulation_config(config_t* config, simulation_t* simulation)
{
config_setting_t* setting = config_lookup(config, "simulation.tstart");
simulation->tstart = (float)config_setting_get_float(setting);
setting = config_lookup(config, "simulation.tstop");
simulation->tstop = (float)config_setting_get_float(setting);
setting = config_lookup(config, "simulation.dt");
simulation->dt = (float)config_setting_get_float(setting);
setting = config_lookup(config, "simulation.nharm");
simulation->nh = (unsigned int)config_setting_get_int64(setting);
setting = config_lookup(config, "simulation.nelectrons");
simulation->nelectrons = (unsigned int)config_setting_get_int64(setting);
setting = config_lookup(config, "simulation.ncells");
simulation->ncells = (unsigned int)config_setting_get_int64(setting);
setting = config_lookup(config, "simulation.z1");
simulation->z1 = (float)config_setting_get_float(setting);
setting = config_lookup(config, "simulation.z2");
simulation->z2 = (float)config_setting_get_float(setting);
setting = config_lookup(config, "simulation.plasma_density");
simulation->plasma_density = (float)config_setting_get_float(setting);
setting = config_lookup(config, "simulation.static_ez");
simulation->static_ez = config_setting_get_bool(setting);
return 0;
}
int get_electron_config(config_t* config, electron_t* electron)
{
config_setting_t* setting = config_lookup(config, "electron.T");
electron->T = (float)config_setting_get_float(setting);
setting = config_lookup(config, "electron.m");
electron->m = (float)config_setting_get_float(setting);
setting = config_lookup(config, "electron.q");
electron->q = (float)config_setting_get_float(setting);
return 0;
}
__global__ void kernel_generate_ambmfm(float am, float fm, float pulse_ratio, float nh, float* d_fm, float* d_am, float* d_bm)
{
float tm = 2*PI/fm;
float tm1 = tm/pulse_ratio;
unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i==0)
{
d_bm[i] = am*(2*tm1-tm)/tm;
d_am[i] = 0;
}
else
{
d_am[i] = 2*am*(1-cos(i*fm*tm1))/(i*PI);
d_bm[i] = 2*am*sin(i*fm*tm1)/(i*PI);
}
d_fm[i] = fm*i;
}
__global__ void generate_ameandr(float* d_am, float* d_bm, float* d_fm, float t, unsigned int nh, float* d_a)
{
unsigned int i;
float a = 0;
for (i = 0; i < nh; ++i)
{
a += d_am[i]*sin(d_fm[i]*t) + d_bm[i]*cos(d_fm[i]*t);
}
*d_a = a;
}
int my_read_config_file(char* file, config_t* config)
{
config_init(config);
return config_read_file(config, file);
}
__global__ void global_associate_electrons_with_cells(float* d_z, float cellsize, unsigned int* d_association)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int ncell = floor(d_z[n]/cellsize);
d_association[n] = ncell;
}
__device__ float get_ex_field(float A0, float pulse_duration,
float z, unsigned int nt,
float z0, float dz,
float* am, float* tn)
{
float az = (fabs(z - z0) <= dz) ? 1 : 0;
float at = (tn[nt] - pulse_duration) < 0 ? 1 : 0;
return 0.5*az*at*(am[nt] + A0);
}
__global__ void update_ex_field(float A0, float* d_am, float fce, float dt,
unsigned int nt, float pulse_duration, float z0, float pulse_dz,
float cellsize, float* d_ex, float q, float m)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
float z = (n + 0.5)*cellsize;
float t = nt*dt;
float sfce = sin(fce*t);
float az = (fabs(z - z0) <= pulse_dz) ? 1 : 0;
float at = (t - pulse_duration) < 0 ? 1 : 0;
//d_ex[n] = get_ex_field(A0, pulse_duration, z, nt, z0, dz, d_am, tn)*sfce*q/m;
d_ex[n] = at*az*0.5*(A0 + *d_am)*sfce*q/m;
}
__global__ void kernel_generate_tn(float tstart, float dt, float* d_tn)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
d_tn[n] = tstart + n*dt;
}
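// Boris-style velocity update in the plane perpendicular to B: apply half of the
// electric-field kick, rotate by the cyclotron angle fce*delta, then apply the second half-kick.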
__device__ void rotate_particle(float *vx, float* vy, float E, float fce, float delta)
{
float vx_m;
float vy_m;
float S = sin(fce*delta);
float C = cos(fce*delta);
vx_m = *vx + delta*E/2.;
vy_m = *vy;
*vx = (vx_m*C + vy_m*S) + delta*E/2.;
*vy = (-vx_m*S + vy_m*C);
}
__global__ void trace_electrons_single_step(float* d_vx, float* d_vy, float* d_vz,
float* d_z, float* d_fx, float* d_fz,
unsigned int* d_associate,
float fce, float delta)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
float vxt = d_vx[n];
float vyt = d_vy[n];
float dt = delta;
float tfce = fce;
unsigned int ncell = d_associate[n];
rotate_particle(&vxt, &vyt, d_fx[ncell], tfce, dt);
d_vx[n] = vxt;
d_vy[n] = vyt;
d_vz[n] += d_fz[ncell]*delta;
d_z[n] += d_vz[n]*dt;
}
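// Periodic boundaries in z: particles leaving [zmin, zmax] are wrapped to the opposite side
// and their perpendicular velocities are re-drawn from the Maxwellian with width sigma.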
__global__ void postproc(float* d_z, float* d_vx, float* d_vy,
float zmin, float zmax, float sigma, curandState* d_states)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int nr = threadIdx.x;
float L = zmax - zmin;
curandState rstate = d_states[nr];
if(d_z[n] > zmax)
{
d_z[n] -= L;
d_vx[n] = sigma*curand_normal(&rstate);
d_vy[n] = sigma*curand_normal(&rstate);
}
if(d_z[n] < zmin)
{
d_z[n] += L;
d_vx[n] = sigma*curand_normal(&rstate);
d_vy[n] = sigma*curand_normal(&rstate);
}
d_states[nr]=rstate;
}
__global__ void calculate_momentum_zdistribution(float* d_vx, float* d_vy,
unsigned int* d_associate,
float* d_m, unsigned int* d_n,
unsigned int nelectrons, float mass)
{
unsigned int ncell = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int i;
unsigned int ne = 0;
float m = 0;
for (i = 0; i < nelectrons; ++i)
{
if (d_associate[i] == ncell)
{
float vx = d_vx[i];
float vy = d_vy[i];
ne++;
m += vx*vx + vy*vy;
}
}
d_n[ncell] = ne;
d_m[ncell] = 0.5*mass*m/ne;
}
void dump(char* savedir, char* filename, unsigned int n, float* d_vx, float* d_vy,
unsigned int* d_cell_electron_association, float* d_m, unsigned int* d_n,
unsigned int nelectrons, unsigned int ncells, float m, float cellsize, float z1, float dt,
unsigned int max_threads)
{
FILE* to;
float* momentum = (float*)malloc(sizeof(float)*ncells);
unsigned int i;
char filenamei[PATH_MAX];
sprintf(filenamei, "%s/%s_%d.dat", savedir, filename, n);
to = fopen(filenamei, "w");
calculate_momentum_zdistribution<<<ncells/max_threads, max_threads>>>(d_vx, d_vy, d_cell_electron_association, d_m, d_n, nelectrons, m);
cudaMemcpy(momentum, d_m, sizeof(float)*ncells, cudaMemcpyDeviceToHost);
for (i = 0; i < ncells; ++i)
{
fprintf(to, "%e\t%e\n", z1 + (i+0.5)*cellsize, momentum[i]);
}
free(momentum);
fclose(to);
printf("file %d dumped\n", n);
}
__global__ void setup_rstates(curandState* states, unsigned long rseed)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
curand_init(rseed, n, 0, &states[n]);
}
__global__ void do_calculate_rho_cuda(unsigned int* d_associate,
unsigned int* d_rho)
{
unsigned int nelectron = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int ncell = d_associate[nelectron];
atomicInc(&d_rho[ncell], UINT_MAX);
}
__global__ void generate_kn(float k0, float dx, float* d_k)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
float k = k0*(n+1);
d_k[n] = k*k*diff2(k,dx/2);
}
__global__ void copy_rho_to_cufft(unsigned int* d_rho, cufftComplex* data, float coeff)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
data[n] = make_cuFloatComplex (coeff*(float)d_rho[n],0);
}
__global__ void copy_cufft_to_phi(float* d_phi, cufftComplex* data)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
cufftComplex thisd = data[n];
d_phi[n] = cuCrealf(thisd);
}
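// Poisson solve in Fourier space: phi_k = 4*PI*rho_k / k^2, where d_k holds k^2 with the
// finite-grid (sinc^2) correction from generate_kn; the upper half of the spectrum is filled
// with complex conjugates so the inverse transform yields a real potential.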
__global__ void poisson_harmonics_transform(float* d_k, cufftComplex* data, unsigned int nharm)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
cufftComplex thisd = data[n+1];
float r;
float i;
r = 4*PI*cuCrealf(thisd)/d_k[n];
i = 4*PI*cuCimagf(thisd)/d_k[n];
data[n+1] = make_cuFloatComplex (r,i);
data[nharm/2 + n] = cuConjf(data[nharm/2 - n]);
}
__global__ void zero_harm_hack_for_fftdata(cufftComplex* data)
{
data[0] = make_cuFloatComplex(0,0);
}
__global__ void calculate_ez_cuda(float* d_phi, float* d_ez, unsigned int ncells, float q, float m, float cellsize)
{
unsigned int n = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int nl = n - 1;
unsigned int nr = n + 1;
float qm = q/m;
if (n==0)
{
nl = ncells-1;
nr = 1;
}
if (n==ncells-1)
{
nl = ncells-2;
nr = 0;
}
d_ez[n]= -qm*(d_phi[nr] - d_phi[nl])/2/cellsize/ncells;
}
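// Electrostatic field update: deposit rho into the FFT buffer, forward transform, scale each
// harmonic by 4*PI/k^2 (zeroing the DC term), inverse transform to the potential, then take a
// centered difference for Ez (scaled by q/m, as used directly in the particle push); the
// 1/ncells factor in calculate_ez_cuda normalizes the unnormalized inverse FFT.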
void update_ez_cuda(cufftHandle plan, unsigned int* d_rho, float* d_k, float* d_ez, float* d_phi,
cufftComplex* data, unsigned int ncells, unsigned int max_threads,
float dens_coeff, float q, float m, float cellsize)
{
copy_rho_to_cufft<<<ncells/max_threads, max_threads>>>(d_rho, data, dens_coeff);
cufftExecC2C(plan, data, data, CUFFT_FORWARD);
poisson_harmonics_transform<<<ncells/max_threads/2, max_threads>>>(d_k, data, ncells);
zero_harm_hack_for_fftdata<<<1,1>>>(data);
cufftExecC2C(plan, data, data, CUFFT_INVERSE);
copy_cufft_to_phi<<<ncells/max_threads, max_threads>>>(d_phi, data);
calculate_ez_cuda<<<ncells/max_threads, max_threads>>>(d_phi, d_ez, ncells, q,m, cellsize);
}
void dump_vperp(char* savedir, char* savefile, unsigned int n,
float* d_vx, float* d_vy, float* vx, float* vy,
unsigned int* d_associate, unsigned int* associate,
unsigned int nelectrons, unsigned int ncells, float m)
{
unsigned int i;
char fullpath[PATH_MAX];
CUDA_CALL(cudaMemcpy(vx, d_vx, sizeof(float)*nelectrons, cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(vy, d_vy, sizeof(float)*nelectrons, cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(associate, d_associate, sizeof(unsigned int)*nelectrons, cudaMemcpyDeviceToHost));
sprintf(fullpath, "%s/%s_%d.dat", savedir, savefile, n);
FILE* to;
#pragma omp parallel for
for (i=0; i < nelectrons; ++i)
{
float vxt = vx[i];
vx[i] = 0.5*m*(vxt*vxt+vy[i]*vy[i]);
}
sprintf(fullpath, "%s/%s_%d_vperp.dat", savedir, savefile, n);
to = fopen(fullpath, "w");
fwrite(vx, sizeof(float), nelectrons, to);
fclose(to);
sprintf(fullpath, "%s/%s_%d_association.dat", savedir, savefile, n);
to = fopen(fullpath, "w");
fwrite(associate, sizeof(unsigned int), nelectrons, to);
fclose(to);
printf("Particle speed and distribution dumped: %d\n", n);
}
int main(int argc, char** argv)
{
char* config_file = argv[1];
printf("Read the configuration in %s\n", config_file);
config_t configuration;
pulse_t pulse;
simulation_t simulation;
global_setting_t global_settings;
electron_t electron;
my_read_config_file(config_file, &configuration);
get_pulse_config(&configuration, &pulse);
get_simulation_config(&configuration, &simulation);
get_global_config(&configuration, &global_settings);
get_electron_config(&configuration, &electron);
float fce = 2*PI*pulse.fce;
float fm = 2*PI*pulse.fm;
float Am = pulse.Am;
float A0 = pulse.A0;
float pulse_dz = pulse.dz;
float pulse_z0 = pulse.z0;
unsigned int nh = simulation.nh;
float tstart = simulation.tstart;
float tstop = simulation.tstop;
float dt = simulation.dt;
unsigned int ntpoints = ceil((tstop-tstart)/dt);
float pulse_duration = pulse.T;
float pulse_ratio = pulse.pulse_ratio;
unsigned int ncells = simulation.ncells;
unsigned int nelectrons = simulation.nelectrons;
unsigned int max_threads = global_settings.max_gpu_threads;
unsigned int rseed = global_settings.rseed;
float z1 = simulation.z1;
float z2 = simulation.z2;
float dz = (z2-z1)/ncells;
float q = electron.q;
float m = electron.m;
float Te = electron.T;
float sigma = sqrt(EV_IN_ERGS*Te/m);
float plasma_density = simulation.plasma_density;
curandGenerator_t cuda_r;
char* msavefile = global_settings.msavefile;
char* msavedir = global_settings.msavedir;
char* vsavefile = global_settings.vsavefile;
char* vsavedir = global_settings.vsavedir;
int static_ez = simulation.static_ez;
unsigned int save_every_n = global_settings.save_every_n;
printf("Allocate memory\n");
curandState* d_rstates;
CUDA_CALL(cudaMalloc(&d_rstates, sizeof(curandState)*max_threads));
setup_rstates<<<1,max_threads>>>(d_rstates, rseed);
CURAND_CALL(curandCreateGenerator(&cuda_r, CURAND_RNG_PSEUDO_DEFAULT));
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(cuda_r, rseed));
float* d_vx = NULL;
float* d_vy = NULL;
float* d_vz = NULL;
float* d_z = NULL;
float* d_ex = NULL;
unsigned int* d_association = NULL;
unsigned int* d_buffer = NULL;
unsigned int* n = (unsigned int*)malloc(sizeof(unsigned int)*ncells);
unsigned int* association = (unsigned int*)malloc(sizeof(unsigned int)*nelectrons);
float* vx = (float*)malloc(sizeof(float)*nelectrons);
float* vy = (float*)malloc(sizeof(float)*nelectrons);
float* d_am = NULL;
float* d_bm;
float* d_fm = NULL;
float* d_a = NULL;
float* d_m = NULL;
unsigned int* d_n = NULL;
unsigned int* d_rho;
float* ez = (float*)malloc(sizeof(float)*ncells);
float* d_ez;
float* d_phi;
float density_simulation_coeff = q*plasma_density*(z2 - z1)/nelectrons/dz;
float* d_k;
cufftComplex *d_fft_data;
cufftHandle plan;
CUDA_CALL(cudaMalloc((void**)&d_fft_data, sizeof(cufftComplex)*ncells));
cufftPlan1d(&plan, ncells, CUFFT_C2C, 1);
CUDA_CALL(cudaMalloc(&d_vx, sizeof(float)*nelectrons));
CUDA_CALL(cudaMalloc(&d_vy, sizeof(float)*nelectrons));
CUDA_CALL(cudaMalloc(&d_vz, sizeof(float)*nelectrons));
CUDA_CALL(cudaMalloc(&d_z, sizeof(float)*nelectrons));
CUDA_CALL(cudaMalloc(&d_ex, sizeof(float)*ncells));
CUDA_CALL(cudaMalloc(&d_am, sizeof(float)*nh));
CUDA_CALL(cudaMalloc(&d_bm, sizeof(float)*nh));
CUDA_CALL(cudaMalloc(&d_fm, sizeof(float)*nh));
CUDA_CALL(cudaMalloc(&d_a, sizeof(float)));
CUDA_CALL(cudaMalloc(&d_m, sizeof(float)*ncells));
CUDA_CALL(cudaMalloc(&d_n, sizeof(unsigned int)*ncells));
CUDA_CALL(cudaMalloc(&d_rho, sizeof(unsigned int)*ncells));
CUDA_CALL(cudaMalloc(&d_ez, sizeof(float)*ncells));
CUDA_CALL(cudaMalloc(&d_phi, sizeof(float)*ncells));
CUDA_CALL(cudaMalloc(&d_k, sizeof(float)*ncells/2));
CUDA_CALL(cudaMalloc(&d_association, sizeof(unsigned int)*nelectrons));
CUDA_CALL(cudaMalloc(&d_buffer, sizeof(unsigned int)*nelectrons));
printf("Preparing the initial data\n");
CUDA_CALL(cudaMemset(d_n, 0, sizeof(float)*ncells));
CUDA_CALL(cudaMemset(d_m, 0, sizeof(float)*ncells));
CUDA_CALL(cudaMemset(d_ez, 0, sizeof(float)*ncells));
kernel_generate_ambmfm<<<1,nh>>>(Am, fm, pulse_ratio, nh, d_fm, d_am, d_bm);
set_speed_maxwell_cuda(cuda_r, d_vx, sigma, nelectrons);
set_speed_maxwell_cuda(cuda_r, d_vy, sigma, nelectrons);
set_speed_maxwell_cuda(cuda_r, d_vz, sigma, nelectrons);
set_pos_uniform_cuda(cuda_r, d_z, z1, z2, nelectrons, max_threads);
generate_kn<<<ncells/max_threads/2, max_threads>>>(2*PI/(z2-z1), dz, d_k);
unsigned int i = 0;
printf("Start the calculations!\n");
for(i = 0; i < ntpoints; ++i)
{
//printf("cycle number: %i\n", i);
generate_ameandr<<<1, 1>>>(d_am, d_bm, d_fm, i*dt, nh, d_a);
update_ex_field<<<ncells/max_threads, max_threads>>>(A0, d_a, fce, dt, i,
pulse_duration,
pulse_z0, pulse_dz, dz,
d_ex, q, m);
postproc<<<nelectrons/max_threads, max_threads>>>(d_z, d_vx, d_vy, z1, z2, sigma, d_rstates);
global_associate_electrons_with_cells<<<nelectrons/max_threads, max_threads>>>(d_z,
dz,
d_association);
if(static_ez)
{
CUDA_CALL(cudaMemset(d_rho, 0, sizeof(unsigned int)*ncells));
do_calculate_rho_cuda<<<nelectrons/max_threads, max_threads>>>(d_association, d_rho);
update_ez_cuda(plan, d_rho, d_k, d_ez, d_phi, d_fft_data, ncells,
max_threads, density_simulation_coeff,q,m,dz );
}
trace_electrons_single_step<<<nelectrons/max_threads, max_threads>>>(d_vx, d_vy, d_vz,
d_z, d_ex, d_ez,
d_association,
fce, dt);
if ((i%save_every_n) == 0)
{
dump(msavedir, msavefile, i/save_every_n, d_vx, d_vy, d_association, d_m,
d_n, nelectrons, ncells, m, dz, z1, dt, max_threads);
dump_vperp(vsavedir, vsavefile, i/save_every_n,
d_vx, d_vy, vx, vy,
d_association, association,
nelectrons, ncells, m);
}
}
printf("The calculations are done!\n");
printf("Free the memory\n");
CUDA_CALL(cudaFree(d_vx));
CUDA_CALL(cudaFree(d_vy));
CUDA_CALL(cudaFree(d_vz));
CUDA_CALL(cudaFree(d_z));
CUDA_CALL(cudaFree(d_ex));
CUDA_CALL(cudaFree(d_am));
CUDA_CALL(cudaFree(d_bm));
CUDA_CALL(cudaFree(d_fm));
CUDA_CALL(cudaFree(d_a));
CUDA_CALL(cudaFree(d_association));
CUDA_CALL(cudaFree(d_m));
CUDA_CALL(cudaFree(d_n));
CUDA_CALL(cudaFree(d_rho));
CUDA_CALL(cudaFree(d_ez));
CUDA_CALL(cudaFree(d_rstates));
CUDA_CALL(cudaFree(d_fft_data));
CUDA_CALL(cudaFree(d_phi));
CUDA_CALL(cudaFree(d_buffer));
free(n);
free(ez);
free(association);
free(vx);
free(vy);
cufftDestroy(plan);
CURAND_CALL(curandDestroyGenerator(cuda_r));
config_destroy (&configuration);
printf("The programm is done!\n");
}
|
a1c5735a48f6d2d095f57ac5def825ce664a7c92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2021 Jisang Yoon
// All rights reserved.
//
// This source code is licensed under the Apache 2.0 license found in the
// LICENSE file in the root directory of this source tree.
#include "cuw2v/cuw2v.hpp"
#include "cuw2v/cuda_w2v_base_kernels.cuh"
#include "cuw2v/cuda_w2v_ns_kernels.cuh"
#include "cuw2v/cuda_w2v_hs_kernels.cuh"
namespace cusim {
struct HuffmanTreeNode {
float count;
int index, left, right;
HuffmanTreeNode(float count0, int index0, int left0, int right0) {
count = count0; index = index0; left = left0; right = right0;
}
};
std::vector<HuffmanTreeNode> huffman_nodes;
bool CompareIndex(int lhs, int rhs) {
return huffman_nodes[lhs].count > huffman_nodes[rhs].count;
}
CuW2V::CuW2V() {
logger_container_.reset(new CuSimLogger("w2v"));
logger_ = logger_container_->get_logger();
dev_info_ = GetDeviceInfo();
if (dev_info_.unknown) DEBUG0("Unknown device type");
INFO("cuda device info, major: {}, minor: {}, multi processors: {}, cores: {}",
dev_info_.major, dev_info_.minor, dev_info_.mp_cnt, dev_info_.cores);
}
CuW2V::~CuW2V() {}
bool CuW2V::Init(std::string opt_path) {
std::ifstream in(opt_path.c_str());
if (not in.is_open()) return false;
std::string str((std::istreambuf_iterator<char>(in)),
std::istreambuf_iterator<char>());
std::string err_cmt;
auto _opt = json11::Json::parse(str, err_cmt);
if (not err_cmt.empty()) return false;
opt_ = _opt;
logger_container_->set_log_level(opt_["c_log_level"].int_value());
num_dims_ = opt_["num_dims"].int_value();
block_dim_ = opt_["block_dim"].int_value();
block_cnt_ = opt_["hyper_threads"].number_value() * (dev_info_.cores / block_dim_);
sg_ = opt_["skip_gram"].bool_value();
cbow_mean_ = opt_["cbow_mean"].bool_value();
window_size_ = opt_["window_size"].int_value();
lr_ = opt_["lr"].number_value();
// if zero, we will use hierarchical softmax
neg_ = opt_["neg"].int_value();
// random seed
seed_ = opt_["seed"].int_value();
dev_rngs_.resize(block_cnt_);
hipLaunchKernelGGL(( InitRngsKernel), dim3(block_cnt_), dim3(1), 0, 0,
thrust::raw_pointer_cast(dev_rngs_.data()), seed_);
INFO("num_dims: {}, block_dim: {}, block_cnt: {}, objective type: {}, neg: {}",
num_dims_, block_dim_, block_cnt_, sg_? "skip gram": "cbow", neg_);
return true;
}
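// Build the unigram table used for negative sampling: each word receives a number of slots
// proportional to its weight in word_count, and negatives are later drawn by uniform indexing
// into this table.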
void CuW2V::BuildRandomTable(const double* word_count, const int num_words, const int table_size) {
num_words_ = num_words;
std::vector<int> host_random_table;
for (int i = 0; i < num_words; ++i) {
int weight = ::max(1, static_cast<int>(word_count[i] * static_cast<double>(table_size)));
for (int j = 0; j < weight; ++j)
host_random_table.push_back(i);
}
random_size_ = host_random_table.size();
dev_random_table_.resize(random_size_);
thrust::copy(host_random_table.begin(), host_random_table.end(), dev_random_table_.begin());
CHECK_CUDA(hipDeviceSynchronize());
INFO("random table initialzied, size: {} => {}", table_size, random_size_);
}
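// Build the Huffman tree for hierarchical softmax: repeatedly merge the two least frequent
// nodes, then walk the tree to collect each word's binary code and the internal-node indices
// (points) on its path, flattening them into CSR-style arrays (codes/points/hs_indptr) for the kernels.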
void CuW2V::BuildHuffmanTree(const float* word_count, const int num_words) {
num_words_ = num_words;
huffman_nodes.clear();
std::priority_queue<int, std::vector<int>, decltype(&CompareIndex)> pq(CompareIndex);
for (int i = 0; i < num_words; ++i) {
huffman_nodes.emplace_back(word_count[i], i, -1, -1);
pq.push(i);
}
for (int i = 0; i < num_words - 1; ++i) {
auto& min1 = huffman_nodes[pq.top()]; pq.pop();
auto& min2 = huffman_nodes[pq.top()]; pq.pop();
huffman_nodes.emplace_back(min1.count + min2.count, i + num_words, min1.index, min2.index);
pq.push(i + num_words);
}
std::vector<std::tuple<int, std::vector<bool>, std::vector<int>>> stack = {{pq.top(), {}, {}}};
int nodeid;
std::vector<bool> code;
std::vector<int> point;
std::vector<std::vector<bool>> codes(num_words);
std::vector<std::vector<int>> points(num_words);
max_depth_ = 0;
while (not stack.empty()) {
std::tie(nodeid, code, point) = stack.back();
stack.pop_back();
if (nodeid < num_words) {
codes[nodeid] = code;
points[nodeid] = point;
max_depth_ = ::max(max_depth_,
static_cast<int>(code.size()));
} else {
point.push_back(nodeid - num_words);
std::vector<bool> left_code = code;
std::vector<bool> right_code = code;
left_code.push_back(false);
right_code.push_back(true);
auto& node = huffman_nodes[nodeid];
stack.push_back(make_tuple(node.left, left_code, point));
stack.push_back(make_tuple(node.right, right_code, point));
}
}
std::vector<bool> host_codes;
std::vector<int> host_points;
std::vector<int> host_hs_indptr = {0};
int size = 0;
for (int i = 0; i < num_words; ++i) {
code = codes[i];
point = points[i];
int n = code.size();
size += n;
host_hs_indptr.push_back(size);
for (int j = 0; j < n; ++j) {
host_codes.push_back(code[j]);
host_points.push_back(point[j]);
}
}
dev_codes_.resize(size); dev_points_.resize(size); dev_hs_indptr_.resize(num_words + 1);
thrust::copy(host_codes.begin(), host_codes.end(), dev_codes_.begin());
thrust::copy(host_points.begin(), host_points.end(), dev_points_.begin());
thrust::copy(host_hs_indptr.begin(), host_hs_indptr.end(), dev_hs_indptr_.begin());
CHECK_CUDA(hipDeviceSynchronize());
huffman_nodes.clear();
}
void CuW2V::LoadModel(float* emb_in, float* emb_out) {
int out_words = neg_? num_words_: num_words_ - 1;
// copy embedding
DEBUG("copy model({} x {})", num_words_, num_dims_);
dev_emb_in_.resize(num_words_ * num_dims_);
dev_emb_out_.resize(out_words * num_dims_);
thrust::copy(emb_in, emb_in + num_words_ * num_dims_, dev_emb_in_.begin());
thrust::copy(emb_out, emb_out + out_words * num_dims_, dev_emb_out_.begin());
emb_in_ = emb_in; emb_out_ = emb_out;
CHECK_CUDA(hipDeviceSynchronize());
}
std::pair<float, float> CuW2V::FeedData(const int* cols, const int* indptr,
const int num_cols, const int num_indptr) {
// copy feed data to GPU memory
thrust::device_vector<int> dev_cols(num_cols);
thrust::device_vector<int> dev_indptr(num_indptr + 1);
thrust::device_vector<float> dev_loss_nume(block_cnt_, 0.0f);
thrust::device_vector<float> dev_loss_deno(block_cnt_, 0.0f);
thrust::copy(cols, cols + num_cols, dev_cols.begin());
thrust::copy(indptr, indptr + num_indptr + 1, dev_indptr.begin());
CHECK_CUDA(hipDeviceSynchronize());
DEBUG0("copy feed data to GPU memory");
// run GPU kernels
if (neg_ > 0) {
if (sg_) {
hipLaunchKernelGGL(( W2VNegSgKernel), dim3(block_cnt_), dim3(block_dim_), num_dims_ * sizeof(float), 0,
thrust::raw_pointer_cast(dev_cols.data()),
thrust::raw_pointer_cast(dev_indptr.data()),
thrust::raw_pointer_cast(dev_random_table_.data()),
thrust::raw_pointer_cast(dev_rngs_.data()),
random_size_, num_indptr, num_dims_, neg_, window_size_,
thrust::raw_pointer_cast(dev_emb_in_.data()),
thrust::raw_pointer_cast(dev_emb_out_.data()),
thrust::raw_pointer_cast(dev_loss_nume.data()),
thrust::raw_pointer_cast(dev_loss_deno.data()),
lr_);
} else {
hipLaunchKernelGGL(( W2VNegCbowKernel), dim3(block_cnt_), dim3(block_dim_), 2 * num_dims_ * sizeof(float), 0,
thrust::raw_pointer_cast(dev_cols.data()),
thrust::raw_pointer_cast(dev_indptr.data()),
thrust::raw_pointer_cast(dev_random_table_.data()),
thrust::raw_pointer_cast(dev_rngs_.data()),
random_size_, num_indptr, num_dims_, neg_, window_size_,
thrust::raw_pointer_cast(dev_emb_in_.data()),
thrust::raw_pointer_cast(dev_emb_out_.data()),
thrust::raw_pointer_cast(dev_loss_nume.data()),
thrust::raw_pointer_cast(dev_loss_deno.data()),
cbow_mean_, lr_);
}
} else {
if (sg_) {
hipLaunchKernelGGL(( W2VHsSgKernel), dim3(block_cnt_), dim3(block_dim_), num_dims_ * sizeof(float), 0,
thrust::raw_pointer_cast(dev_cols.data()),
thrust::raw_pointer_cast(dev_indptr.data()),
thrust::raw_pointer_cast(dev_codes_.data()),
thrust::raw_pointer_cast(dev_points_.data()),
thrust::raw_pointer_cast(dev_hs_indptr_.data()),
num_indptr, num_dims_, window_size_,
thrust::raw_pointer_cast(dev_rngs_.data()),
thrust::raw_pointer_cast(dev_emb_in_.data()),
thrust::raw_pointer_cast(dev_emb_out_.data()),
thrust::raw_pointer_cast(dev_loss_nume.data()),
thrust::raw_pointer_cast(dev_loss_deno.data()),
lr_);
} else {
hipLaunchKernelGGL(( W2VHsCbowKernel), dim3(block_cnt_), dim3(block_dim_), 2 * num_dims_ * sizeof(float), 0,
thrust::raw_pointer_cast(dev_cols.data()),
thrust::raw_pointer_cast(dev_indptr.data()),
thrust::raw_pointer_cast(dev_codes_.data()),
thrust::raw_pointer_cast(dev_points_.data()),
thrust::raw_pointer_cast(dev_hs_indptr_.data()),
num_indptr, num_dims_, window_size_,
thrust::raw_pointer_cast(dev_rngs_.data()),
thrust::raw_pointer_cast(dev_emb_in_.data()),
thrust::raw_pointer_cast(dev_emb_out_.data()),
thrust::raw_pointer_cast(dev_loss_nume.data()),
thrust::raw_pointer_cast(dev_loss_deno.data()),
cbow_mean_, lr_);
}
}
CHECK_CUDA(hipDeviceSynchronize());
// accumulate loss nume / deno
std::vector<float> loss_nume(block_cnt_), loss_deno(block_cnt_);
thrust::copy(dev_loss_nume.begin(), dev_loss_nume.end(), loss_nume.begin());
thrust::copy(dev_loss_deno.begin(), dev_loss_deno.end(), loss_deno.begin());
CHECK_CUDA(hipDeviceSynchronize());
float loss_nume_sum = std::accumulate(loss_nume.begin(), loss_nume.end(), 0.0f);
float loss_deno_sum = std::accumulate(loss_deno.begin(), loss_deno.end(), 0.0f);
DEBUG("loss nume: {}, deno: {}", loss_nume_sum, loss_deno_sum);
return {loss_nume_sum, loss_deno_sum};
}
void CuW2V::Pull() {
thrust::copy(dev_emb_in_.begin(), dev_emb_in_.end(), emb_in_);
thrust::copy(dev_emb_out_.begin(), dev_emb_out_.end(), emb_out_);
CHECK_CUDA(hipDeviceSynchronize());
}
} // namespace cusim
| a1c5735a48f6d2d095f57ac5def825ce664a7c92.cu | // Copyright (c) 2021 Jisang Yoon
// All rights reserved.
//
// This source code is licensed under the Apache 2.0 license found in the
// LICENSE file in the root directory of this source tree.
#include "cuw2v/cuw2v.hpp"
#include "cuw2v/cuda_w2v_base_kernels.cuh"
#include "cuw2v/cuda_w2v_ns_kernels.cuh"
#include "cuw2v/cuda_w2v_hs_kernels.cuh"
namespace cusim {
struct HuffmanTreeNode {
float count;
int index, left, right;
HuffmanTreeNode(float count0, int index0, int left0, int right0) {
count = count0; index = index0; left = left0; right = right0;
}
};
std::vector<HuffmanTreeNode> huffman_nodes;
bool CompareIndex(int lhs, int rhs) {
return huffman_nodes[lhs].count > huffman_nodes[rhs].count;
}
CuW2V::CuW2V() {
logger_container_.reset(new CuSimLogger("w2v"));
logger_ = logger_container_->get_logger();
dev_info_ = GetDeviceInfo();
if (dev_info_.unknown) DEBUG0("Unknown device type");
INFO("cuda device info, major: {}, minor: {}, multi processors: {}, cores: {}",
dev_info_.major, dev_info_.minor, dev_info_.mp_cnt, dev_info_.cores);
}
CuW2V::~CuW2V() {}
bool CuW2V::Init(std::string opt_path) {
std::ifstream in(opt_path.c_str());
if (not in.is_open()) return false;
std::string str((std::istreambuf_iterator<char>(in)),
std::istreambuf_iterator<char>());
std::string err_cmt;
auto _opt = json11::Json::parse(str, err_cmt);
if (not err_cmt.empty()) return false;
opt_ = _opt;
logger_container_->set_log_level(opt_["c_log_level"].int_value());
num_dims_ = opt_["num_dims"].int_value();
block_dim_ = opt_["block_dim"].int_value();
block_cnt_ = opt_["hyper_threads"].number_value() * (dev_info_.cores / block_dim_);
sg_ = opt_["skip_gram"].bool_value();
cbow_mean_ = opt_["cbow_mean"].bool_value();
window_size_ = opt_["window_size"].int_value();
lr_ = opt_["lr"].number_value();
// if zero, we will use hierarchical softmax
neg_ = opt_["neg"].int_value();
// random seed
seed_ = opt_["seed"].int_value();
dev_rngs_.resize(block_cnt_);
InitRngsKernel<<<block_cnt_, 1>>>(
thrust::raw_pointer_cast(dev_rngs_.data()), seed_);
INFO("num_dims: {}, block_dim: {}, block_cnt: {}, objective type: {}, neg: {}",
num_dims_, block_dim_, block_cnt_, sg_? "skip gram": "cbow", neg_);
return true;
}
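// Worked example of the sampling-table construction below (illustrative numbers only):
// with word_count = {0.5, 0.3, 0.2} and table_size = 10, the per-word weights are
// max(1, int(0.5*10)) = 5, 3 and 2, so the table becomes [0,0,0,0,0,1,1,1,2,2] and
// random_size_ = 10; a uniform random index into this table therefore picks words
// in proportion to word_count.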
void CuW2V::BuildRandomTable(const double* word_count, const int num_words, const int table_size) {
num_words_ = num_words;
std::vector<int> host_random_table;
for (int i = 0; i < num_words; ++i) {
int weight = std::max(1, static_cast<int>(word_count[i] * static_cast<double>(table_size)));
for (int j = 0; j < weight; ++j)
host_random_table.push_back(i);
}
random_size_ = host_random_table.size();
dev_random_table_.resize(random_size_);
thrust::copy(host_random_table.begin(), host_random_table.end(), dev_random_table_.begin());
CHECK_CUDA(cudaDeviceSynchronize());
INFO("random table initialzied, size: {} => {}", table_size, random_size_);
}
void CuW2V::BuildHuffmanTree(const float* word_count, const int num_words) {
num_words_ = num_words;
huffman_nodes.clear();
std::priority_queue<int, std::vector<int>, decltype(&CompareIndex)> pq(CompareIndex);
for (int i = 0; i < num_words; ++i) {
huffman_nodes.emplace_back(word_count[i], i, -1, -1);
pq.push(i);
}
for (int i = 0; i < num_words - 1; ++i) {
auto& min1 = huffman_nodes[pq.top()]; pq.pop();
auto& min2 = huffman_nodes[pq.top()]; pq.pop();
huffman_nodes.emplace_back(min1.count + min2.count, i + num_words, min1.index, min2.index);
pq.push(i + num_words);
}
std::vector<std::tuple<int, std::vector<bool>, std::vector<int>>> stack = {{pq.top(), {}, {}}};
int nodeid;
std::vector<bool> code;
std::vector<int> point;
std::vector<std::vector<bool>> codes(num_words);
std::vector<std::vector<int>> points(num_words);
max_depth_ = 0;
while (not stack.empty()) {
std::tie(nodeid, code, point) = stack.back();
stack.pop_back();
if (nodeid < num_words) {
codes[nodeid] = code;
points[nodeid] = point;
max_depth_ = std::max(max_depth_,
static_cast<int>(code.size()));
} else {
point.push_back(nodeid - num_words);
std::vector<bool> left_code = code;
std::vector<bool> right_code = code;
left_code.push_back(false);
right_code.push_back(true);
auto& node = huffman_nodes[nodeid];
stack.push_back(make_tuple(node.left, left_code, point));
stack.push_back(make_tuple(node.right, right_code, point));
}
}
std::vector<bool> host_codes;
std::vector<int> host_points;
std::vector<int> host_hs_indptr = {0};
int size = 0;
for (int i = 0; i < num_words; ++i) {
code = codes[i];
point = points[i];
int n = code.size();
size += n;
host_hs_indptr.push_back(size);
for (int j = 0; j < n; ++j) {
host_codes.push_back(code[j]);
host_points.push_back(point[j]);
}
}
dev_codes_.resize(size); dev_points_.resize(size), dev_hs_indptr_.resize(num_words + 1);
thrust::copy(host_codes.begin(), host_codes.end(), dev_codes_.begin());
thrust::copy(host_points.begin(), host_points.end(), dev_points_.begin());
thrust::copy(host_hs_indptr.begin(), host_hs_indptr.end(), dev_hs_indptr_.begin());
CHECK_CUDA(cudaDeviceSynchronize());
huffman_nodes.clear();
}
void CuW2V::LoadModel(float* emb_in, float* emb_out) {
int out_words = neg_? num_words_: num_words_ - 1;
// copy embedding
DEBUG("copy model({} x {})", num_words_, num_dims_);
dev_emb_in_.resize(num_words_ * num_dims_);
dev_emb_out_.resize(out_words * num_dims_);
thrust::copy(emb_in, emb_in + num_words_ * num_dims_, dev_emb_in_.begin());
thrust::copy(emb_out, emb_out + out_words * num_dims_, dev_emb_out_.begin());
emb_in_ = emb_in; emb_out_ = emb_out;
CHECK_CUDA(cudaDeviceSynchronize());
}
std::pair<float, float> CuW2V::FeedData(const int* cols, const int* indptr,
const int num_cols, const int num_indptr) {
// copy feed data to GPU memory
thrust::device_vector<int> dev_cols(num_cols);
thrust::device_vector<int> dev_indptr(num_indptr + 1);
thrust::device_vector<float> dev_loss_nume(block_cnt_, 0.0f);
thrust::device_vector<float> dev_loss_deno(block_cnt_, 0.0f);
thrust::copy(cols, cols + num_cols, dev_cols.begin());
thrust::copy(indptr, indptr + num_indptr + 1, dev_indptr.begin());
CHECK_CUDA(cudaDeviceSynchronize());
DEBUG0("copy feed data to GPU memory");
// run GPU kernels
if (neg_ > 0) {
if (sg_) {
W2VNegSgKernel<<<block_cnt_, block_dim_, num_dims_ * sizeof(float)>>>(
thrust::raw_pointer_cast(dev_cols.data()),
thrust::raw_pointer_cast(dev_indptr.data()),
thrust::raw_pointer_cast(dev_random_table_.data()),
thrust::raw_pointer_cast(dev_rngs_.data()),
random_size_, num_indptr, num_dims_, neg_, window_size_,
thrust::raw_pointer_cast(dev_emb_in_.data()),
thrust::raw_pointer_cast(dev_emb_out_.data()),
thrust::raw_pointer_cast(dev_loss_nume.data()),
thrust::raw_pointer_cast(dev_loss_deno.data()),
lr_);
} else {
W2VNegCbowKernel<<<block_cnt_, block_dim_, 2 * num_dims_ * sizeof(float)>>>(
thrust::raw_pointer_cast(dev_cols.data()),
thrust::raw_pointer_cast(dev_indptr.data()),
thrust::raw_pointer_cast(dev_random_table_.data()),
thrust::raw_pointer_cast(dev_rngs_.data()),
random_size_, num_indptr, num_dims_, neg_, window_size_,
thrust::raw_pointer_cast(dev_emb_in_.data()),
thrust::raw_pointer_cast(dev_emb_out_.data()),
thrust::raw_pointer_cast(dev_loss_nume.data()),
thrust::raw_pointer_cast(dev_loss_deno.data()),
cbow_mean_, lr_);
}
} else {
if (sg_) {
W2VHsSgKernel<<<block_cnt_, block_dim_, num_dims_ * sizeof(float)>>>(
thrust::raw_pointer_cast(dev_cols.data()),
thrust::raw_pointer_cast(dev_indptr.data()),
thrust::raw_pointer_cast(dev_codes_.data()),
thrust::raw_pointer_cast(dev_points_.data()),
thrust::raw_pointer_cast(dev_hs_indptr_.data()),
num_indptr, num_dims_, window_size_,
thrust::raw_pointer_cast(dev_rngs_.data()),
thrust::raw_pointer_cast(dev_emb_in_.data()),
thrust::raw_pointer_cast(dev_emb_out_.data()),
thrust::raw_pointer_cast(dev_loss_nume.data()),
thrust::raw_pointer_cast(dev_loss_deno.data()),
lr_);
} else {
W2VHsCbowKernel<<<block_cnt_, block_dim_, 2 * num_dims_ * sizeof(float)>>>(
thrust::raw_pointer_cast(dev_cols.data()),
thrust::raw_pointer_cast(dev_indptr.data()),
thrust::raw_pointer_cast(dev_codes_.data()),
thrust::raw_pointer_cast(dev_points_.data()),
thrust::raw_pointer_cast(dev_hs_indptr_.data()),
num_indptr, num_dims_, window_size_,
thrust::raw_pointer_cast(dev_rngs_.data()),
thrust::raw_pointer_cast(dev_emb_in_.data()),
thrust::raw_pointer_cast(dev_emb_out_.data()),
thrust::raw_pointer_cast(dev_loss_nume.data()),
thrust::raw_pointer_cast(dev_loss_deno.data()),
cbow_mean_, lr_);
}
}
CHECK_CUDA(cudaDeviceSynchronize());
// accumulate loss nume / deno
std::vector<float> loss_nume(block_cnt_), loss_deno(block_cnt_);
thrust::copy(dev_loss_nume.begin(), dev_loss_nume.end(), loss_nume.begin());
thrust::copy(dev_loss_deno.begin(), dev_loss_deno.end(), loss_deno.begin());
CHECK_CUDA(cudaDeviceSynchronize());
float loss_nume_sum = std::accumulate(loss_nume.begin(), loss_nume.end(), 0.0f);
float loss_deno_sum = std::accumulate(loss_deno.begin(), loss_deno.end(), 0.0f);
DEBUG("loss nume: {}, deno: {}", loss_nume_sum, loss_deno_sum);
return {loss_nume_sum, loss_deno_sum};
}
void CuW2V::Pull() {
thrust::copy(dev_emb_in_.begin(), dev_emb_in_.end(), emb_in_);
thrust::copy(dev_emb_out_.begin(), dev_emb_out_.end(), emb_out_);
CHECK_CUDA(cudaDeviceSynchronize());
}
} // namespace cusim
|
32549aeb5ff675e88e6654234dc286ac02087d20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
This program generates the DPH for the given input angles thetaX and thetaY.
Here the total no. of photons accepted is 2000000.
This program uses GPU computing. To execute it for all four quadrants use the command
mpirun -np 3 -host cn001 ./cudaevent 2.3215 2.3215 : -np 1 -host cn002 ./cudaevent
This will use three GPU cards from the first node and one GPU card from the second node.
Author: Ajay Vibhute
*/
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<string.h>
#include<time.h>
#include<sys/time.h>
#include<cuda.h>
#include <hiprand/hiprand_kernel.h>
#include "fitsio.h"
#include <mpi.h>
#define PI 3.14159265
#define noBlock 10
#define noThread 256
#define TOTALACCEPTED 25600
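/*
With the defaults above (illustrative arithmetic only):
noBlock * noThread = 10 * 256 = 2560 threads in total, so each thread traces
countPerThread = TOTALACCEPTED / 2560 = 10 accepted photons and
remainingCount = TOTALACCEPTED % 2560 = 0 photons are left over.
*/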
//declaring the required functions
void printerror( int );
__global__ void kernel(float tx,float ty,float height,int *maskPattern,float * dphValues,int *,int *,int,int,int*,int*,int*,int*,int);
__device__ void generateEvent(float tx,float ty,float height,int *maskPattern,float * dphValues,int *,int*,int,int*,int*,int*,int*,int);
void executeKernel(float tx,float ty,int myrank,int gpuId,int*accepted,int*rejected,int);
__device__ void getDetectorIdPixelNo(int x,int y,int *moduleNo,int *pixelNo,int detectorId);
void getTimeEnergy(char * filename,int *time,int * energy);
void cudaInit(int gpuId);
//start of main
int main(int argc,char*argv[])
{
int myrank=0,npes=0;
float timeSpent=0.0;
fitsfile *fptr=NULL;
int status=0;
char output_filename[100] ="",temp[100],hostname[100];
int bitpix = FLOAT_IMG; /* 32-bit floating point pixel values */
float tx=0,ty=0;
int gpuId=0,totalAccepted=0,totalRejected=0,totalGeneratedCount=0,totalGeneratedCountMean=0;
int *x=NULL,*y=NULL,*detectorId=NULL,*pixelNo=NULL, *time=NULL,*energy=NULL;
MPI_Status stat;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&npes);
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
totalGeneratedCountMean++;
totalGeneratedCount=TOTALACCEPTED;//2001920;
//reading inputs at rank 0
if(myrank==0)
{
int buf[TOTALACCEPTED];
MPI_Buffer_attach( buf, TOTALACCEPTED+1000 );
timeSpent=MPI_Wtime();
if(argv[1]==NULL||argv[2]==NULL)
{
printf("Enter value for ThetaX\n");
scanf("%f",&tx);
printf("Enter value for ThetaY\n");
scanf("%f",&ty);
}
else
{
tx=(float)atof(argv[1]);
ty=(float)atof(argv[2]);
}
/*
Calculate the total generated Count by using poisson distribution.
*/
}
MPI_Barrier(MPI_COMM_WORLD);
//Broadcasting the inputs
MPI_Bcast(&tx,1,MPI_FLOAT,0,MPI_COMM_WORLD);
MPI_Bcast(&ty,1,MPI_FLOAT,0,MPI_COMM_WORLD);
//Assigning GPU cards
if(myrank!=3)
{
gpuId=myrank;
}
else
{
//The last MPI process will run on another node, so first GPU card will be used.
gpuId=0;
}
//calling kernel
executeKernel(tx,ty,myrank,gpuId,&totalAccepted,&totalRejected,totalGeneratedCount);
gethostname(hostname,sizeof(hostname));
if(myrank==0)
{
//writing event file
time=(int*)malloc(sizeof(int)*totalGeneratedCount);
energy=(int*)malloc(sizeof(int)*totalGeneratedCount);
x=(int*)malloc(sizeof(int)*totalGeneratedCount);
y=(int*)malloc(sizeof(int)*totalGeneratedCount);
detectorId=(int*)malloc(sizeof(int)*totalGeneratedCount);
pixelNo=(int*)malloc(sizeof(int)*totalGeneratedCount);
int tfields =6; /* table will have 6 columns */
long nrows = TOTALACCEPTED; /* table will have TOTALACCEPTED rows */
char extname[] = "EVENT"; /* extension name */
char *ttype[] = { "TIME", "PHA", "DETID","PIXID","DETX","DETY" };
char *tform[] = { "1I", "1I", "1I" ,"1I" ,"1I","1I" };
char *tunit[] = { "s", "\0", "\0", "\0", "\0", "\0"};
long firstrow=1, firstelem=1;
status=0;
sprintf(temp,"%f",tx);
strcat(output_filename,temp);
strcat(output_filename,"_");
bzero(temp,sizeof(temp));
sprintf(temp,"%f",ty);
strcat(output_filename,temp);
strcat(output_filename,".event");
bzero(temp,sizeof(temp));
strcpy(temp,"rm ");
strcat(temp,output_filename);
system(temp);//to remove existing fits file
bzero(temp,sizeof(temp));
if (fits_create_file(&fptr, output_filename, &status))
{
printerror( status );
MPI_Finalize();
}
if ( fits_create_img(fptr, bitpix, 0, 0, &status) )
{
printerror( status );
MPI_Finalize();
}
for(int i=0;i<npes;i++)
{ getTimeEnergy("TimeEnergy",time,energy);
MPI_Recv (x,totalGeneratedCount, MPI_INT, MPI_ANY_SOURCE,i+10, MPI_COMM_WORLD, &stat);
MPI_Recv (y,totalGeneratedCount, MPI_INT, MPI_ANY_SOURCE,i+20, MPI_COMM_WORLD, &stat);
MPI_Recv (detectorId,totalGeneratedCount, MPI_INT, MPI_ANY_SOURCE,i+30, MPI_COMM_WORLD, &stat);
MPI_Recv (pixelNo,totalGeneratedCount, MPI_INT, MPI_ANY_SOURCE,i+40, MPI_COMM_WORLD, &stat);
strcpy(extname,"Q");
sprintf(temp,"%d",i);
strcat(extname,temp);
if ( fits_create_tbl( fptr, BINARY_TBL, nrows, tfields, ttype, tform,tunit, extname, &status) )
printerror( status );
fits_write_col(fptr, TINT, 1, firstrow, firstelem, nrows, time,&status);
fits_write_col(fptr,TINT,2,firstrow, firstelem, nrows, energy,&status);
fits_write_col(fptr, TINT, 3, firstrow, firstelem, nrows, detectorId,&status);
fits_write_col(fptr, TINT, 4, firstrow, firstelem, nrows, pixelNo,&status);
fits_write_col(fptr, TINT, 5, firstrow, firstelem, nrows, x,&status);
fits_write_col(fptr, TINT, 6, firstrow, firstelem, nrows, y,&status);
}
if ( fits_close_file(fptr, &status) )
printerror( status );
timeSpent=MPI_Wtime()-timeSpent;
}
MPI_Finalize();
}
//Reading time and energy of events from the input file
void getTimeEnergy(char * filename,int *time,int * energy)
{
FILE *fp;
int i=0;
fp=fopen(filename,"r");
if(fp==NULL)
{
printf("Error(%s:%d):Error while opening %s file\n",__FILE__,__LINE__,filename);
exit(0);
}
for(i=0;i<TOTALACCEPTED;i++)
{
fscanf(fp,"%d",&time[i]);
fscanf(fp,"%d",&energy[i]);
}
fclose(fp);
}
//Initializing the CUDA environment
void cudaInit(int gpuId)
{
hipSetDevice(gpuId);
float *initmalloc;
hipMalloc(&initmalloc,sizeof(float));
}
//function to simulate event files
void executeKernel(float tx,float ty,int myrank,int gpuId,int *totalAccepted,int *totalRejected,int totalGeneratedCount)
{
float height=481,*dphValues=NULL,*dphValues_device=NULL;
int *maskPattern=NULL,*maskPattern_device=NULL,no_elements=64;
int *acceptedCount=NULL,*rejectedCount=NULL,*acceptedCount_device=NULL,*rejectedCount_device=NULL;
int i=0,j=0;
char mask_fileName[100],temp[100];
FILE *fp;
hipError_t cuerr;
int noOfBlock=noBlock,noOfThread=noThread;
int totalThreads=noOfBlock*noOfThread;
int totalElements=totalThreads*no_elements*no_elements;
int countPerThread=totalGeneratedCount/totalThreads;
int remainingCount=totalGeneratedCount%totalThreads;
int *x,*y,*pixelNo,*detectorId;
int *x_device,*y_device,*pixelNo_device,*detectorId_device;
/*
set and initialise the CUDA device
*/
cudaInit(gpuId);
/*
Allocation of the memory for the host and device
*/
x=(int*)malloc(sizeof(int*)*(totalGeneratedCount));
y=(int*)malloc(sizeof(int*)*(totalGeneratedCount));
pixelNo=(int*)malloc(sizeof(int*)*(totalGeneratedCount));
detectorId=(int*)malloc(sizeof(int*)*(totalGeneratedCount));
maskPattern=(int*)malloc(sizeof(int)*no_elements*no_elements);
dphValues=(float*)malloc(sizeof(float)*totalElements);
acceptedCount=(int*)malloc(sizeof(int)*totalThreads);
rejectedCount=(int*)malloc(sizeof(int)*totalThreads);
/*
Here add a loop to execute it for each quadrant
*/
strcpy(mask_fileName,"maskpattern/Q");
sprintf(temp,"%d",myrank);
strcat(mask_fileName,temp);
strcat(mask_fileName,"mask.dat");
fp=fopen(mask_fileName,"r");
if(fp==NULL)
{
printf("Error(%s:%d):%s file not exist\n",__FILE__,__LINE__,mask_fileName);
exit(0);
}
for(i=0;i<no_elements;i++)
{
for(j=0;j<no_elements;j++)
{
fscanf(fp,"%d",&maskPattern[((63-i)*no_elements)+j]);
}
}
fclose(fp);
/*
Memory Allocation for device
*/
if((cuerr = hipGetLastError()) != hipSuccess)
{
printf("\nError:(Pre Malloc) \"%s\"\n", hipGetErrorString(cuerr));
}
hipMalloc(&x_device,sizeof(int*)*(totalGeneratedCount));
hipMalloc(&y_device,sizeof(int*)*(totalGeneratedCount));
hipMalloc(&pixelNo_device,sizeof(int*)*(totalGeneratedCount));
hipMalloc(&detectorId_device,sizeof(int*)*(totalGeneratedCount));
hipMalloc(&maskPattern_device,sizeof(int)*no_elements*no_elements);
hipMalloc(&dphValues_device,sizeof(float)*totalElements);
hipMalloc(&acceptedCount_device,sizeof(int)*totalThreads);
hipMalloc(&rejectedCount_device,sizeof(int)*totalThreads);
if((cuerr = hipGetLastError()) != hipSuccess)
{
printf("\nError:(Pre CudaMemcpy) \"%s\"\n", hipGetErrorString(cuerr));
}
//copying mask pattern
hipMemcpy(maskPattern_device,maskPattern,sizeof(int)*no_elements*no_elements,hipMemcpyHostToDevice);
if((cuerr = hipGetLastError()) != hipSuccess)
{
printf("\nError:(Post memcpy) \"%s\"\n", hipGetErrorString(cuerr));
}
//calling kernel
hipLaunchKernelGGL(( kernel), dim3(noOfBlock),dim3(noOfThread), 0, 0, tx,ty,height,maskPattern_device,dphValues_device,acceptedCount_device,rejectedCount_device,countPerThread,remainingCount,x_device,y_device,pixelNo_device,detectorId_device,myrank);
//copying back the results
hipMemcpy(dphValues,dphValues_device,sizeof(float)*totalElements,hipMemcpyDeviceToHost);
hipMemcpy(acceptedCount,acceptedCount_device,sizeof(int)*totalThreads,hipMemcpyDeviceToHost);
hipMemcpy(rejectedCount,rejectedCount_device,sizeof(int)*totalThreads,hipMemcpyDeviceToHost);
hipMemcpy(x,x_device,sizeof(int)*totalGeneratedCount,hipMemcpyDeviceToHost);
hipMemcpy(y,y_device,sizeof(int)*totalGeneratedCount,hipMemcpyDeviceToHost);
hipMemcpy(pixelNo,pixelNo_device,sizeof(int)*totalGeneratedCount,hipMemcpyDeviceToHost);
hipMemcpy(detectorId,detectorId_device,sizeof(int)*totalGeneratedCount,hipMemcpyDeviceToHost);
if((cuerr = hipGetLastError()) != hipSuccess)
{
printf("\nError:(Post dph kernel) \"%s\"\n", hipGetErrorString(cuerr));
}
MPI_Send (x,totalGeneratedCount, MPI_INT, 0,myrank+10, MPI_COMM_WORLD );
MPI_Send (y,totalGeneratedCount, MPI_INT, 0, myrank+20, MPI_COMM_WORLD );
MPI_Send (pixelNo,totalGeneratedCount, MPI_INT, 0, myrank+30, MPI_COMM_WORLD );
MPI_Send (detectorId,totalGeneratedCount, MPI_INT, 0, myrank+40, MPI_COMM_WORLD );
//releasing the memory
hipFree(&y_device);
hipFree(&x_device);
hipFree(&pixelNo_device);
hipFree(&detectorId_device);
hipFree(&maskPattern_device);
hipFree(&dphValues_device);
hipFree(acceptedCount_device);
hipFree(rejectedCount_device);
free(&maskPattern);
free(&acceptedCount);
free(&rejectedCount);
}//end of main
__global__ void kernel(float tx,float ty,float height,int *maskPattern,float * dphValues,int *acceptedCount,int *rejectedCount,int countPerThread,int remainingCount,int*x,int*y,int*pixelNo,int*detectorId,int quadrantId)
{ int index=blockIdx.x*blockDim.x+threadIdx.x;
//Generating event using ray tracing
generateEvent(tx,ty,height,maskPattern,&dphValues[index*64*64],&acceptedCount[index],&rejectedCount[index],countPerThread,&x[index*countPerThread],&y[index*countPerThread],&pixelNo[index*countPerThread],&detectorId[index*countPerThread],quadrantId);
}
//function to convert length to detector module and pixel number
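//Worked example (illustrative): for detector coordinates x=17, y=5 in quadrant 0,
//moduleNo starts as (17/16)+((5/16)*4) = 1 and is remapped to 13, the local offsets
//become xtemp=1, ytemp=5, and pixelNo = (16-1-5)*16 + 1 = 161.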
__device__ void getDetectorIdPixelNo(int x,int y,int *moduleNo,int *pixelNo,int quadrantId)
{
int xtemp=0,ytemp=0;
int moduleRows=16,moduleCols=16;
*moduleNo=(x/16)+((y/16)*4);
if(*moduleNo<4)
*moduleNo+=12;
else if(*moduleNo>=4 && *moduleNo<8)
*moduleNo+=4;
else if(*moduleNo>=8 && *moduleNo<12)
*moduleNo-=4;
else
*moduleNo-=12;
xtemp=x;
ytemp=y;
while(xtemp>=16)
xtemp-=16;
while(ytemp>=16)
ytemp-=16;
*pixelNo=(moduleRows-1-ytemp)*moduleCols+xtemp;
if(quadrantId==1||quadrantId==2)
{
*moduleNo=15-*moduleNo;
}
}
//function to perform ray tracing
__device__ void generateEvent(float tx,float ty,float height,int *maskPattern,float * dphValues,int *accepted,int *rejected,int countPerThread,int*x_id,int*y_id,int *pixelNo,int *detectorId ,int quadrantId )
{
float binSize=2.5;
float xmin=0,ymin=0,thetaX=0.0,thetaY=0.0;
float mask_lower_left_x=0,mask_lower_left_y=0,x=0,y=0,x_dect=0,y_dect=0;;
int acceptedCount=0,rejectedCount=0,totalCount=0;
thetaX=ty*(PI/180);
thetaY=tx*(PI/180);
float x_mask=0,y_mask=0;
//initializing the device random number generator state
hiprandState_t localState;
hiprand_init(0,clock(), 0, &localState);
for(int i=0;i<64;i++)
{
for(int j=0;j<64;j++)
dphValues[i*64+j]=0;
}
while(acceptedCount<countPerThread)
{
totalCount++;
xmin=0;
ymin=0;
//generating random lengths
x=hiprand_uniform (&localState);
x*=159.5;
y=hiprand_uniform (&localState);
y*=159.5;
//getting pixel on the mask plate
x_mask=(x)*10;
x_mask=(int)x_mask;
x_mask/=10;
y_mask=y*10;
y_mask=(int)y_mask;
y_mask/=10;
while(1)
{
if(fmod(x_mask,binSize)<0.01)
break;
x_mask-=0.01;
}
while(1)
{
if(fmod(y_mask,binSize)<0.01)
break;
y_mask-=0.01;
}
x_mask*=10;
x_mask=(int)x_mask;
x_mask/=10;
y_mask*=10;
y_mask=(int)y_mask;
y_mask/=10;
//checking whether the mask element is open or closed
if(maskPattern[(int)(((x_mask/binSize)*64)+(y_mask/binSize))]==1)
{
x_dect=x-(height*(tan(thetaX)));
y_dect=y-(height*(tan(thetaY)));
while(x>=((xmin+16)*binSize))
{
if(xmin==48.0f)
break;
xmin+=16;
}
while(y>=((ymin+16)*binSize))
{
if(ymin==48.0f)
break;
ymin+=16;
}
if(x_dect<(xmin*binSize)|| (((xmin+16)*binSize)-x_dect)<0.1||y_dect<(ymin*binSize)||(((ymin+16)*binSize)-y_dect)<0.1)
{
rejectedCount++;
}
else
{
mask_lower_left_x=(x_dect)*10;
mask_lower_left_x=(int)mask_lower_left_x;
mask_lower_left_x/=10;
mask_lower_left_y=y_dect*10;
mask_lower_left_y=(int)mask_lower_left_y;
mask_lower_left_y/=10;
while(1)
{
if(fmod(mask_lower_left_x,binSize)<0.01)
break;
mask_lower_left_x-=0.01;
}
while(1)
{
if(fmod(mask_lower_left_y,binSize)<0.01)
break;
mask_lower_left_y-=0.01;
}
mask_lower_left_x*=10;
mask_lower_left_x=(int)mask_lower_left_x;
mask_lower_left_x/=10;
mask_lower_left_y*=10;
mask_lower_left_y=(int)mask_lower_left_y;
mask_lower_left_y/=10;
/*Get Pixel No and detector Id*/
x_id[acceptedCount]=(int)(mask_lower_left_y/2.5);
y_id[acceptedCount]=(int)(mask_lower_left_x/2.5);
getDetectorIdPixelNo(x_id[acceptedCount],y_id[acceptedCount],&detectorId[acceptedCount],&pixelNo[acceptedCount],quadrantId);
acceptedCount++;
}//end of else, i.e., pixel falls inside the detector
}
else
{
rejectedCount++;
}
}//end of the while
accepted[0]=acceptedCount;
rejected[0]=rejectedCount;
}
void printerror( int status)
{
if (status)
{
fits_report_error(stderr, status);
exit( status );
}
return;
}
| 32549aeb5ff675e88e6654234dc286ac02087d20.cu | /*
This program generates the DPH for given input angle thetaX and thetaY
Here the total no of photons accepted is 2000000
This program uses GPU computing, To execute it for all the four quadrant use the command
mpirun -np 3 -host cn001 ./cudaevent 2.3215 2.3215 : -np 1 -host cn002 ./cudaevent
This will use three gpu cards from first node and one gpu card from second node.
Author: Ajay Vibhute
*/
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<string.h>
#include<time.h>
#include<sys/time.h>
#include<cuda.h>
#include <curand_kernel.h>
#include "fitsio.h"
#include <mpi.h>
#define PI 3.14159265
#define noBlock 10
#define noThread 256
#define TOTALACCEPTED 25600
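/*
With the defaults above (illustrative arithmetic only):
noBlock * noThread = 10 * 256 = 2560 threads in total, so each thread traces
countPerThread = TOTALACCEPTED / 2560 = 10 accepted photons and
remainingCount = TOTALACCEPTED % 2560 = 0 photons are left over.
*/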
//declaring the required functions
void printerror( int );
__global__ void kernel(float tx,float ty,float height,int *maskPattern,float * dphValues,int *,int *,int,int,int*,int*,int*,int*,int);
__device__ void generateEvent(float tx,float ty,float height,int *maskPattern,float * dphValues,int *,int*,int,int*,int*,int*,int*,int);
void executeKernel(float tx,float ty,int myrank,int gpuId,int*accepted,int*rejected,int);
__device__ void getDetectorIdPixelNo(int x,int y,int *moduleNo,int *pixelNo,int detectorId);
void getTimeEnergy(char * filename,int *time,int * energy);
void cudaInit(int gpuId);
//start of main
int main(int argc,char*argv[])
{
int myrank=0,npes=0;
float timeSpent=0.0;
fitsfile *fptr=NULL;
int status=0;
char output_filename[100] ="",temp[100],hostname[100];
int bitpix = FLOAT_IMG; /* 32-bit floating point pixel values */
float tx=0,ty=0;
int gpuId=0,totalAccepted=0,totalRejected=0,totalGeneratedCount=0,totalGeneratedCountMean=0;
int *x=NULL,*y=NULL,*detectorId=NULL,*pixelNo=NULL, *time=NULL,*energy=NULL;
MPI_Status stat;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&npes);
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
totalGeneratedCountMean++;
totalGeneratedCount=TOTALACCEPTED;//2001920;
//reading inputs at rank 0
if(myrank==0)
{
int buf[TOTALACCEPTED];
MPI_Buffer_attach( buf, TOTALACCEPTED+1000 );
timeSpent=MPI_Wtime();
if(argv[1]==NULL||argv[2]==NULL)
{
printf("Enter value for ThetaX\n");
scanf("%f",&tx);
printf("Enter value for ThetaY\n");
scanf("%f",&ty);
}
else
{
tx=(float)atof(argv[1]);
ty=(float)atof(argv[2]);
}
/*
Calculate the total generated Count by using poisson distribution.
*/
}
MPI_Barrier(MPI_COMM_WORLD);
//Broadcasting the inputs
MPI_Bcast(&tx,1,MPI_FLOAT,0,MPI_COMM_WORLD);
MPI_Bcast(&ty,1,MPI_FLOAT,0,MPI_COMM_WORLD);
//Assigning GPU cards
if(myrank!=3)
{
gpuId=myrank;
}
else
{
//The last MPI process will run on another node, so first GPU card will be used.
gpuId=0;
}
//calling kernel
executeKernel(tx,ty,myrank,gpuId,&totalAccepted,&totalRejected,totalGeneratedCount);
gethostname(hostname,sizeof(hostname));
if(myrank==0)
{
//writing event file
time=(int*)malloc(sizeof(int)*totalGeneratedCount);
energy=(int*)malloc(sizeof(int)*totalGeneratedCount);
x=(int*)malloc(sizeof(int)*totalGeneratedCount);
y=(int*)malloc(sizeof(int)*totalGeneratedCount);
detectorId=(int*)malloc(sizeof(int)*totalGeneratedCount);
pixelNo=(int*)malloc(sizeof(int)*totalGeneratedCount);
int tfields =6; /* table will have 6 columns */
long nrows = TOTALACCEPTED; /* table will have TOTALACCEPTED rows */
char extname[] = "EVENT"; /* extension name */
char *ttype[] = { "TIME", "PHA", "DETID","PIXID","DETX","DETY" };
char *tform[] = { "1I", "1I", "1I" ,"1I" ,"1I","1I" };
char *tunit[] = { "s", "\0", "\0", "\0", "\0", "\0"};
long firstrow=1, firstelem=1;
status=0;
sprintf(temp,"%f",tx);
strcat(output_filename,temp);
strcat(output_filename,"_");
bzero(temp,sizeof(temp));
sprintf(temp,"%f",ty);
strcat(output_filename,temp);
strcat(output_filename,".event");
bzero(temp,sizeof(temp));
strcpy(temp,"rm ");
strcat(temp,output_filename);
system(temp);//to remove existing fits file
bzero(temp,sizeof(temp));
if (fits_create_file(&fptr, output_filename, &status))
{
printerror( status );
MPI_Finalize();
}
if ( fits_create_img(fptr, bitpix, 0, 0, &status) )
{
printerror( status );
MPI_Finalize();
}
for(int i=0;i<npes;i++)
{ getTimeEnergy("TimeEnergy",time,energy);
MPI_Recv (x,totalGeneratedCount, MPI_INT, MPI_ANY_SOURCE,i+10, MPI_COMM_WORLD, &stat);
MPI_Recv (y,totalGeneratedCount, MPI_INT, MPI_ANY_SOURCE,i+20, MPI_COMM_WORLD, &stat);
MPI_Recv (detectorId,totalGeneratedCount, MPI_INT, MPI_ANY_SOURCE,i+30, MPI_COMM_WORLD, &stat);
MPI_Recv (pixelNo,totalGeneratedCount, MPI_INT, MPI_ANY_SOURCE,i+40, MPI_COMM_WORLD, &stat);
strcpy(extname,"Q");
sprintf(temp,"%d",i);
strcat(extname,temp);
if ( fits_create_tbl( fptr, BINARY_TBL, nrows, tfields, ttype, tform,tunit, extname, &status) )
printerror( status );
fits_write_col(fptr, TINT, 1, firstrow, firstelem, nrows, time,&status);
fits_write_col(fptr,TINT,2,firstrow, firstelem, nrows, energy,&status);
fits_write_col(fptr, TINT, 3, firstrow, firstelem, nrows, detectorId,&status);
fits_write_col(fptr, TINT, 4, firstrow, firstelem, nrows, pixelNo,&status);
fits_write_col(fptr, TINT, 5, firstrow, firstelem, nrows, x,&status);
fits_write_col(fptr, TINT, 6, firstrow, firstelem, nrows, y,&status);
}
if ( fits_close_file(fptr, &status) )
printerror( status );
timeSpent=MPI_Wtime()-timeSpent;
}
MPI_Finalize();
}
//Reading time and energy of events from the input file
void getTimeEnergy(char * filename,int *time,int * energy)
{
FILE *fp;
int i=0;
fp=fopen(filename,"r");
if(fp==NULL)
{
printf("Error(%s:%d):Error while opening %s file\n",__FILE__,__LINE__,filename);
exit(0);
}
for(i=0;i<TOTALACCEPTED;i++)
{
fscanf(fp,"%d",&time[i]);
fscanf(fp,"%d",&energy[i]);
}
fclose(fp);
}
//Initializing the CUDA environment
void cudaInit(int gpuId)
{
cudaSetDevice(gpuId);
float *initmalloc;
cudaMalloc(&initmalloc,sizeof(float));
}
//function to simulate event files
void executeKernel(float tx,float ty,int myrank,int gpuId,int *totalAccepted,int *totalRejected,int totalGeneratedCount)
{
float height=481,*dphValues=NULL,*dphValues_device=NULL;
int *maskPattern=NULL,*maskPattern_device=NULL,no_elements=64;
int *acceptedCount=NULL,*rejectedCount=NULL,*acceptedCount_device=NULL,*rejectedCount_device=NULL;
int i=0,j=0;
char mask_fileName[100],temp[100];
FILE *fp;
cudaError_t cuerr;
int noOfBlock=noBlock,noOfThread=noThread;
int totalThreads=noOfBlock*noOfThread;
int totalElements=totalThreads*no_elements*no_elements;
int countPerThread=totalGeneratedCount/totalThreads;
int remainingCount=totalGeneratedCount%totalThreads;
int *x,*y,*pixelNo,*detectorId;
int *x_device,*y_device,*pixelNo_device,*detectorId_device;
/*
set and initialise the CUDA device
*/
cudaInit(gpuId);
/*
Allocation of the memory for the host and device
*/
x=(int*)malloc(sizeof(int*)*(totalGeneratedCount));
y=(int*)malloc(sizeof(int*)*(totalGeneratedCount));
pixelNo=(int*)malloc(sizeof(int*)*(totalGeneratedCount));
detectorId=(int*)malloc(sizeof(int*)*(totalGeneratedCount));
maskPattern=(int*)malloc(sizeof(int)*no_elements*no_elements);
dphValues=(float*)malloc(sizeof(float)*totalElements);
acceptedCount=(int*)malloc(sizeof(int)*totalThreads);
rejectedCount=(int*)malloc(sizeof(int)*totalThreads);
/*
Here add a loop to execute it for each quadrant
*/
strcpy(mask_fileName,"maskpattern/Q");
sprintf(temp,"%d",myrank);
strcat(mask_fileName,temp);
strcat(mask_fileName,"mask.dat");
fp=fopen(mask_fileName,"r");
if(fp==NULL)
{
printf("Error(%s:%d):%s file not exist\n",__FILE__,__LINE__,mask_fileName);
exit(0);
}
for(i=0;i<no_elements;i++)
{
for(j=0;j<no_elements;j++)
{
fscanf(fp,"%d",&maskPattern[((63-i)*no_elements)+j]);
}
}
fclose(fp);
/*
Memory Allocation for device
*/
if((cuerr = cudaGetLastError()) != cudaSuccess)
{
printf("\nError:(Pre Malloc) \"%s\"\n", cudaGetErrorString(cuerr));
}
cudaMalloc(&x_device,sizeof(int*)*(totalGeneratedCount));
cudaMalloc(&y_device,sizeof(int*)*(totalGeneratedCount));
cudaMalloc(&pixelNo_device,sizeof(int*)*(totalGeneratedCount));
cudaMalloc(&detectorId_device,sizeof(int*)*(totalGeneratedCount));
cudaMalloc(&maskPattern_device,sizeof(int)*no_elements*no_elements);
cudaMalloc(&dphValues_device,sizeof(float)*totalElements);
cudaMalloc(&acceptedCount_device,sizeof(int)*totalThreads);
cudaMalloc(&rejectedCount_device,sizeof(int)*totalThreads);
if((cuerr = cudaGetLastError()) != cudaSuccess)
{
printf("\nError:(Pre CudaMemcpy) \"%s\"\n", cudaGetErrorString(cuerr));
}
//copying mask pattern
cudaMemcpy(maskPattern_device,maskPattern,sizeof(int)*no_elements*no_elements,cudaMemcpyHostToDevice);
if((cuerr = cudaGetLastError()) != cudaSuccess)
{
printf("\nError:(Post memcpy) \"%s\"\n", cudaGetErrorString(cuerr));
}
//calling kernel
kernel<<<noOfBlock,noOfThread>>>(tx,ty,height,maskPattern_device,dphValues_device,acceptedCount_device,rejectedCount_device,countPerThread,remainingCount,x_device,y_device,pixelNo_device,detectorId_device,myrank);
//copying back the results
cudaMemcpy(dphValues,dphValues_device,sizeof(float)*totalElements,cudaMemcpyDeviceToHost);
cudaMemcpy(acceptedCount,acceptedCount_device,sizeof(int)*totalThreads,cudaMemcpyDeviceToHost);
cudaMemcpy(rejectedCount,rejectedCount_device,sizeof(int)*totalThreads,cudaMemcpyDeviceToHost);
cudaMemcpy(x,x_device,sizeof(int)*totalGeneratedCount,cudaMemcpyDeviceToHost);
cudaMemcpy(y,y_device,sizeof(int)*totalGeneratedCount,cudaMemcpyDeviceToHost);
cudaMemcpy(pixelNo,pixelNo_device,sizeof(int)*totalGeneratedCount,cudaMemcpyDeviceToHost);
cudaMemcpy(detectorId,detectorId_device,sizeof(int)*totalGeneratedCount,cudaMemcpyDeviceToHost);
if((cuerr = cudaGetLastError()) != cudaSuccess)
{
printf("\nError:(Post dph kernel) \"%s\"\n", cudaGetErrorString(cuerr));
}
MPI_Send (x,totalGeneratedCount, MPI_INT, 0,myrank+10, MPI_COMM_WORLD );
MPI_Send (y,totalGeneratedCount, MPI_INT, 0, myrank+20, MPI_COMM_WORLD );
MPI_Send (pixelNo,totalGeneratedCount, MPI_INT, 0, myrank+30, MPI_COMM_WORLD );
MPI_Send (detectorId,totalGeneratedCount, MPI_INT, 0, myrank+40, MPI_COMM_WORLD );
//releasing the memory
cudaFree(&y_device);
cudaFree(&x_device);
cudaFree(&pixelNo_device);
cudaFree(&detectorId_device);
cudaFree(&maskPattern_device);
cudaFree(&dphValues_device);
cudaFree(acceptedCount_device);
cudaFree(rejectedCount_device);
free(&maskPattern);
free(&acceptedCount);
free(&rejectedCount);
}//end of main
__global__ void kernel(float tx,float ty,float height,int *maskPattern,float * dphValues,int *acceptedCount,int *rejectedCount,int countPerThread,int remainingCount,int*x,int*y,int*pixelNo,int*detectorId,int quadrantId)
{ int index=blockIdx.x*blockDim.x+threadIdx.x;
//Generating event using ray tracing
generateEvent(tx,ty,height,maskPattern,&dphValues[index*64*64],&acceptedCount[index],&rejectedCount[index],countPerThread,&x[index*countPerThread],&y[index*countPerThread],&pixelNo[index*countPerThread],&detectorId[index*countPerThread],quadrantId);
}
//function to convert length to detector module and pixel number
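//Worked example (illustrative): for detector coordinates x=17, y=5 in quadrant 0,
//moduleNo starts as (17/16)+((5/16)*4) = 1 and is remapped to 13, the local offsets
//become xtemp=1, ytemp=5, and pixelNo = (16-1-5)*16 + 1 = 161.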
__device__ void getDetectorIdPixelNo(int x,int y,int *moduleNo,int *pixelNo,int quadrantId)
{
int xtemp=0,ytemp=0;
int moduleRows=16,moduleCols=16;
*moduleNo=(x/16)+((y/16)*4);
if(*moduleNo<4)
*moduleNo+=12;
else if(*moduleNo>=4 && *moduleNo<8)
*moduleNo+=4;
else if(*moduleNo>=8 && *moduleNo<12)
*moduleNo-=4;
else
*moduleNo-=12;
xtemp=x;
ytemp=y;
while(xtemp>=16)
xtemp-=16;
while(ytemp>=16)
ytemp-=16;
*pixelNo=(moduleRows-1-ytemp)*moduleCols+xtemp;
if(quadrantId==1||quadrantId==2)
{
*moduleNo=15-*moduleNo;
}
}
//function to perform ray tracing
__device__ void generateEvent(float tx,float ty,float height,int *maskPattern,float * dphValues,int *accepted,int *rejected,int countPerThread,int*x_id,int*y_id,int *pixelNo,int *detectorId ,int quadrantId )
{
float binSize=2.5;
float xmin=0,ymin=0,thetaX=0.0,thetaY=0.0;
float mask_lower_left_x=0,mask_lower_left_y=0,x=0,y=0,x_dect=0,y_dect=0;;
int acceptedCount=0,rejectedCount=0,totalCount=0;
thetaX=ty*(PI/180);
thetaY=tx*(PI/180);
float x_mask=0,y_mask=0;
//initializing the device random number generator state
curandState localState;
curand_init(0,clock(), 0, &localState);
for(int i=0;i<64;i++)
{
for(int j=0;j<64;j++)
dphValues[i*64+j]=0;
}
while(acceptedCount<countPerThread)
{
totalCount++;
xmin=0;
ymin=0;
//generating random lengths
x=curand_uniform (&localState);
x*=159.5;
y=curand_uniform (&localState);
y*=159.5;
//getting pixel on the mask plate
x_mask=(x)*10;
x_mask=(int)x_mask;
x_mask/=10;
y_mask=y*10;
y_mask=(int)y_mask;
y_mask/=10;
while(1)
{
if(fmod(x_mask,binSize)<0.01)
break;
x_mask-=0.01;
}
while(1)
{
if(fmod(y_mask,binSize)<0.01)
break;
y_mask-=0.01;
}
x_mask*=10;
x_mask=(int)x_mask;
x_mask/=10;
y_mask*=10;
y_mask=(int)y_mask;
y_mask/=10;
//checking whether the mask element is open or closed
if(maskPattern[(int)(((x_mask/binSize)*64)+(y_mask/binSize))]==1)
{
x_dect=x-(height*(tan(thetaX)));
y_dect=y-(height*(tan(thetaY)));
while(x>=((xmin+16)*binSize))
{
if(xmin==48.0f)
break;
xmin+=16;
}
while(y>=((ymin+16)*binSize))
{
if(ymin==48.0f)
break;
ymin+=16;
}
if(x_dect<(xmin*binSize)|| (((xmin+16)*binSize)-x_dect)<0.1||y_dect<(ymin*binSize)||(((ymin+16)*binSize)-y_dect)<0.1)
{
rejectedCount++;
}
else
{
mask_lower_left_x=(x_dect)*10;
mask_lower_left_x=(int)mask_lower_left_x;
mask_lower_left_x/=10;
mask_lower_left_y=y_dect*10;
mask_lower_left_y=(int)mask_lower_left_y;
mask_lower_left_y/=10;
while(1)
{
if(fmod(mask_lower_left_x,binSize)<0.01)
break;
mask_lower_left_x-=0.01;
}
while(1)
{
if(fmod(mask_lower_left_y,binSize)<0.01)
break;
mask_lower_left_y-=0.01;
}
mask_lower_left_x*=10;
mask_lower_left_x=(int)mask_lower_left_x;
mask_lower_left_x/=10;
mask_lower_left_y*=10;
mask_lower_left_y=(int)mask_lower_left_y;
mask_lower_left_y/=10;
/*Get Pixel No and detector Id*/
x_id[acceptedCount]=(int)(mask_lower_left_y/2.5);
y_id[acceptedCount]=(int)(mask_lower_left_x/2.5);
getDetectorIdPixelNo(x_id[acceptedCount],y_id[acceptedCount],&detectorId[acceptedCount],&pixelNo[acceptedCount],quadrantId);
acceptedCount++;
}//end of else, i.e., pixel falls inside the detector
}
else
{
rejectedCount++;
}
}//end of the while
accepted[0]=acceptedCount;
rejected[0]=rejectedCount;
}
void printerror( int status)
{
if (status)
{
fits_report_error(stderr, status);
exit( status );
}
return;
}
|
e22ba30a964e1b42f9015cc049460a669a44d6a2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "g_One_wgrad_Add.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *_WgradTmp = NULL;
hipMalloc(&_WgradTmp, XSIZE*YSIZE*sizeof(float));
float *Wgrad = NULL;
hipMalloc(&Wgrad, XSIZE*YSIZE*sizeof(float));
float *w = NULL;
hipMalloc(&w, XSIZE*YSIZE*sizeof(float));
int rows = XSIZE;
int cols = YSIZE;
int channels = 1;
float lambda = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
g_One_wgrad_Add), dim3(gridBlock),dim3(threadBlock), 0, 0, _WgradTmp,Wgrad,w,rows,cols,channels,lambda);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
g_One_wgrad_Add), dim3(gridBlock),dim3(threadBlock), 0, 0, _WgradTmp,Wgrad,w,rows,cols,channels,lambda);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
g_One_wgrad_Add), dim3(gridBlock),dim3(threadBlock), 0, 0, _WgradTmp,Wgrad,w,rows,cols,channels,lambda);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e22ba30a964e1b42f9015cc049460a669a44d6a2.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "g_One_wgrad_Add.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *_WgradTmp = NULL;
cudaMalloc(&_WgradTmp, XSIZE*YSIZE*sizeof(float));
float *Wgrad = NULL;
cudaMalloc(&Wgrad, XSIZE*YSIZE*sizeof(float));
float *w = NULL;
cudaMalloc(&w, XSIZE*YSIZE*sizeof(float));
int rows = XSIZE;
int cols = YSIZE;
int channels = 1;
float lambda = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
g_One_wgrad_Add<<<gridBlock,threadBlock>>>(_WgradTmp,Wgrad,w,rows,cols,channels,lambda);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
g_One_wgrad_Add<<<gridBlock,threadBlock>>>(_WgradTmp,Wgrad,w,rows,cols,channels,lambda);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
g_One_wgrad_Add<<<gridBlock,threadBlock>>>(_WgradTmp,Wgrad,w,rows,cols,channels,lambda);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
170ae582ebe9edbd4e1e22ae6df90a937f176166.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// #define EIGEN_NO_MALLOC
#define NDEBUG // VERY VERY IMPORTANT FOR PERFORMANCE!!
#include <stdio.h>
#include <algorithm>
#include <Eigen/Dense>
#include <math.h>
#include <vector>
using namespace Eigen;
using namespace std;
#define NVERTS 484
#define NFACES 940
#define N_JOINT_INFLUENCES 4
#define NJOINTS {njoints}
#define RESOLUTION_X {resx}
#define RESOLUTION_Y {resy}
#define NUMPIXELS_PER_MOUSE RESOLUTION_X*RESOLUTION_Y
// #define SHITTYSHITTYHACK
// ==============================
// Helpers!
// ==============================
struct GLVertex
{{
float x;
float y;
float z;
}};
struct BoundingBox
{{
GLVertex lowerLeft;
GLVertex upperRight;
}};
struct GLTriangleFace
{{
unsigned short v0;
unsigned short v1;
unsigned short v2;
}};
struct GLTriangle
{{
GLVertex a;
GLVertex b;
GLVertex c;
}};
struct JointWeights
{{
float w[N_JOINT_INFLUENCES];
}};
struct JointWeightIndices
{{
unsigned short idx[N_JOINT_INFLUENCES];
}};
struct Plain4x4Matrix_f
{{
float matrix[16];
}};
struct Plain4x4Matrix_us
{{
unsigned short matrix[16];
}};
__device__ inline float deg2rad(float deg)
{{
return M_PI / 180.0f * deg;
}}
__device__ inline float rad2deg(float rad)
{{
return rad * 180.0f / M_PI;
}}
__device__ inline Vector3f getLowerLeftOfTriangle(Vector3f a, Vector3f b, Vector3f c)
{{
float x,y,z;
x = fminf(fminf(a(0),b(0)),c(0));
y = fminf(fminf(a(1),b(1)),c(1));
z = fminf(fminf(a(2),b(2)),c(2));
return Vector3f(x,y,z);
}}
__device__ inline Vector3f getUpperRightOfTriangle(Vector3f a, Vector3f b, Vector3f c)
{{
float x,y,z;
x = fmaxf(fmaxf(a(0),b(0)),c(0));
y = fmaxf(fmaxf(a(1),b(1)),c(1));
z = fmaxf(fmaxf(a(2),b(2)),c(2));
return Vector3f(x,y,z);
}}
// a,b,c are the vertices of the reference triangle
// This takes 3D vectors, because our points are mostly in 3D. It's for convenience, not correctness.
__device__ inline Vector3f calcBarycentricCoordinate(Vector3f vec, Vector3f a, Vector3f b, Vector3f c)
{{
float den = 1 / ((b(1) - c(1)) * (a(0) - c(0)) + (c(0) - b(0)) * (a(1) - c(1)));
float x = ((b(1) - c(1)) * (vec(0) - c(0)) + (c(0) - b(0)) * (vec(1) - c(1))) * den;
float y = ((c(1) - a(1)) * (vec(0) - c(0)) + (a(0) - c(0)) * (vec(1) - c(1))) * den;
float z = 1.0 - x - y;
return Vector3f(x,y,z);
}}
__device__ inline bool isBarycentricCoordinateInBounds(Vector3f barycentricCoord)
{{
return barycentricCoord(0) >= 0.0 && barycentricCoord(0) <= 1.0 &&
barycentricCoord(1) >= 0.0 && barycentricCoord(1) <= 1.0 &&
barycentricCoord(2) >= 0.0 && barycentricCoord(2)<= 1.0;
}}
__device__ inline float getZAtBarycentricCoordinate(Vector3f barycentricCoord, Vector3f a, Vector3f b, Vector3f c)
{{
return barycentricCoord(0)*a(2) + barycentricCoord(1)*b(2) + barycentricCoord(2)*c(2);
}}
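// Illustrative sketch (unused by the kernels below): how the three barycentric helpers
// above combine to coverage-test one pixel center and interpolate its depth, mirroring
// the inner loop of the rasterizers. For example, with a=(0,0,0), b=(4,0,2), c=(0,4,4)
// and pixel (1,1) (center 1.5,1.5), the barycentric coordinate is (0.25, 0.375, 0.375),
// which is in bounds, and the interpolated depth is 2.25.
__device__ inline bool samplePixelDepth(int px, int py,
Vector3f a, Vector3f b, Vector3f c,
float &outZ)
{{
Vector3f pt(px + 0.5f, py + 0.5f, 0.0f);
Vector3f bary = calcBarycentricCoordinate(pt, a, b, c);
if (!isBarycentricCoordinateInBounds(bary))
return false;
outZ = getZAtBarycentricCoordinate(bary, a, b, c);
return true;
}}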
__device__ inline Matrix3f rotateMatrix3D(float rotx, float roty, float rotz) {{
rotx = deg2rad(rotx);
roty = deg2rad(roty);
rotz = deg2rad(rotz);
float cx = cos(rotx);
float sx = sin(rotx);
float cy = cos(roty);
float sy = sin(roty);
float cz = cos(rotz);
float sz = sin(rotz);
Matrix3f Rx = Matrix3f::Identity();
Matrix3f Ry = Matrix3f::Identity();
Matrix3f Rz = Matrix3f::Identity();
Rx = Rx*cx;
Ry = Ry*cy;
Rz = Rz*cz;
Rx(0,0) += 1.0 - cx;
Ry(1,1) += 1.0 - cy;
Rz(2,2) += 1.0 - cz;
Rx(1,2) += -sx;
Rx(2,1) += sx;
Ry(0,2) += sy;
Ry(2,0) += -sy;
Rz(0,1) += -sz;
Rz(1,0) += sz;
Matrix3f t;
t = Rx*Ry*Rz;
return t;
}}
__device__ inline Matrix4f rotateMatrix2(float rotx, float roty, float rotz) {{
rotx = deg2rad(rotx);
roty = deg2rad(roty);
rotz = deg2rad(rotz);
float cx = cos(rotx);
float sx = sin(rotx);
float cy = cos(roty);
float sy = sin(roty);
float cz = cos(rotz);
float sz = sin(rotz);
Matrix4f Rx = Matrix4f::Identity();
Matrix4f Ry = Matrix4f::Identity();
Matrix4f Rz = Matrix4f::Identity();
Rx = Rx*cx;
Ry = Ry*cy;
Rz = Rz*cz;
Rx(0,0) += 1.0 - cx;
Ry(1,1) += 1.0 - cy;
Rz(2,2) += 1.0 - cz;
Rx(1,2) += -sx;
Rx(2,1) += sx;
Ry(0,2) += sy;
Ry(2,0) += -sy;
Rz(0,1) += -sz;
Rz(1,0) += sz;
Matrix4f t = Rx*Ry;
return t;
}}
__device__ inline Matrix4f rotateMatrix(float rotx, float roty, float rotz) {{
rotx = deg2rad(rotx);
roty = deg2rad(roty);
rotz = deg2rad(rotz);
float cx = cos(rotx);
float sx = sin(rotx);
float cy = cos(roty);
float sy = sin(roty);
float cz = cos(rotz);
float sz = sin(rotz);
Matrix4f Rx = Matrix4f::Identity();
Matrix4f Ry = Matrix4f::Identity();
Matrix4f Rz = Matrix4f::Identity();
// Right-handed convention
Rx(1,1) = cx;
Rx(1,2) = sx;
Rx(2,1) = -sx;
Rx(2,2) = cx;
Ry(0,0) = cy;
Ry(0,2) = -sy;
Ry(2,0) = sy;
Ry(2,2) = cy;
Rz(0,0) = cz;
Rz(0,1) = sz;
Rz(1,0) = -sz;
Rz(2,2) = cz;
Matrix4f t = Matrix4f::Identity();
t = t*Rz*Ry*Rx;
return t;
}}
__device__ inline void translate(Matrix4f &transform, float transx, float transy, float transz)
{{
transform(0,3) += transx;
transform(1,3) += transy;
transform(2,3) += transz;
}}
__device__ inline Matrix4f translateMatrix(float transx, float transy, float transz)
{{
Matrix4f t = Matrix4f::Identity();
t(0,3) = transx;
t(1,3) = transy;
t(2,3) = transz;
return t;
}}
__device__ inline Matrix3f scaleMatrix3D(float scalex, float scaley, float scalez)
{{
Matrix3f t = Matrix3f::Identity();
t(0,0) = scalex;
t(1,1) = scaley;
t(2,2) = scalez;
return t;
}}
__device__ inline Matrix4f scaleMatrix(float scalex, float scaley, float scalez)
{{
Matrix4f t = Matrix4f::Identity();
t(0,0) = scalex;
t(1,1) = scaley;
t(2,2) = scalez;
return t;
}}
__device__ inline Matrix4f EigenMatFromMemory(float *mat4inMemory)
{{
Matrix4f m;
m <<
mat4inMemory[0], mat4inMemory[1], mat4inMemory[2], mat4inMemory[3],
mat4inMemory[4], mat4inMemory[5], mat4inMemory[6], mat4inMemory[7],
mat4inMemory[8], mat4inMemory[9], mat4inMemory[10], mat4inMemory[11],
mat4inMemory[12], mat4inMemory[13], mat4inMemory[14], mat4inMemory[15];
return m;
}}
__device__ void copyMat4x4ToEigen(Plain4x4Matrix_f plainMat, Matrix4f &EigenMat)
{{
for (int i=0; i < 4; ++i) {{
for (int j=0; j < 4; ++j) {{
int idx = i*4 +j;
EigenMat(i,j) = plainMat.matrix[idx];
}}
}}
}}
__device__ void copyEigenToMat4x4(Plain4x4Matrix_f &plainMat, Matrix4f &EigenMat)
{{
for (int i=0; i < 4; ++i) {{
for (int j=0; j < 4; ++j) {{
int idx = i*4 +j;
plainMat.matrix[idx] = EigenMat(i,j);
}}
}}
}}
__device__ void printEigenMat(Matrix4f someMatrix)
{{
for (int j=0; j < 4; ++j) {{
printf("\t%2.3f, %2.3f, %2.3f, %2.3f,\n",
someMatrix(j,0),
someMatrix(j,1),
someMatrix(j,2),
someMatrix(j,3));
}}
printf("\n");
}}
// ==============================
// Here's the actual work here!
// ==============================
extern "C"
__global__ void rasterizeSerial(GLVertex *skinnedVertices,
GLVertex *vertices, // REMOVE THIS WHEN FK+SKINNING ARE IMPLEMENTED
GLTriangleFace *triangles,
float *synthPixels)
{{
const uint bx = blockIdx.x;
const uint bw = blockDim.x;
const uint tx = threadIdx.x;
// Make sure we're looking at the right data
skinnedVertices += NVERTS*(bw*bx + tx);
#ifdef SHITTYSHITTYHACK
// ======================================================================
// HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK
// HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK
// HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK
// Remove when FK and skinning are implemented
Matrix3f scale_matrix = scaleMatrix3D(RESOLUTION_X*0.3, RESOLUTION_Y*0.3, 24.0);
Vector3f translate_vector(RESOLUTION_X/2, RESOLUTION_Y/2, 0.);
for (int i=0; i < NVERTS; ++i) {{
// Grab from memory
Vector3f v(vertices[i].x, vertices[i].y, vertices[i].z);
// Transform to screen space
v = scale_matrix*v;
v = v+translate_vector;
skinnedVertices[i].y = v(1);
skinnedVertices[i].z = v(2);
}}
// END HACK END HACK END HACK END HACK END HACK END HACK END HACK END HACK
// END HACK END HACK END HACK END HACK END HACK END HACK END HACK END HACK
// END HACK END HACK END HACK END HACK END HACK END HACK END HACK END HACK
// ======================================================================
#endif
int depthBufferOffset = NUMPIXELS_PER_MOUSE*(bx*bw + tx);
// For each triangle, rasterize the crap out of it
// (for now, don't care about overlaps)
for (int iface=0; iface < NFACES; ++iface)
{{
unsigned short i0 = triangles[iface].v0;
unsigned short i1 = triangles[iface].v1;
unsigned short i2 = triangles[iface].v2;
// NOTE THE SWAP MANG
// MAYA's coordinates are left-handed, which I liketh not.
Vector3f a(skinnedVertices[i0].x, skinnedVertices[i0].y, skinnedVertices[i0].z);
Vector3f b(skinnedVertices[i1].x, skinnedVertices[i1].y, skinnedVertices[i1].z);;
Vector3f c(skinnedVertices[i2].x, skinnedVertices[i2].y, skinnedVertices[i2].z);;
Vector3f ll = getLowerLeftOfTriangle(a,b,c);
Vector3f ur = getUpperRightOfTriangle(a,b,c);
for (int i=ll(1); i < ur(1); ++i) {{
for (int j=ll(0); j < ur(0); ++j) {{
Vector3f pt(j+0.5,i+0.5,0);
Vector3f baryCoord = calcBarycentricCoordinate(pt,a,b,c);
bool inTriangle = isBarycentricCoordinateInBounds(baryCoord);
if (inTriangle) {{
float interpZ = getZAtBarycentricCoordinate(baryCoord,a,b,c);
long int idx = i*RESOLUTION_X + j;
idx += depthBufferOffset;
float oldval = synthPixels[idx];
if (oldval <= interpZ) {{
atomicExch(&synthPixels[idx], interpZ);
}}
}}
}}
}}
}}
}}
extern "C"
// Fastest with 10 blocks, 256 threads
// Also, faster than the cache version.
__global__ void likelihoodSerial(float *synthPixels,
float *realPixels,
float *likelihood)
{{
const uint bx = blockIdx.x;
const uint bw = blockDim.x;
const uint tx = threadIdx.x;
int mouseIdx = bx*bw + tx;
int synthPixelOffset = NUMPIXELS_PER_MOUSE*mouseIdx;
float accumulator = 0.0;
for (int i=0; i < NUMPIXELS_PER_MOUSE; ++i) {{
accumulator += abs(realPixels[i] - synthPixels[i+synthPixelOffset]);
}}
atomicExch(&likelihood[mouseIdx], accumulator);
}}
extern "C"
__global__ void skinningSerial(Plain4x4Matrix_f *jointTransforms,
GLVertex *vertices,
JointWeights *jointWeights,
JointWeightIndices *jointWeightIndices,
GLVertex *skinnedVertices)
{{
const uint bx = blockIdx.x;
const uint bw = blockDim.x;
const uint tx = threadIdx.x;
int mouseIdx = bx*bw + tx;
jointTransforms += mouseIdx*NJOINTS;
skinnedVertices += mouseIdx*NVERTS;
// Calculate a joint's local rotation matrix
Vector4f vertex;
vertex << 0.0, 0.0, 0.0, 0.0;
// Grab the joint transformations, put them in a usable format
// (All matrix multiplication is done w/ Eigen)
Matrix4f theseJoints[NJOINTS];
for (int i=0; i < NJOINTS; ++i) {{
theseJoints[i] = Matrix4f(jointTransforms[i].matrix);
}}
// Precalculate some scaling matrices
Matrix4f scale_matrix = scaleMatrix(RESOLUTION_X*0.3, RESOLUTION_Y*0.3, 24.0);
Vector4f translate_vector(RESOLUTION_X/2, RESOLUTION_Y/2, 0., 0.0);
for (int i=0; i < NVERTS; ++i) {{
// Grab the unposed vertex
Vector4f vertex(vertices[i].x, vertices[i].z, vertices[i].y, 1.0);
// Make our destination vertex
Vector4f skinnedVertex(0., 0., 0., 0.);
for (int ijoint=0; ijoint<N_JOINT_INFLUENCES; ++ijoint) {{
int index = jointWeightIndices[i].idx[ijoint];
float weight = jointWeights[i].w[ijoint];
skinnedVertex += weight*theseJoints[index]*vertex;
}}
// After we've computed the weighted skin position,
// then we'll scale and translate it into a proper skin space
skinnedVertex = scale_matrix*skinnedVertex;
skinnedVertex = skinnedVertex+translate_vector;
skinnedVertices[i].x = skinnedVertex(0);
skinnedVertices[i].y = skinnedVertex(1);
skinnedVertices[i].z = skinnedVertex(2);
}}
}}
__device__ Matrix4f calculateEMatrix(GLVertex angle, GLVertex translation)
{{
float rotx = angle.x;
float roty = angle.y;
float rotz = angle.z;
rotx = deg2rad(rotx);
roty = deg2rad(roty);
rotz = deg2rad(rotz);
float cx = cos(rotx);
float sx = sin(rotx);
float cy = cos(roty);
float sy = sin(roty);
float cz = cos(rotz);
float sz = sin(rotz);
Matrix4f out = Matrix4f::Identity();
out << cy*cz, cy*sz, -sy , translation.x,
-cx*sz+sx*sy*cz, cx*cz+sx*sy*sz, sx*cy, translation.y,
sx*sz+sy*cx*cz, -sx*cz+cx*sy*sz, cx*cy, translation.z,
0.0, 0.0, 0.0, 1.0;
return out;
}}
extern "C"
__global__ void FKSerial(GLVertex *baseRotations,
GLVertex *rotations,
GLVertex *translations,
Plain4x4Matrix_f *jointTransforms)
{{
// NOTE:
// - The E inverse could be optimized.
// Notation:
// M - skinning matrix. You can multiply an unposed vector into M and get a posed vector.
// E - local transformation matrix. Represents a rotation and translation from (0,0)
// "Fixed" matrix - a matrix computed using defualt, or unposed, rotations
// "Changed" matrix - a matrix computed using non-default, or posed, rotations
const uint bx = blockIdx.x;
const uint bw = blockDim.x;
const uint tx = threadIdx.x;
int mouseIdx = bx*bw + tx;
rotations += mouseIdx*NJOINTS;
jointTransforms += mouseIdx*NJOINTS;
Matrix4f fixedE[NJOINTS];
Matrix4f fixedM[NJOINTS];
Matrix4f changedE[NJOINTS];
Matrix4f changedM[NJOINTS];
Matrix4f M[NJOINTS];
// == Get the fixed E's.
// ========================================
for (int i=0; i < NJOINTS; ++i) {{
fixedE[i] = calculateEMatrix(baseRotations[i], translations[i]);
}}
// == Get the fixed M's.
// ========================================
fixedM[0] = fixedE[0].inverse();
for (int i=1; i < NJOINTS; ++i) {{
fixedM[i] = fixedM[i-1]*fixedE[i].inverse();
}}
// == Get the Changed E's.
// ========================================
for (int i=0; i < NJOINTS; ++i) {{
changedE[i] = calculateEMatrix(rotations[i], translations[i]);
}}
// == Get the changed M's
// ========================================
changedM[0] = changedE[0];
for (int i=1; i < NJOINTS; ++i) {{
changedM[i] = changedE[i]*changedM[i-1];
}}
// == Create the final M's by multiplying the fixed and changed M's.
// ========================================
for (int i=0; i < NJOINTS; ++i) {{
M[i] = fixedM[i]*changedM[i];
for (int ii=0; ii < 4; ++ii) {{
for (int jj=0; jj < 4; ++jj) {{
int idx = ii*4 + jj;
jointTransforms[i].matrix[idx] = M[i](ii,jj);
}}
}}
}}
}}
extern "C"
// NOTE: UNFINISHED
__global__ void FKSerial2(GLVertex *rotations,
GLVertex *translations,
Plain4x4Matrix_f *inverseBindingMatrix,
Plain4x4Matrix_f *jointTransforms)
{{
const uint bx = blockIdx.x;
const uint bw = blockDim.x;
const uint tx = threadIdx.x;
int mouseIdx = bx*bw + tx;
rotations += mouseIdx*NJOINTS;
translations += mouseIdx*NJOINTS;
jointTransforms += mouseIdx*NJOINTS;
Matrix4f lastJointWorldMatrix = Matrix4f::Identity();
Matrix4f jointWorldMatrix = Matrix4f::Identity();
// For each joint, starting with an identity transform,...
for (int ijoint=0; ijoint<NJOINTS; ++ijoint) {{
// Take the rotation and translation
float rx = rotations[ijoint].x;
float ry = rotations[ijoint].y;
float rz = rotations[ijoint].z;
float tx = translations[ijoint].x;
float ty = translations[ijoint].y;
float tz = translations[ijoint].z;
// Get the local transform
Matrix4f tmp = rotateMatrix(rx,ry,rz);
translate(tmp, tx, ty, tz);
Matrix4f localTransform = tmp;
// Multiply it by the parent world matrix to get the current world
jointWorldMatrix = localTransform*lastJointWorldMatrix;
// Multiply it by the inverse binding matrix to get the skinning matrix
Matrix4f Bi = Matrix4f::Identity();
copyMat4x4ToEigen(inverseBindingMatrix[ijoint], Bi);
Matrix4f M = Bi*jointWorldMatrix;
// Save that skinning matrix out
copyEigenToMat4x4(jointTransforms[ijoint], M);
// Save out the current world matrix as the parent of the next one
lastJointWorldMatrix = jointWorldMatrix;
}}
}}
// CODE GRAVEYARD
/*
extern "C"
__global__ void PlaygroundKernel(GLVertex *vertices,
GLTriangleFace *triangles,
float *depthBuffer,
float *mouseImage,
JointInfluences4Joints *jointWeights,
JointIndices4Joints *jointIndices,
Plain4x4Matrix_f *jointWorldMatrix,
Plain4x4Matrix_f *inverseBindingMatrix,
int numJoints,
int resolutionX,
int resolutionY )
{{
Matrix3f transform = scaleMatrix3D(30.0, 30.0, 30.0);
Vector3f transvec(40.5, 40.5, 0.);
__shared__ float thisMouse[6400];
for (int i=0; i<6400;++i) {{
thisMouse[i] = mouseImage[i];
}}
float synthMouse[6400];
for (int i=0; i<6400;++i) {{
synthMouse[i] = 0.0f;
}}
// Solve some FK, just a lil bit.
Matrix4f posingMatrix[NJOINTS];
for (int i=0; i < NJOINTS; ++i) {{
Matrix4f thisJointWorld = EigenMatFromMemory(jointWorldMatrix[i].matrix);
Matrix4f thisInverseBinding = EigenMatFromMemory(inverseBindingMatrix[i].matrix);
posingMatrix[i] = thisJointWorld*thisInverseBinding;
}}
// for (int ijoint=0; ijoint < NJOINTS; ++ijoint) {{
// printf("Matrix%d\n[\n", ijoint);
// for (int j=0; j < 4; ++j) {{
// printf("%0.2f,%0.2f,%0.2f,%0.2f,\n",
// posingMatrix[ijoint](j,0),
// posingMatrix[ijoint](j,1),
// posingMatrix[ijoint](j,2),
// posingMatrix[ijoint](j,3));
// }}
// printf("]\n");
// }}
// Pre-transform the vertices
for (int i=0; i < NVERTS; ++i) {{
// Grab from memory
Vector3f v(vertices[i].x, vertices[i].z, vertices[i].y);
// Transform to screen space
v = transform*v;
v = v+transvec;
Vector4f v4;
v4(0) = v(0); v4(1) = v(1); v4(2) = v(2); v4(3) = 1.0;
// Skin
int indices[N_JOINT_INFLUENCES];
indices[0] = jointIndices[i].i0;
indices[1] = jointIndices[i].i1;
indices[2] = jointIndices[i].i2;
indices[3] = jointIndices[i].i3;
float weights[N_JOINT_INFLUENCES];
weights[0] = jointWeights[i].w0;
weights[1] = jointWeights[i].w1;
weights[2] = jointWeights[i].w2;
weights[3] = jointWeights[i].w3;
Vector4f skinnedVert;
skinnedVert(0) = 0.0; skinnedVert(1) = 0.0;
skinnedVert(2) = 0.0; skinnedVert(3) = 0.0;
for (int j=0; j < N_JOINT_INFLUENCES; ++j) {{
int idx = indices[j];
float weight = weights[j];
Matrix4f thisMat = posingMatrix[idx];
Vector4f thisVec = weight*thisMat*v4;
skinnedVert = skinnedVert+thisVec;
}}
vertices[i].x = skinnedVert(0);
vertices[i].y = skinnedVert(1);
vertices[i].z = skinnedVert(2);
}}
// For each triangle, rasterize the crap out of it
// (for now, don't care about overlaps)
for (int iface=0; iface < NFACES; ++iface)
{{
unsigned short i0 = triangles[iface].v0;
unsigned short i1 = triangles[iface].v1;
unsigned short i2 = triangles[iface].v2;
// NOTE THE SWAP MANG
// MAYA's coordinates are left-handed, which I liketh not.
Vector3f a(vertices[i0].x, vertices[i0].y, vertices[i0].z);
Vector3f b(vertices[i1].x, vertices[i1].y, vertices[i1].z);
Vector3f c(vertices[i2].x, vertices[i2].y, vertices[i2].z);
Vector3f ll = getLowerLeftOfTriangle(a,b,c);
Vector3f ur = getUpperRightOfTriangle(a,b,c);
for (int i=ll(1); i < ur(1); ++i) {{
for (int j=ll(0); j < ur(0); ++j) {{
Vector3f pt(j+0.5,i+0.5,0);
Vector3f baryCoord = calcBarycentricCoordinate(pt,a,b,c);
bool inTriangle = isBarycentricCoordinateInBounds(baryCoord);
if (inTriangle) {{
float interpZ = getZAtBarycentricCoordinate(baryCoord,a,b,c);
long int idx = i*resolutionX + j;
float oldval = synthMouse[idx];
float compareval = thisMouse[idx];
if (oldval <= interpZ) {{
synthMouse[idx] = interpZ;
// atomicExch(&synthMouse[idx], interpZ-compareval);
}}
}}
}}
}}
for (int i=0; i < resolutionX*resolutionY; ++i) {{
depthBuffer[i] = synthMouse[i];
mouseImage[i] = synthMouse[i] - thisMouse[i];
}}
}}
}}
extern "C"
__global__ void rasterizeParallel(GLVertex *skinnedVertices,
GLVertex *vertices, // TODO: remove once FK and skinning exist.
GLTriangleFace *triangles,
float *depthBuffer)
{{
const uint bx = blockIdx.x;
const uint bw = blockDim.x;
const uint tx = threadIdx.x;
// The block determines which primitive we're on
// printf("Working on triangle %d\n", bx);
const int primitiveIdx = bx;
Matrix3f scale_matrix = scaleMatrix3D(RESOLUTION_X*0.3, RESOLUTION_Y*0.3, 24.0);
Vector3f translate_vector(RESOLUTION_X/2, RESOLUTION_Y/2, 0.);
__shared__ Vector3f a;
__shared__ Vector3f b;
__shared__ Vector3f c;
__shared__ Vector3f ll;
__shared__ Vector3f ur;
__shared__ int boundingBoxWidth;
__shared__ int boundingBoxHeight;
__shared__ int numPixelsInBoundingBox;
__shared__ float *sharedDepthBuffer;
if (threadIdx.x == 0) {{
// Grab the triangle indices
unsigned short i0 = triangles[primitiveIdx].v0;
unsigned short i1 = triangles[primitiveIdx].v1;
unsigned short i2 = triangles[primitiveIdx].v2;
// NOTE THE SWAP MANG
// MAYA's coordinates are left-handed, which I liketh not.
a << vertices[i0].x, vertices[i0].y, vertices[i0].z;
b << vertices[i1].x, vertices[i1].y, vertices[i1].z;
c << vertices[i2].x, vertices[i2].y, vertices[i2].z;
// THIS IS A HACK UNTIL FK AND SKINNING ARE IMPLEMENTED
a = scale_matrix*a;
a = a+translate_vector;
b = scale_matrix*b;
b = b+translate_vector;
c = scale_matrix*c;
c = c+translate_vector;
// Find the bounding box
ll = getLowerLeftOfTriangle(a,b,c);
ur = getUpperRightOfTriangle(a,b,c);
boundingBoxWidth = (int)ceilf(ur(0) - ll(0));
boundingBoxHeight = (int)ceilf(ur(1) - ll(1));
numPixelsInBoundingBox = boundingBoxWidth*boundingBoxHeight;
// Create a space for shared memory to be written to
sharedDepthBuffer = (float *)malloc(numPixelsInBoundingBox*sizeof(float));
}}
__syncthreads();
for (int i=0; i < numPixelsInBoundingBox; ++i) {{
sharedDepthBuffer[i] = (float)(i); }}
// All threads write into the sharedDepthBuffer
// for (int i=ll(1); i < ur(1); ++i) {{
// for (int j=ll(0); j < ur(0); ++j) {{
// Vector3f pt(j+0.5,i+0.5,0);
// Vector3f baryCoord = calcBarycentricCoordinate(pt,a,b,c);
// bool inTriangle = isBarycentricCoordinateInBounds(baryCoord);
// if (inTriangle) {{
// float interpZ = getZAtBarycentricCoordinate(baryCoord,a,b,c);
// long int idx = i*RESOLUTION_X + j;
// float oldval = depthBuffer[idx];
// if (oldval <= interpZ) {{
// atomicExch(&depthBuffer[idx], interpZ);
// }}
// }}
// }}
// }}
// __syncthreads();
// Write out to the depth buffer
// TODO: should use atomicCAS for this, I believe.
int counter = 0;
if (threadIdx.x == 0) {{
for (int i=ll(1); i < ur(1); ++i) {{
for (int j=ll(0); j < ur(0); ++j) {{
long int idx = i*RESOLUTION_X + j;
float oldVal = depthBuffer[idx];
float newVal = sharedDepthBuffer[counter];
newVal = 999.0;
if (oldVal <= newVal) {{
atomicExch(&depthBuffer[idx], newVal);
}}
++counter;
}}
}}
}}
// __syncthreads();
}}
*/
| 170ae582ebe9edbd4e1e22ae6df90a937f176166.cu | // #define EIGEN_NO_MALLOC
#define NDEBUG // VERY VERY IMPORTANT FOR PERFORMANCE!!
#include <stdio.h>
#include <algorithm>
#include <Eigen/Dense>
#include <math.h>
#include <vector>
using namespace Eigen;
using namespace std;
#define NVERTS 484
#define NFACES 940
#define N_JOINT_INFLUENCES 4
#define NJOINTS {njoints}
#define RESOLUTION_X {resx}
#define RESOLUTION_Y {resy}
#define NUMPIXELS_PER_MOUSE RESOLUTION_X*RESOLUTION_Y
// #define SHITTYSHITTYHACK
// ==============================
// Helpers!
// ==============================
struct GLVertex
{{
float x;
float y;
float z;
}};
struct BoundingBox
{{
GLVertex lowerLeft;
GLVertex upperRight;
}};
struct GLTriangleFace
{{
unsigned short v0;
unsigned short v1;
unsigned short v2;
}};
struct GLTriangle
{{
GLVertex a;
GLVertex b;
GLVertex c;
}};
struct JointWeights
{{
float w[N_JOINT_INFLUENCES];
}};
struct JointWeightIndices
{{
unsigned short idx[N_JOINT_INFLUENCES];
}};
struct Plain4x4Matrix_f
{{
float matrix[16];
}};
struct Plain4x4Matrix_us
{{
unsigned short matrix[16];
}};
__device__ inline float deg2rad(float deg)
{{
return M_PI / 180.0f * deg;
}}
__device__ inline float rad2deg(float rad)
{{
return rad * 180.0f / M_PI;
}}
__device__ inline Vector3f getLowerLeftOfTriangle(Vector3f a, Vector3f b, Vector3f c)
{{
float x,y,z;
x = fminf(fminf(a(0),b(0)),c(0));
y = fminf(fminf(a(1),b(1)),c(1));
z = fminf(fminf(a(2),b(2)),c(2));
return Vector3f(x,y,z);
}}
__device__ inline Vector3f getUpperRightOfTriangle(Vector3f a, Vector3f b, Vector3f c)
{{
float x,y,z;
x = fmaxf(fmaxf(a(0),b(0)),c(0));
y = fmaxf(fmaxf(a(1),b(1)),c(1));
z = fmaxf(fmaxf(a(2),b(2)),c(2));
return Vector3f(x,y,z);
}}
// a,b,c are the vertices of the reference triangle
// This takes 3D vectors, because our points are mostly in 3D. It's for convenience, not correctness.
__device__ inline Vector3f calcBarycentricCoordinate(Vector3f vec, Vector3f a, Vector3f b, Vector3f c)
{{
float den = 1 / ((b(1) - c(1)) * (a(0) - c(0)) + (c(0) - b(0)) * (a(1) - c(1)));
float x = ((b(1) - c(1)) * (vec(0) - c(0)) + (c(0) - b(0)) * (vec(1) - c(1))) * den;
float y = ((c(1) - a(1)) * (vec(0) - c(0)) + (a(0) - c(0)) * (vec(1) - c(1))) * den;
float z = 1.0 - x - y;
return Vector3f(x,y,z);
}}
__device__ inline bool isBarycentricCoordinateInBounds(Vector3f barycentricCoord)
{{
return barycentricCoord(0) >= 0.0 && barycentricCoord(0) <= 1.0 &&
barycentricCoord(1) >= 0.0 && barycentricCoord(1) <= 1.0 &&
barycentricCoord(2) >= 0.0 && barycentricCoord(2)<= 1.0;
}}
__device__ inline float getZAtBarycentricCoordinate(Vector3f barycentricCoord, Vector3f a, Vector3f b, Vector3f c)
{{
return barycentricCoord(0)*a(2) + barycentricCoord(1)*b(2) + barycentricCoord(2)*c(2);
}}
__device__ inline Matrix3f rotateMatrix3D(float rotx, float roty, float rotz) {{
rotx = deg2rad(rotx);
roty = deg2rad(roty);
rotz = deg2rad(rotz);
float cx = cos(rotx);
float sx = sin(rotx);
float cy = cos(roty);
float sy = sin(roty);
float cz = cos(rotz);
float sz = sin(rotz);
Matrix3f Rx = Matrix3f::Identity();
Matrix3f Ry = Matrix3f::Identity();
Matrix3f Rz = Matrix3f::Identity();
Rx = Rx*cx;
Ry = Ry*cy;
Rz = Rz*cz;
Rx(0,0) += 1.0 - cx;
Ry(1,1) += 1.0 - cy;
Rz(2,2) += 1.0 - cz;
Rx(1,2) += -sx;
Rx(2,1) += sx;
Ry(0,2) += sy;
Ry(2,0) += -sy;
Rz(0,1) += -sz;
Rz(1,0) += sz;
Matrix3f t;
t = Rx*Ry*Rz;
return t;
}}
__device__ inline Matrix4f rotateMatrix2(float rotx, float roty, float rotz) {{
rotx = deg2rad(rotx);
roty = deg2rad(roty);
rotz = deg2rad(rotz);
float cx = cos(rotx);
float sx = sin(rotx);
float cy = cos(roty);
float sy = sin(roty);
float cz = cos(rotz);
float sz = sin(rotz);
Matrix4f Rx = Matrix4f::Identity();
Matrix4f Ry = Matrix4f::Identity();
Matrix4f Rz = Matrix4f::Identity();
Rx = Rx*cx;
Ry = Ry*cy;
Rz = Rz*cz;
Rx(0,0) += 1.0 - cx;
Ry(1,1) += 1.0 - cy;
Rz(2,2) += 1.0 - cz;
Rx(1,2) += -sx;
Rx(2,1) += sx;
Ry(0,2) += sy;
Ry(2,0) += -sy;
Rz(0,1) += -sz;
Rz(1,0) += sz;
Matrix4f t = Rx*Ry;
return t;
}}
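// Editor's note: rotateMatrix2 above builds Rz but only applies Rx*Ry, so the z rotation is dropped;
// it appears to be an unused variant, while rotateMatrix below is the version used by FKSerial2.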
__device__ inline Matrix4f rotateMatrix(float rotx, float roty, float rotz) {{
rotx = deg2rad(rotx);
roty = deg2rad(roty);
rotz = deg2rad(rotz);
float cx = cos(rotx);
float sx = sin(rotx);
float cy = cos(roty);
float sy = sin(roty);
float cz = cos(rotz);
float sz = sin(rotz);
Matrix4f Rx = Matrix4f::Identity();
Matrix4f Ry = Matrix4f::Identity();
Matrix4f Rz = Matrix4f::Identity();
// Right-handed convention
Rx(1,1) = cx;
Rx(1,2) = sx;
Rx(2,1) = -sx;
Rx(2,2) = cx;
Ry(0,0) = cy;
Ry(0,2) = -sy;
Ry(2,0) = sy;
Ry(2,2) = cy;
Rz(0,0) = cz;
Rz(0,1) = sz;
Rz(1,0) = -sz;
Rz(2,2) = cz;
Matrix4f t = Matrix4f::Identity();
t = t*Rz*Ry*Rx;
return t;
}}
__device__ inline void translate(Matrix4f &transform, float transx, float transy, float transz)
{{
transform(0,3) += transx;
transform(1,3) += transy;
transform(2,3) += transz;
}}
__device__ inline Matrix4f translateMatrix(float transx, float transy, float transz)
{{
Matrix4f t = Matrix4f::Identity();
t(0,3) = transx;
t(1,3) = transy;
t(2,3) = transz;
return t;
}}
__device__ inline Matrix3f scaleMatrix3D(float scalex, float scaley, float scalez)
{{
Matrix3f t = Matrix3f::Identity();
t(0,0) = scalex;
t(1,1) = scaley;
t(2,2) = scalez;
return t;
}}
__device__ inline Matrix4f scaleMatrix(float scalex, float scaley, float scalez)
{{
Matrix4f t = Matrix4f::Identity();
t(0,0) = scalex;
t(1,1) = scaley;
t(2,2) = scalez;
return t;
}}
__device__ inline Matrix4f EigenMatFromMemory(float *mat4inMemory)
{{
Matrix4f m;
m <<
mat4inMemory[0], mat4inMemory[1], mat4inMemory[2], mat4inMemory[3],
mat4inMemory[4], mat4inMemory[5], mat4inMemory[6], mat4inMemory[7],
mat4inMemory[8], mat4inMemory[9], mat4inMemory[10], mat4inMemory[11],
mat4inMemory[12], mat4inMemory[13], mat4inMemory[14], mat4inMemory[15];
return m;
}}
__device__ void copyMat4x4ToEigen(Plain4x4Matrix_f plainMat, Matrix4f &EigenMat)
{{
for (int i=0; i < 4; ++i) {{
for (int j=0; j < 4; ++j) {{
int idx = i*4 +j;
EigenMat(i,j) = plainMat.matrix[idx];
}}
}}
}}
// NOTE: plainMat is taken by reference so the copied values actually reach the caller's struct.
__device__ void copyEigenToMat4x4(Plain4x4Matrix_f &plainMat, Matrix4f &EigenMat)
{{
for (int i=0; i < 4; ++i) {{
for (int j=0; j < 4; ++j) {{
int idx = i*4 +j;
plainMat.matrix[idx] = EigenMat(i,j);
}}
}}
}}
__device__ void printEigenMat(Matrix4f someMatrix)
{{
for (int j=0; j < 4; ++j) {{
printf("\t%2.3f, %2.3f, %2.3f, %2.3f,\n",
someMatrix(j,0),
someMatrix(j,1),
someMatrix(j,2),
someMatrix(j,3));
}}
printf("\n");
}}
// ==============================
// Here's the actual work here!
// ==============================
extern "C"
__global__ void rasterizeSerial(GLVertex *skinnedVertices,
GLVertex *vertices, // REMOVE THIS WHEN FK+SKINNING ARE IMPLEMENTED
GLTriangleFace *triangles,
float *synthPixels)
{{
const uint bx = blockIdx.x;
const uint bw = blockDim.x;
const uint tx = threadIdx.x;
// Make sure we're looking at the right data
skinnedVertices += NVERTS*(bw*bx + tx);
#ifdef SHITTYSHITTYHACK
// ======================================================================
// HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK
// HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK
// HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK
// Remove when FK and skinning are implemented
Matrix3f scale_matrix = scaleMatrix3D(RESOLUTION_X*0.3, RESOLUTION_Y*0.3, 24.0);
Vector3f translate_vector(RESOLUTION_X/2, RESOLUTION_Y/2, 0.);
for (int i=0; i < NVERTS; ++i) {{
// Grab from memory
Vector3f v(vertices[i].x, vertices[i].y, vertices[i].z);
// Transform to screen space
v = scale_matrix*v;
v = v+translate_vector;
skinnedVertices[i].y = v(1);
skinnedVertices[i].z = v(2);
}}
// END HACK END HACK END HACK END HACK END HACK END HACK END HACK END HACK
// END HACK END HACK END HACK END HACK END HACK END HACK END HACK END HACK
// END HACK END HACK END HACK END HACK END HACK END HACK END HACK END HACK
// ======================================================================
#endif
int depthBufferOffset = NUMPIXELS_PER_MOUSE*(bx*bw + tx);
// For each triangle, rasterize the crap out of it
// (for now, don't care about overlaps)
for (int iface=0; iface < NFACES; ++iface)
{{
unsigned short i0 = triangles[iface].v0;
unsigned short i1 = triangles[iface].v1;
unsigned short i2 = triangles[iface].v2;
// NOTE THE SWAP MANG
// MAYA's coordinates are left-handed, which I liketh not.
Vector3f a(skinnedVertices[i0].x, skinnedVertices[i0].y, skinnedVertices[i0].z);
Vector3f b(skinnedVertices[i1].x, skinnedVertices[i1].y, skinnedVertices[i1].z);
Vector3f c(skinnedVertices[i2].x, skinnedVertices[i2].y, skinnedVertices[i2].z);
Vector3f ll = getLowerLeftOfTriangle(a,b,c);
Vector3f ur = getUpperRightOfTriangle(a,b,c);
for (int i=ll(1); i < ur(1); ++i) {{
for (int j=ll(0); j < ur(0); ++j) {{
Vector3f pt(j+0.5,i+0.5,0);
Vector3f baryCoord = calcBarycentricCoordinate(pt,a,b,c);
bool inTriangle = isBarycentricCoordinateInBounds(baryCoord);
if (inTriangle) {{
float interpZ = getZAtBarycentricCoordinate(baryCoord,a,b,c);
long int idx = i*RESOLUTION_X + j;
idx += depthBufferOffset;
float oldval = synthPixels[idx];
if (oldval <= interpZ) {{
atomicExch(&synthPixels[idx], interpZ);
}}
}}
}}
}}
}}
}}
extern "C"
// Fastest with 10 blocks, 256 threads
// Also, faster than the cache version.
__global__ void likelihoodSerial(float *synthPixels,
float *realPixels,
float *likelihood)
{{
const uint bx = blockIdx.x;
const uint bw = blockDim.x;
const uint tx = threadIdx.x;
int mouseIdx = bx*bw + tx;
int synthPixelOffset = NUMPIXELS_PER_MOUSE*mouseIdx;
float accumulator = 0.0;
for (int i=0; i < NUMPIXELS_PER_MOUSE; ++i) {{
accumulator += abs(realPixels[i] - synthPixels[i+synthPixelOffset]);
}}
atomicExch(&likelihood[mouseIdx], accumulator);
}}
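// Editor's note: the kernel above accumulates an L1 (sum of absolute differences) distance per mouse,
// likelihood[m] = sum_i | realPixels[i] - synthPixels[m*NUMPIXELS_PER_MOUSE + i] |.
// The atomicExch is effectively a plain store here, since each thread owns a distinct mouseIdx slot.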
extern "C"
__global__ void skinningSerial(Plain4x4Matrix_f *jointTransforms,
GLVertex *vertices,
JointWeights *jointWeights,
JointWeightIndices *jointWeightIndices,
GLVertex *skinnedVertices)
{{
const uint bx = blockIdx.x;
const uint bw = blockDim.x;
const uint tx = threadIdx.x;
int mouseIdx = bx*bw + tx;
jointTransforms += mouseIdx*NJOINTS;
skinnedVertices += mouseIdx*NVERTS;
// NOTE: this vertex is unused; the loop below declares its own per-vertex 'vertex'.
Vector4f vertex;
vertex << 0.0, 0.0, 0.0, 0.0;
// Grab the joint transformations, put them in a usable format
// (All matrix multiplication is done w/ Eigen)
Matrix4f theseJoints[NJOINTS];
for (int i=0; i < NJOINTS; ++i) {{
theseJoints[i] = Matrix4f(jointTransforms[i].matrix);
}}
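// Editor's note (worth verifying): Eigen's Matrix4f(const float*) constructor copies the raw data in
// the matrix's default column-major storage order, while FKSerial writes jointTransforms out row-major
// (idx = ii*4 + jj), so this load may transpose the skinning matrices unless that is intended.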
// Precalculate some scaling matrices
Matrix4f scale_matrix = scaleMatrix(RESOLUTION_X*0.3, RESOLUTION_Y*0.3, 24.0);
Vector4f translate_vector(RESOLUTION_X/2, RESOLUTION_Y/2, 0., 0.0);
for (int i=0; i < NVERTS; ++i) {{
// Grab the unposed vertex
Vector4f vertex(vertices[i].x, vertices[i].z, vertices[i].y, 1.0);
// Make our destination vertex
Vector4f skinnedVertex(0., 0., 0., 0.);
for (int ijoint=0; ijoint<N_JOINT_INFLUENCES; ++ijoint) {{
int index = jointWeightIndices[i].idx[ijoint];
float weight = jointWeights[i].w[ijoint];
skinnedVertex += weight*theseJoints[index]*vertex;
}}
// After we've computed the weighted skin position,
// then we'll scale and translate it into a proper skin space
skinnedVertex = scale_matrix*skinnedVertex;
skinnedVertex = skinnedVertex+translate_vector;
skinnedVertices[i].x = skinnedVertex(0);
skinnedVertices[i].y = skinnedVertex(1);
skinnedVertices[i].z = skinnedVertex(2);
}}
}}
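// Editor's note: the loop above is standard linear blend skinning,
// skinnedVertex = sum_j weights[j] * theseJoints[index_j] * vertex,
// followed by the same screen-space scale and translate used by the rasterization hack above.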
__device__ Matrix4f calculateEMatrix(GLVertex angle, GLVertex translation)
{{
float rotx = angle.x;
float roty = angle.y;
float rotz = angle.z;
rotx = deg2rad(rotx);
roty = deg2rad(roty);
rotz = deg2rad(rotz);
float cx = cos(rotx);
float sx = sin(rotx);
float cy = cos(roty);
float sy = sin(roty);
float cz = cos(rotz);
float sz = sin(rotz);
Matrix4f out = Matrix4f::Identity();
out << cy*cz, cy*sz, -sy , translation.x,
-cx*sz+sx*sy*cz, cx*cz+sx*sy*sz, sx*cy, translation.y,
sx*sz+sy*cx*cz, -sx*cz+cx*sy*sz, cx*cy, translation.z,
0.0, 0.0, 0.0, 1.0;
return out;
}}
extern "C"
__global__ void FKSerial(GLVertex *baseRotations,
GLVertex *rotations,
GLVertex *translations,
Plain4x4Matrix_f *jointTransforms)
{{
// NOTE:
// - The E inverse could be optimized.
// Notation:
// M - skinning matrix. You can multiply an unposed vector into M and get a posed vector.
// E - local transformation matrix. Represents a rotation and translation from (0,0)
// "Fixed" matrix - a matrix computed using default, or unposed, rotations
// "Changed" matrix - a matrix computed using non-default, or posed, rotations
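// Editor's sketch (added for clarity, not from the original author): with the loops below,
// fixedM[i] = inv(E_fixed[0]) * inv(E_fixed[1]) * ... * inv(E_fixed[i])
// changedM[i] = E_changed[i] * ... * E_changed[1] * E_changed[0]
// M[i] = fixedM[i] * changedM[i]
// (products written in the order the code multiplies them).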
const uint bx = blockIdx.x;
const uint bw = blockDim.x;
const uint tx = threadIdx.x;
int mouseIdx = bx*bw + tx;
rotations += mouseIdx*NJOINTS;
jointTransforms += mouseIdx*NJOINTS;
Matrix4f fixedE[NJOINTS];
Matrix4f fixedM[NJOINTS];
Matrix4f changedE[NJOINTS];
Matrix4f changedM[NJOINTS];
Matrix4f M[NJOINTS];
// == Get the fixed E's.
// ========================================
for (int i=0; i < NJOINTS; ++i) {{
fixedE[i] = calculateEMatrix(baseRotations[i], translations[i]);
}}
// == Get the fixed M's.
// ========================================
fixedM[0] = fixedE[0].inverse();
for (int i=1; i < NJOINTS; ++i) {{
fixedM[i] = fixedM[i-1]*fixedE[i].inverse();
}}
// == Get the Changed E's.
// ========================================
for (int i=0; i < NJOINTS; ++i) {{
changedE[i] = calculateEMatrix(rotations[i], translations[i]);
}}
// == Get the changed M's
// ========================================
changedM[0] = changedE[0];
for (int i=1; i < NJOINTS; ++i) {{
changedM[i] = changedE[i]*changedM[i-1];
}}
// == Create the final M's by multiplying the fixed and changed M's.
// ========================================
for (int i=0; i < NJOINTS; ++i) {{
M[i] = fixedM[i]*changedM[i];
for (int ii=0; ii < 4; ++ii) {{
for (int jj=0; jj < 4; ++jj) {{
int idx = ii*4 + jj;
jointTransforms[i].matrix[idx] = M[i](ii,jj);
}}
}}
}}
}}
extern "C"
// NOTE: UNFINISHED
__global__ void FKSerial2(GLVertex *rotations,
GLVertex *translations,
Plain4x4Matrix_f *inverseBindingMatrix,
Plain4x4Matrix_f *jointTransforms)
{{
const uint bx = blockIdx.x;
const uint bw = blockDim.x;
const uint tx = threadIdx.x;
int mouseIdx = bx*bw + tx;
rotations += mouseIdx*NJOINTS;
translations += mouseIdx*NJOINTS;
jointTransforms += mouseIdx*NJOINTS;
Matrix4f lastJointWorldMatrix = Matrix4f::Identity();
Matrix4f jointWorldMatrix = Matrix4f::Identity();
// For each joint, starting with an identity transform,...
for (int ijoint=0; ijoint<NJOINTS; ++ijoint) {{
// Take the rotation and translation
float rx = rotations[ijoint].x;
float ry = rotations[ijoint].y;
float rz = rotations[ijoint].z;
float tx = translations[ijoint].x;
float ty = translations[ijoint].y;
float tz = translations[ijoint].z;
// Get the local transform
Matrix4f tmp = rotateMatrix(rx,ry,rz);
translate(tmp, tx, ty, tz);
Matrix4f localTransform = tmp;
// Multiply it by the parent world matrix to get the current world
jointWorldMatrix = localTransform*lastJointWorldMatrix;
// Multiply it by the inverse binding matrix to get the skinning matrix
Matrix4f Bi = Matrix4f::Identity();
copyMat4x4ToEigen(inverseBindingMatrix[ijoint], Bi);
Matrix4f M = Bi*jointWorldMatrix;
// Save that skinning matrix out
copyEigenToMat4x4(jointTransforms[ijoint], M);
// Save out the current world matrix as the parent of the next one
lastJointWorldMatrix = jointWorldMatrix;
}}
}}
// CODE GRAVEYARD
/*
extern "C"
__global__ void PlaygroundKernel(GLVertex *vertices,
GLTriangleFace *triangles,
float *depthBuffer,
float *mouseImage,
JointInfluences4Joints *jointWeights,
JointIndices4Joints *jointIndices,
Plain4x4Matrix_f *jointWorldMatrix,
Plain4x4Matrix_f *inverseBindingMatrix,
int numJoints,
int resolutionX,
int resolutionY )
{{
Matrix3f transform = scaleMatrix3D(30.0, 30.0, 30.0);
Vector3f transvec(40.5, 40.5, 0.);
__shared__ float thisMouse[6400];
for (int i=0; i<6400;++i) {{
thisMouse[i] = mouseImage[i];
}}
float synthMouse[6400];
for (int i=0; i<6400;++i) {{
synthMouse[i] = 0.0f;
}}
// Solve some FK, just a lil bit.
Matrix4f posingMatrix[NJOINTS];
for (int i=0; i < NJOINTS; ++i) {{
Matrix4f thisJointWorld = EigenMatFromMemory(jointWorldMatrix[i].matrix);
Matrix4f thisInverseBinding = EigenMatFromMemory(inverseBindingMatrix[i].matrix);
posingMatrix[i] = thisJointWorld*thisInverseBinding;
}}
// for (int ijoint=0; ijoint < NJOINTS; ++ijoint) {{
// printf("Matrix%d\n[\n", ijoint);
// for (int j=0; j < 4; ++j) {{
// printf("%0.2f,%0.2f,%0.2f,%0.2f,\n",
// posingMatrix[ijoint](j,0),
// posingMatrix[ijoint](j,1),
// posingMatrix[ijoint](j,2),
// posingMatrix[ijoint](j,3));
// }}
// printf("]\n");
// }}
// Pre-transform the vertices
for (int i=0; i < NVERTS; ++i) {{
// Grab from memory
Vector3f v(vertices[i].x, vertices[i].z, vertices[i].y);
// Transform to screen space
v = transform*v;
v = v+transvec;
Vector4f v4;
v4(0) = v(0); v4(1) = v(1); v4(2) = v(2); v4(3) = 1.0;
// Skin
int indices[N_JOINT_INFLUENCES];
indices[0] = jointIndices[i].i0;
indices[1] = jointIndices[i].i1;
indices[2] = jointIndices[i].i2;
indices[3] = jointIndices[i].i3;
float weights[N_JOINT_INFLUENCES];
weights[0] = jointWeights[i].w0;
weights[1] = jointWeights[i].w1;
weights[2] = jointWeights[i].w2;
weights[3] = jointWeights[i].w3;
Vector4f skinnedVert;
skinnedVert(0) = 0.0; skinnedVert(1) = 0.0;
skinnedVert(2) = 0.0; skinnedVert(3) = 0.0;
for (int j=0; j < N_JOINT_INFLUENCES; ++j) {{
int idx = indices[j];
float weight = weights[j];
Matrix4f thisMat = posingMatrix[idx];
Vector4f thisVec = weight*thisMat*v4;
skinnedVert = skinnedVert+thisVec;
}}
vertices[i].x = skinnedVert(0);
vertices[i].y = skinnedVert(1);
vertices[i].z = skinnedVert(2);
}}
// For each triangle, rasterize the crap out of it
// (for now, don't care about overlaps)
for (int iface=0; iface < NFACES; ++iface)
{{
unsigned short i0 = triangles[iface].v0;
unsigned short i1 = triangles[iface].v1;
unsigned short i2 = triangles[iface].v2;
// NOTE THE SWAP MANG
// MAYA's coordinates are left-handed, which I liketh not.
Vector3f a(vertices[i0].x, vertices[i0].y, vertices[i0].z);
Vector3f b(vertices[i1].x, vertices[i1].y, vertices[i1].z);
Vector3f c(vertices[i2].x, vertices[i2].y, vertices[i2].z);
Vector3f ll = getLowerLeftOfTriangle(a,b,c);
Vector3f ur = getUpperRightOfTriangle(a,b,c);
for (int i=ll(1); i < ur(1); ++i) {{
for (int j=ll(0); j < ur(0); ++j) {{
Vector3f pt(j+0.5,i+0.5,0);
Vector3f baryCoord = calcBarycentricCoordinate(pt,a,b,c);
bool inTriangle = isBarycentricCoordinateInBounds(baryCoord);
if (inTriangle) {{
float interpZ = getZAtBarycentricCoordinate(baryCoord,a,b,c);
long int idx = i*resolutionX + j;
float oldval = synthMouse[idx];
float compareval = thisMouse[idx];
if (oldval <= interpZ) {{
synthMouse[idx] = interpZ;
// atomicExch(&synthMouse[idx], interpZ-compareval);
}}
}}
}}
}}
for (int i=0; i < resolutionX*resolutionY; ++i) {{
depthBuffer[i] = synthMouse[i];
mouseImage[i] = synthMouse[i] - thisMouse[i];
}}
}}
}}
extern "C"
__global__ void rasterizeParallel(GLVertex *skinnedVertices,
GLVertex *vertices, // TODO: remove once FK and skinning exist.
GLTriangleFace *triangles,
float *depthBuffer)
{{
const uint bx = blockIdx.x;
const uint bw = blockDim.x;
const uint tx = threadIdx.x;
// The block determines which primitive we're on
// printf("Working on triangle %d\n", bx);
const int primitiveIdx = bx;
Matrix3f scale_matrix = scaleMatrix3D(RESOLUTION_X*0.3, RESOLUTION_Y*0.3, 24.0);
Vector3f translate_vector(RESOLUTION_X/2, RESOLUTION_Y/2, 0.);
__shared__ Vector3f a;
__shared__ Vector3f b;
__shared__ Vector3f c;
__shared__ Vector3f ll;
__shared__ Vector3f ur;
__shared__ int boundingBoxWidth;
__shared__ int boundingBoxHeight;
__shared__ int numPixelsInBoundingBox;
__shared__ float *sharedDepthBuffer;
if (threadIdx.x == 0) {{
// Grab the triangle indices
unsigned short i0 = triangles[primitiveIdx].v0;
unsigned short i1 = triangles[primitiveIdx].v1;
unsigned short i2 = triangles[primitiveIdx].v2;
// NOTE THE SWAP MANG
// MAYA's coordinates are left-handed, which I liketh not.
a << vertices[i0].x, vertices[i0].y, vertices[i0].z;
b << vertices[i1].x, vertices[i1].y, vertices[i1].z;
c << vertices[i2].x, vertices[i2].y, vertices[i2].z;
// THIS IS A HACK UNTIL FK AND SKINNING ARE IMPLEMENTED
a = scale_matrix*a;
a = a+translate_vector;
b = scale_matrix*b;
b = b+translate_vector;
c = scale_matrix*c;
c = c+translate_vector;
// Find the bounding box
ll = getLowerLeftOfTriangle(a,b,c);
ur = getUpperRightOfTriangle(a,b,c);
boundingBoxWidth = (int)ceilf(ur(0) - ll(0));
boundingBoxHeight = (int)ceilf(ur(1) - ll(1));
numPixelsInBoundingBox = boundingBoxWidth*boundingBoxHeight;
// Create a space for shared memory to be written to
sharedDepthBuffer = (float *)malloc(numPixelsInBoundingBox*sizeof(float));
}}
__syncthreads();
for (int i=0; i < numPixelsInBoundingBox; ++i) {{
sharedDepthBuffer[i] = (float)(i); }}
// All threads write into the sharedDepthBuffer
// for (int i=ll(1); i < ur(1); ++i) {{
// for (int j=ll(0); j < ur(0); ++j) {{
// Vector3f pt(j+0.5,i+0.5,0);
// Vector3f baryCoord = calcBarycentricCoordinate(pt,a,b,c);
// bool inTriangle = isBarycentricCoordinateInBounds(baryCoord);
// if (inTriangle) {{
// float interpZ = getZAtBarycentricCoordinate(baryCoord,a,b,c);
// long int idx = i*RESOLUTION_X + j;
// float oldval = depthBuffer[idx];
// if (oldval <= interpZ) {{
// atomicExch(&depthBuffer[idx], interpZ);
// }}
// }}
// }}
// }}
// __syncthreads();
// Write out to the depth buffer
// TODO: should use atomicCAS for this, I believe.
int counter = 0;
if (threadIdx.x == 0) {{
for (int i=ll(1); i < ur(1); ++i) {{
for (int j=ll(0); j < ur(0); ++j) {{
long int idx = i*RESOLUTION_X + j;
float oldVal = depthBuffer[idx];
float newVal = sharedDepthBuffer[counter];
newVal = 999.0;
if (oldVal <= newVal) {{
atomicExch(&depthBuffer[idx], newVal);
}}
++counter;
}}
}}
}}
// __syncthreads();
}}
*/
|
3f42e61dc2fb6440342f5674ae8a7d0a5743b88f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///////////////////////////////////////////////////////////////////////////
// InterpolateGPU: //
///////////////////////////////////////////////////////////////////////////
#include "InterpolateGPU.h"
//________________________________________________________________________________________________________________
__device__ int GetBinNumberTest(double aBinSize, int aNbins, double aValue)
{
//TODO check the accuracy of this
double tBinKStarMin, tBinKStarMax;
for(int i=0; i<aNbins; i++)
{
tBinKStarMin = i*aBinSize;
tBinKStarMax = (i+1)*aBinSize;
if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i;
}
return -1; //i.e. failure
}
//________________________________________________________________________________________________________________
__device__ int GetBinNumberTest(int aNbins, double aMin, double aMax, double aValue)
{
//TODO check the accuracy of this
double tBinSize = (aMax-aMin)/aNbins;
double tBinKStarMin, tBinKStarMax;
for(int i=0; i<aNbins; i++)
{
tBinKStarMin = i*tBinSize + aMin;
tBinKStarMax = (i+1)*tBinSize + aMin;
if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i;
}
return -1; //i.e. failure
}
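// Editor's sketch (assumption, not original code): since the binning is uniform, the linear scan above
// could be replaced by a direct computation such as
//   int tBin = (int)floor((aValue - aMin)/tBinSize);
//   return (tBin >= 0 && tBin < aNbins) ? tBin : -1;
// The scan is kept as written by the original author.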
//________________________________________________________________________________________________________________
__device__ int GetBinNumberTest(double aBinWidth, double aMin, double aMax, double aValue)
{
//TODO check the accuracy of this
int tNbins = (aMax-aMin)/aBinWidth;
double tBinKStarMin, tBinKStarMax;
for(int i=0; i<tNbins; i++)
{
tBinKStarMin = i*aBinWidth + aMin;
tBinKStarMax = (i+1)*aBinWidth + aMin;
if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i;
}
return -1; //i.e. failure
}
//________________________________________________________________________________________________________________
__device__ int GetInterpLowBinTest(InterpType aInterpType, InterpAxisType aAxisType, double aVal)
{
int tReturnBin = -2;
int tNbins, tBin;
double tMin, tMax, tBinWidth, tBinCenter;
bool tErrorFlag = false;
switch(aInterpType)
{
case kGTilde:
switch(aAxisType)
{
case kKaxis:
tNbins = d_fGTildeInfo->nBinsK;
tBinWidth = d_fGTildeInfo->binWidthK;
tMin = d_fGTildeInfo->minK;
tMax = d_fGTildeInfo->maxK;
break;
case kRaxis:
tNbins = d_fGTildeInfo->nBinsR;
tBinWidth = d_fGTildeInfo->binWidthR;
tMin = d_fGTildeInfo->minR;
tMax = d_fGTildeInfo->maxR;
break;
//Invalid axis selection
case kThetaaxis:
tErrorFlag = true;
break;
case kReF0axis:
tErrorFlag = true;
break;
case kImF0axis:
tErrorFlag = true;
break;
case kD0axis:
tErrorFlag = true;
break;
}
break;
case kHyperGeo1F1:
switch(aAxisType)
{
case kKaxis:
tNbins = d_fHyperGeo1F1Info->nBinsK;
tBinWidth = d_fHyperGeo1F1Info->binWidthK;
tMin = d_fHyperGeo1F1Info->minK;
tMax = d_fHyperGeo1F1Info->maxK;
break;
case kRaxis:
tNbins = d_fHyperGeo1F1Info->nBinsR;
tBinWidth = d_fHyperGeo1F1Info->binWidthR;
tMin = d_fHyperGeo1F1Info->minR;
tMax = d_fHyperGeo1F1Info->maxR;
break;
case kThetaaxis:
tNbins = d_fHyperGeo1F1Info->nBinsTheta;
tBinWidth = d_fHyperGeo1F1Info->binWidthTheta;
tMin = d_fHyperGeo1F1Info->minTheta;
tMax = d_fHyperGeo1F1Info->maxTheta;
break;
//Invalid axis selection
case kReF0axis:
tErrorFlag = true;
break;
case kImF0axis:
tErrorFlag = true;
break;
case kD0axis:
tErrorFlag = true;
break;
}
break;
case kScattLen:
switch(aAxisType)
{
case kReF0axis:
tNbins = d_fScattLenInfo->nBinsReF0;
tBinWidth = d_fScattLenInfo->binWidthReF0;
tMin = d_fScattLenInfo->minReF0;
tMax = d_fScattLenInfo->maxReF0;
break;
case kImF0axis:
tNbins = d_fScattLenInfo->nBinsImF0;
tBinWidth = d_fScattLenInfo->binWidthImF0;
tMin = d_fScattLenInfo->minImF0;
tMax = d_fScattLenInfo->maxImF0;
break;
case kD0axis:
tNbins = d_fScattLenInfo->nBinsD0;
tBinWidth = d_fScattLenInfo->binWidthD0;
tMin = d_fScattLenInfo->minD0;
tMax = d_fScattLenInfo->maxD0;
break;
case kKaxis:
tNbins = d_fScattLenInfo->nBinsK;
tBinWidth = d_fScattLenInfo->binWidthK;
tMin = d_fScattLenInfo->minK;
tMax = d_fScattLenInfo->maxK;
break;
//Invalid axis selection
case kRaxis:
tErrorFlag = true;
break;
case kThetaaxis:
tErrorFlag = true;
break;
}
break;
}
//Check error
if(tErrorFlag) return -3;
//---------------------------------
tBin = GetBinNumberTest(tNbins,tMin,tMax,aVal);
tBinCenter = tMin + (tBin+0.5)*tBinWidth;
if(aVal < tBinCenter) tReturnBin = tBin-1;
else tReturnBin = tBin;
if(tReturnBin<0) return -2;
if(tReturnBin>=tNbins) return -1;
// if(tReturnBin<0 || tReturnBin >= tNbins) return -2;
else return tReturnBin;
}
//________________________________________________________________________________________________________________
__device__ int GetInterpLowBinTestKaxisTest(double aVal)
{
int tReturnBin = -2;
int tNbins, tBin;
double tMin, tMax, tBinWidth, tBinCenter;
tNbins = 160;
tBinWidth = 0.0025;
tMin = 0.0;
tMax = 0.40;
//---------------------------------
tBin = GetBinNumberTest(tNbins,tMin,tMax,aVal);
tBinCenter = tMin + (tBin+0.5)*tBinWidth;
if(aVal < tBinCenter) tReturnBin = tBin-1;
else tReturnBin = tBin;
if(tReturnBin<0 || tReturnBin >= tNbins) return -2;
else return tReturnBin;
}
//________________________________________________________________________________________________________________
__device__ int GetInterpLowBinTestRaxisTest(double aVal)
{
int tReturnBin = -2;
int tNbins, tBin;
double tMin, tMax, tBinWidth, tBinCenter;
tNbins = 100;
tBinWidth = 0.1;
tMin = 0.0;
tMax = 10.0;
//---------------------------------
tBin = GetBinNumberTest(tNbins,tMin,tMax,aVal);
tBinCenter = tMin + (tBin+0.5)*tBinWidth;
if(aVal < tBinCenter) tReturnBin = tBin-1;
else tReturnBin = tBin;
if(tReturnBin<0 || tReturnBin >= tNbins) return -2;
else return tReturnBin;
}
//________________________________________________________________________________________________________________
__device__ double GetInterpLowBinTestCenter(InterpType aInterpType, InterpAxisType aAxisType, double aVal)
{
double tReturnValue;
int tReturnBin = -2;
int tNbins, tBin;
double tMin, tMax, tBinWidth, tBinCenter;
bool tErrorFlag = false;
switch(aInterpType)
{
case kGTilde:
switch(aAxisType)
{
case kKaxis:
tNbins = d_fGTildeInfo->nBinsK;
tBinWidth = d_fGTildeInfo->binWidthK;
tMin = d_fGTildeInfo->minK;
tMax = d_fGTildeInfo->maxK;
break;
case kRaxis:
tNbins = d_fGTildeInfo->nBinsR;
tBinWidth = d_fGTildeInfo->binWidthR;
tMin = d_fGTildeInfo->minR;
tMax = d_fGTildeInfo->maxR;
break;
//Invalid axis selection
case kThetaaxis:
tErrorFlag = true;
break;
case kReF0axis:
tErrorFlag = true;
break;
case kImF0axis:
tErrorFlag = true;
break;
case kD0axis:
tErrorFlag = true;
break;
}
break;
case kHyperGeo1F1:
switch(aAxisType)
{
case kKaxis:
tNbins = d_fHyperGeo1F1Info->nBinsK;
tBinWidth = d_fHyperGeo1F1Info->binWidthK;
tMin = d_fHyperGeo1F1Info->minK;
tMax = d_fHyperGeo1F1Info->maxK;
break;
case kRaxis:
tNbins = d_fHyperGeo1F1Info->nBinsR;
tBinWidth = d_fHyperGeo1F1Info->binWidthR;
tMin = d_fHyperGeo1F1Info->minR;
tMax = d_fHyperGeo1F1Info->maxR;
break;
case kThetaaxis:
tNbins = d_fHyperGeo1F1Info->nBinsTheta;
tBinWidth = d_fHyperGeo1F1Info->binWidthTheta;
tMin = d_fHyperGeo1F1Info->minTheta;
tMax = d_fHyperGeo1F1Info->maxTheta;
break;
//Invalid axis selection
case kReF0axis:
tErrorFlag = true;
break;
case kImF0axis:
tErrorFlag = true;
break;
case kD0axis:
tErrorFlag = true;
break;
}
break;
case kScattLen:
switch(aAxisType)
{
case kReF0axis:
tNbins = d_fScattLenInfo->nBinsReF0;
tBinWidth = d_fScattLenInfo->binWidthReF0;
tMin = d_fScattLenInfo->minReF0;
tMax = d_fScattLenInfo->maxReF0;
break;
case kImF0axis:
tNbins = d_fScattLenInfo->nBinsImF0;
tBinWidth = d_fScattLenInfo->binWidthImF0;
tMin = d_fScattLenInfo->minImF0;
tMax = d_fScattLenInfo->maxImF0;
break;
case kD0axis:
tNbins = d_fScattLenInfo->nBinsD0;
tBinWidth = d_fScattLenInfo->binWidthD0;
tMin = d_fScattLenInfo->minD0;
tMax = d_fScattLenInfo->maxD0;
break;
case kKaxis:
tNbins = d_fScattLenInfo->nBinsK;
tBinWidth = d_fScattLenInfo->binWidthK;
tMin = d_fScattLenInfo->minK;
tMax = d_fScattLenInfo->maxK;
break;
//Invalid axis selection
case kRaxis:
tErrorFlag = true;
break;
case kThetaaxis:
tErrorFlag = true;
break;
}
break;
}
//Check error
if(tErrorFlag) return -2;
//---------------------------------
tBin = GetBinNumberTest(tNbins,tMin,tMax,aVal);
tBinCenter = tMin + (tBin+0.5)*tBinWidth;
if(aVal < tBinCenter) tReturnBin = tBin-1;
else tReturnBin = tBin;
if(tReturnBin<0 || tReturnBin >= tNbins) return -2;
tReturnValue = tMin + (tReturnBin+0.5)*tBinWidth;
return tReturnValue;
}
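// Editor's note: GetInterpLowBinTestCenter above duplicates the axis-selection switch of
// GetInterpLowBinTest and differs only in returning the low bin's center value instead of its index;
// the two could share a helper, but are left separate here as in the original.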
//________________________________________________________________________________________________________________
__device__ double GetInterpLowBinTestCenterKaxisTest(double aVal)
{
double tReturnValue;
int tReturnBin = -2;
int tNbins, tBin;
double tMin, tMax, tBinWidth, tBinCenter;
tNbins = 160;
tBinWidth = 0.0025;
tMin = 0.0;
tMax = 0.40;
//---------------------------------
tBin = GetBinNumberTest(tNbins,tMin,tMax,aVal);
tBinCenter = tMin + (tBin+0.5)*tBinWidth;
if(aVal < tBinCenter) tReturnBin = tBin-1;
else tReturnBin = tBin;
if(tReturnBin<0 || tReturnBin >= tNbins) return -2;
tReturnValue = tMin + (tReturnBin+0.5)*tBinWidth;
return tReturnValue;
}
//________________________________________________________________________________________________________________
__device__ double GetInterpLowBinTestCenterRaxisTest(double aVal)
{
double tReturnValue;
int tReturnBin = -2;
int tNbins, tBin;
double tMin, tMax, tBinWidth, tBinCenter;
tNbins = 100;
tBinWidth = 0.1;
tMin = 0.0;
tMax = 10.0;
//---------------------------------
tBin = GetBinNumberTest(tNbins,tMin,tMax,aVal);
tBinCenter = tMin + (tBin+0.5)*tBinWidth;
if(aVal < tBinCenter) tReturnBin = tBin-1;
else tReturnBin = tBin;
if(tReturnBin<0 || tReturnBin >= tNbins) return -2;
tReturnValue = tMin + (tReturnBin+0.5)*tBinWidth;
return tReturnValue;
}
//________________________________________________________________________________________________________________
__global__ void GTildeInterpolate(double* aKStar, double* aRStar, double* aGTildeReal)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
double tResultReal = 0.;
// double tResultImag = 0.;
//----------------------------
int tNbinsR = d_fGTildeInfo->nBinsR;
int tNbinsK = d_fGTildeInfo->nBinsK;
//----------------------------
//TODO put in check to make sure GetInterpLowBinTestCenter does not return the error -2
double tBinWidthK = d_fGTildeInfo->binWidthK;
// double tBinWidthK = 0.0025;
int tBinLowK = GetInterpLowBinTest(kGTilde,kKaxis,aKStar[idx]);
// int tBinLowK = GetInterpLowBinTestKaxisTest(aKStar[idx]);
int tBinHighK = tBinLowK+1;
double tBinLowCenterK = GetInterpLowBinTestCenter(kGTilde,kKaxis,aKStar[idx]);
// double tBinLowCenterK = GetInterpLowBinTestCenterKaxisTest(aKStar[idx]);
double tBinHighCenterK = tBinLowCenterK+tBinWidthK;
double tBinWidthR = d_fGTildeInfo->binWidthR;
// double tBinWidthR = 0.1;
int tBinLowR = GetInterpLowBinTest(kGTilde,kRaxis,aRStar[idx]);
// int tBinLowR = GetInterpLowBinTestRaxisTest(aRStar[idx]);
int tBinHighR = tBinLowR+1;
double tBinLowCenterR = GetInterpLowBinTestCenter(kGTilde,kRaxis,aRStar[idx]);
// double tBinLowCenterR = GetInterpLowBinTestCenterRaxisTest(aRStar[idx]);
double tBinHighCenterR = tBinLowCenterR+tBinWidthR;
//--------------------------
assert(tBinLowK>=0);
assert(tBinHighK<tNbinsK);
assert(tBinLowCenterK>0);
assert(tBinHighCenterK>0);
assert(tBinLowR>-3);
assert(tBinLowR>-2);
assert(tBinLowR>-1);
assert(tBinLowR>=0);
assert(tBinHighR<tNbinsR);
assert(tBinLowCenterR>0);
assert(tBinHighCenterR>0);
double tQ11Real = d_fGTildeReal[tBinLowR + tBinLowK*tNbinsR];
double tQ12Real = d_fGTildeReal[tBinHighR + tBinLowK*tNbinsR];
double tQ21Real = d_fGTildeReal[tBinLowR + tBinHighK*tNbinsR];
double tQ22Real = d_fGTildeReal[tBinHighR + tBinHighK*tNbinsR];
/*
double tQ11Imag = d_fGTildeImag[tBinLowR + tBinLowK*tNbinsR];
double tQ12Imag = d_fGTildeImag[tBinHighR + tBinLowK*tNbinsR];
double tQ21Imag = d_fGTildeImag[tBinLowR + tBinHighK*tNbinsR];
double tQ22Imag = d_fGTildeImag[tBinHighR + tBinHighK*tNbinsR];
*/
//--------------------------
double tD = 1.0*tBinWidthK*tBinWidthR;
tResultReal = (1.0/tD)*( tQ11Real*(tBinHighCenterK-aKStar[idx])*(tBinHighCenterR-aRStar[idx])
                       + tQ21Real*(aKStar[idx]-tBinLowCenterK)*(tBinHighCenterR-aRStar[idx])
                       + tQ12Real*(tBinHighCenterK-aKStar[idx])*(aRStar[idx]-tBinLowCenterR)
                       + tQ22Real*(aKStar[idx]-tBinLowCenterK)*(aRStar[idx]-tBinLowCenterR) );
// tResultImag = (1.0/tD)*(tQ11Imag*(tBinHighCenterK-aKStar)*(tBinHighCenterR-aRStar) + tQ21Imag*(aKStar-tBinLowCenterK)*(tBinHighCenterR-aRStar) + tQ12Imag*(tBinHighCenterK-aKStar)*(aRStar-tBinLowCenterR) + tQ22Imag*(aKStar-tBinLowCenterK)*(aRStar-tBinLowCenterR));
//--------------------------
aGTildeReal[idx] = tResultReal;
}
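// Editor's note: the expression above is standard bilinear interpolation on the (kStar, rStar) grid,
// f(k,r) ~= [ Q11*(k2-k)*(r2-r) + Q21*(k-k1)*(r2-r) + Q12*(k2-k)*(r-r1) + Q22*(k-k1)*(r-r1) ] / ((k2-k1)*(r2-r1)),
// where (k1,r1) and (k2,r2) are the centers of the low and high neighbouring bins.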
//________________________________________________________________________________________________________________
//****************************************************************************************************************
//________________________________________________________________________________________________________________
InterpolateGPU::InterpolateGPU(int aNThreadsPerBlock, int aNBlocks) :
fNThreadsPerBlock(aNThreadsPerBlock),
fNBlocks(aNBlocks)
{
hipSetDeviceFlags(hipDeviceMapHost);
}
//________________________________________________________________________________________________________________
InterpolateGPU::~InterpolateGPU()
{
}
//________________________________________________________________________________________________________________
void InterpolateGPU::LoadGTildeReal(td2dVec &aGTildeReal)
{
int tNbinsK = aGTildeReal.size();
int tNbinsR = aGTildeReal[0].size();
int tSize = tNbinsK*tNbinsR*sizeof(double);
// checkCudaErrors(hipHostMalloc((void**) &fGTildeReal, tSize, hipHostMallocMapped));
checkCudaErrors(hipMallocManaged(&d_fGTildeReal, tSize));
int tIndex;
for(int iK=0; iK<tNbinsK; iK++)
{
for(int iR=0; iR<tNbinsR; iR++)
{
tIndex = iR + iK*tNbinsR;
d_fGTildeReal[tIndex] = aGTildeReal[iK][iR];
}
}
// checkCudaErrors(hipHostGetDevicePointer(&d_fGTildeReal, fGTildeReal, 0));
}
//________________________________________________________________________________________________________________
void InterpolateGPU::LoadGTildeImag(td2dVec &aGTildeImag)
{
int tNbinsK = aGTildeImag.size();
int tNbinsR = aGTildeImag[0].size();
int tSize = tNbinsK*tNbinsR*sizeof(double);
// checkCudaErrors(hipHostMalloc((void**) &fGTildeImag, tSize, hipHostMallocMapped));
checkCudaErrors(hipMallocManaged(&d_fGTildeImag, tSize));
int tIndex;
for(int iK=0; iK<tNbinsK; iK++)
{
for(int iR=0; iR<tNbinsR; iR++)
{
tIndex = iR + iK*tNbinsR;
d_fGTildeImag[tIndex] = aGTildeImag[iK][iR];
}
}
// checkCudaErrors(hipHostGetDevicePointer(&d_fGTildeImag, fGTildeImag, 0));
}
//________________________________________________________________________________________________________________
void InterpolateGPU::LoadGTildeInfo(BinInfoGTilde &aBinInfo)
{
// checkCudaErrors(hipHostMalloc((void**) &fGTildeInfo, sizeof(BinInfoGTilde), hipHostMallocMapped));
checkCudaErrors(hipMallocManaged(&d_fGTildeInfo, sizeof(BinInfoGTilde)));
d_fGTildeInfo->nBinsK = aBinInfo.nBinsK;
d_fGTildeInfo->nBinsR = aBinInfo.nBinsR;
d_fGTildeInfo->binWidthK = aBinInfo.binWidthK;
d_fGTildeInfo->binWidthR = aBinInfo.binWidthR;
d_fGTildeInfo->minK = aBinInfo.minK;
d_fGTildeInfo->maxK = aBinInfo.maxK;
d_fGTildeInfo->minR = aBinInfo.minR;
d_fGTildeInfo->maxR = aBinInfo.maxR;
d_fGTildeInfo->minInterpK = aBinInfo.minInterpK;
d_fGTildeInfo->maxInterpK = aBinInfo.maxInterpK;
d_fGTildeInfo->minInterpR = aBinInfo.minInterpR;
d_fGTildeInfo->maxInterpR = aBinInfo.maxInterpR;
// checkCudaErrors(hipHostGetDevicePointer(&d_fGTildeInfo, fGTildeInfo, 0));
}
//________________________________________________________________________________________________________________
vector<double> InterpolateGPU::RunBilinearInterpolate(vector<vector<double> > &aPairsIn)
{
// hipSetDeviceFlags(hipDeviceMapHost);
int tNPairs = aPairsIn.size();
int tSize = tNPairs*sizeof(double);
//---Host array allocations
double * h_KStarMag;
double * h_RStarMag;
double * h_GTildeReal;
checkCudaErrors(hipHostMalloc((void**) &h_KStarMag, tSize, hipHostMallocMapped));
checkCudaErrors(hipHostMalloc((void**) &h_RStarMag, tSize, hipHostMallocMapped));
checkCudaErrors(hipHostMalloc((void**) &h_GTildeReal, tSize, hipHostMallocMapped));
for(int i=0; i<tNPairs; i++)
{
h_KStarMag[i] = aPairsIn[i][0];
h_RStarMag[i] = aPairsIn[i][1];
}
//---Device array allocations
//---Device arrays and allocations
double * d_KStarMag;
double * d_RStarMag;
double * d_GTildeReal;
checkCudaErrors(hipHostGetDevicePointer(&d_KStarMag, h_KStarMag, 0));
checkCudaErrors(hipHostGetDevicePointer(&d_RStarMag, h_RStarMag, 0));
checkCudaErrors(hipHostGetDevicePointer(&d_GTildeReal, h_GTildeReal, 0));
//----------Run the kernel-----------------------------------------------
GpuTimer timer;
timer.Start();
hipLaunchKernelGGL(( GTildeInterpolate), dim3(fNBlocks),dim3(fNThreadsPerBlock), 0, 0, d_KStarMag,d_RStarMag,d_GTildeReal);
timer.Stop();
std::cout << "GTildeInterpolate kernel finished in " << timer.Elapsed() << " ms" << std::endl;
//The following is necessary for the host to be able to "see" the changes that have been done
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipHostFree(h_KStarMag));
checkCudaErrors(hipHostFree(h_RStarMag));
vector<double> tReturnVec(tNPairs);
for(int i=0; i<tNPairs; i++)
{
tReturnVec[i] = h_GTildeReal[i];
// cout << "i = " << i << endl;
// cout << "h_GTildeReal[i] = " << h_GTildeReal[i] << endl;
// cout << "tReturnVec[i] = " << tReturnVec[i] << endl << endl;
}
checkCudaErrors(hipHostFree(h_GTildeReal));
return tReturnVec;
}
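// A minimal host-side usage sketch (editor's addition; the input names are assumptions):
//   InterpolateGPU tInterp(1000, 10);                                  // threads per block, blocks
//   tInterp.LoadGTildeInfo(tBinInfo);                                  // BinInfoGTilde filled by the caller
//   tInterp.LoadGTildeReal(tGTildeReal);                               // td2dVec indexed as [kBin][rBin]
//   vector<double> tResults = tInterp.RunBilinearInterpolate(tPairs);  // tPairs[i] = {kStar, rStar}
// Note that GTildeInterpolate has no bounds check on idx, so tPairs.size() should match
// fNBlocks*fNThreadsPerBlock for the launch above.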
/*
//________________________________________________________________________________________________________________
double* InterpolateGPU::RunBilinearInterpolate(double* host_out, double *aPairsIn, double *a2dVecIn)
{
hipSetDeviceFlags(hipDeviceMapHost);
int tNThreadsPerBlock = 1000;
int tNBlocks = 10;
double * device_out;
double * device_PairsIn;
double * device_2dVecIn;
GpuTimer timerCopy;
timerCopy.Start();
checkCudaErrors(hipHostGetDevicePointer(&device_out, host_out, 0));
checkCudaErrors(hipHostGetDevicePointer(&device_PairsIn, aPairsIn, 0));
checkCudaErrors(hipHostGetDevicePointer(&device_2dVecIn, a2dVecIn, 0));
timerCopy.Stop();
cout << "Time to copy: " << timerCopy.Elapsed() << "ms" << endl;
//---------------------
GpuTimer timer;
timer.Start();
BilinearInterpolateVector<<<tNBlocks,tNThreadsPerBlock>>>(device_out,device_PairsIn,device_2dVecIn);
timer.Stop();
std::cout << "Kernel finished in " << timer.Elapsed() << "ms" << std::endl;
//The following is necessary for the host to be able to "see" the changes that have been done
GpuTimer timerSync;
timerSync.Start();
checkCudaErrors(hipDeviceSynchronize());
timerSync.Stop();
cout << "Time to sync: " << timerSync.Elapsed() << "ms" << endl;
//-------------------------------------
// vector<double> ReturnVector(tNThreadsPerBlock);
// for(int i=0; i<tNThreadsPerBlock; i++)
// {
// ReturnVector[i] = host_out[i];
// }
// checkCudaErrors(hipHostFree(host_out));
checkCudaErrors(hipHostFree(aPairsIn));
checkCudaErrors(hipHostFree(a2dVecIn));
return host_out;
}
*/
| 3f42e61dc2fb6440342f5674ae8a7d0a5743b88f.cu | ///////////////////////////////////////////////////////////////////////////
// InterpolateGPU: //
///////////////////////////////////////////////////////////////////////////
#include "InterpolateGPU.h"
//________________________________________________________________________________________________________________
__device__ int GetBinNumberTest(double aBinSize, int aNbins, double aValue)
{
//TODO check the accuracy of this
double tBinKStarMin, tBinKStarMax;
for(int i=0; i<aNbins; i++)
{
tBinKStarMin = i*aBinSize;
tBinKStarMax = (i+1)*aBinSize;
if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i;
}
return -1; //i.e. failure
}
//________________________________________________________________________________________________________________
__device__ int GetBinNumberTest(int aNbins, double aMin, double aMax, double aValue)
{
//TODO check the accuracy of this
double tBinSize = (aMax-aMin)/aNbins;
double tBinKStarMin, tBinKStarMax;
for(int i=0; i<aNbins; i++)
{
tBinKStarMin = i*tBinSize + aMin;
tBinKStarMax = (i+1)*tBinSize + aMin;
if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i;
}
return -1; //i.e. failure
}
//________________________________________________________________________________________________________________
__device__ int GetBinNumberTest(double aBinWidth, double aMin, double aMax, double aValue)
{
//TODO check the accuracy of this
int tNbins = (aMax-aMin)/aBinWidth;
double tBinKStarMin, tBinKStarMax;
for(int i=0; i<tNbins; i++)
{
tBinKStarMin = i*aBinWidth + aMin;
tBinKStarMax = (i+1)*aBinWidth + aMin;
if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i;
}
return -1; //i.e. failure
}
//________________________________________________________________________________________________________________
__device__ int GetInterpLowBinTest(InterpType aInterpType, InterpAxisType aAxisType, double aVal)
{
int tReturnBin = -2;
int tNbins, tBin;
double tMin, tMax, tBinWidth, tBinCenter;
bool tErrorFlag = false;
switch(aInterpType)
{
case kGTilde:
switch(aAxisType)
{
case kKaxis:
tNbins = d_fGTildeInfo->nBinsK;
tBinWidth = d_fGTildeInfo->binWidthK;
tMin = d_fGTildeInfo->minK;
tMax = d_fGTildeInfo->maxK;
break;
case kRaxis:
tNbins = d_fGTildeInfo->nBinsR;
tBinWidth = d_fGTildeInfo->binWidthR;
tMin = d_fGTildeInfo->minR;
tMax = d_fGTildeInfo->maxR;
break;
//Invalid axis selection
case kThetaaxis:
tErrorFlag = true;
break;
case kReF0axis:
tErrorFlag = true;
break;
case kImF0axis:
tErrorFlag = true;
break;
case kD0axis:
tErrorFlag = true;
break;
}
break;
case kHyperGeo1F1:
switch(aAxisType)
{
case kKaxis:
tNbins = d_fHyperGeo1F1Info->nBinsK;
tBinWidth = d_fHyperGeo1F1Info->binWidthK;
tMin = d_fHyperGeo1F1Info->minK;
tMax = d_fHyperGeo1F1Info->maxK;
break;
case kRaxis:
tNbins = d_fHyperGeo1F1Info->nBinsR;
tBinWidth = d_fHyperGeo1F1Info->binWidthR;
tMin = d_fHyperGeo1F1Info->minR;
tMax = d_fHyperGeo1F1Info->maxR;
break;
case kThetaaxis:
tNbins = d_fHyperGeo1F1Info->nBinsTheta;
tBinWidth = d_fHyperGeo1F1Info->binWidthTheta;
tMin = d_fHyperGeo1F1Info->minTheta;
tMax = d_fHyperGeo1F1Info->maxTheta;
break;
//Invalid axis selection
case kReF0axis:
tErrorFlag = true;
break;
case kImF0axis:
tErrorFlag = true;
break;
case kD0axis:
tErrorFlag = true;
break;
}
break;
case kScattLen:
switch(aAxisType)
{
case kReF0axis:
tNbins = d_fScattLenInfo->nBinsReF0;
tBinWidth = d_fScattLenInfo->binWidthReF0;
tMin = d_fScattLenInfo->minReF0;
tMax = d_fScattLenInfo->maxReF0;
break;
case kImF0axis:
tNbins = d_fScattLenInfo->nBinsImF0;
tBinWidth = d_fScattLenInfo->binWidthImF0;
tMin = d_fScattLenInfo->minImF0;
tMax = d_fScattLenInfo->maxImF0;
break;
case kD0axis:
tNbins = d_fScattLenInfo->nBinsD0;
tBinWidth = d_fScattLenInfo->binWidthD0;
tMin = d_fScattLenInfo->minD0;
tMax = d_fScattLenInfo->maxD0;
break;
case kKaxis:
tNbins = d_fScattLenInfo->nBinsK;
tBinWidth = d_fScattLenInfo->binWidthK;
tMin = d_fScattLenInfo->minK;
tMax = d_fScattLenInfo->maxK;
break;
//Invalid axis selection
case kRaxis:
tErrorFlag = true;
break;
case kThetaaxis:
tErrorFlag = true;
break;
}
break;
}
//Check error
if(tErrorFlag) return -3;
//---------------------------------
tBin = GetBinNumberTest(tNbins,tMin,tMax,aVal);
tBinCenter = tMin + (tBin+0.5)*tBinWidth;
if(aVal < tBinCenter) tReturnBin = tBin-1;
else tReturnBin = tBin;
if(tReturnBin<0) return -2;
if(tReturnBin>=tNbins) return -1;
// if(tReturnBin<0 || tReturnBin >= tNbins) return -2;
else return tReturnBin;
}
//________________________________________________________________________________________________________________
__device__ int GetInterpLowBinTestKaxisTest(double aVal)
{
int tReturnBin = -2;
int tNbins, tBin;
double tMin, tMax, tBinWidth, tBinCenter;
tNbins = 160;
tBinWidth = 0.0025;
tMin = 0.0;
tMax = 0.40;
//---------------------------------
tBin = GetBinNumberTest(tNbins,tMin,tMax,aVal);
tBinCenter = tMin + (tBin+0.5)*tBinWidth;
if(aVal < tBinCenter) tReturnBin = tBin-1;
else tReturnBin = tBin;
if(tReturnBin<0 || tReturnBin >= tNbins) return -2;
else return tReturnBin;
}
//________________________________________________________________________________________________________________
__device__ int GetInterpLowBinTestRaxisTest(double aVal)
{
int tReturnBin = -2;
int tNbins, tBin;
double tMin, tMax, tBinWidth, tBinCenter;
tNbins = 100;
tBinWidth = 0.1;
tMin = 0.0;
tMax = 10.0;
//---------------------------------
tBin = GetBinNumberTest(tNbins,tMin,tMax,aVal);
tBinCenter = tMin + (tBin+0.5)*tBinWidth;
if(aVal < tBinCenter) tReturnBin = tBin-1;
else tReturnBin = tBin;
if(tReturnBin<0 || tReturnBin >= tNbins) return -2;
else return tReturnBin;
}
//________________________________________________________________________________________________________________
__device__ double GetInterpLowBinTestCenter(InterpType aInterpType, InterpAxisType aAxisType, double aVal)
{
double tReturnValue;
int tReturnBin = -2;
int tNbins, tBin;
double tMin, tMax, tBinWidth, tBinCenter;
bool tErrorFlag = false;
switch(aInterpType)
{
case kGTilde:
switch(aAxisType)
{
case kKaxis:
tNbins = d_fGTildeInfo->nBinsK;
tBinWidth = d_fGTildeInfo->binWidthK;
tMin = d_fGTildeInfo->minK;
tMax = d_fGTildeInfo->maxK;
break;
case kRaxis:
tNbins = d_fGTildeInfo->nBinsR;
tBinWidth = d_fGTildeInfo->binWidthR;
tMin = d_fGTildeInfo->minR;
tMax = d_fGTildeInfo->maxR;
break;
//Invalid axis selection
case kThetaaxis:
tErrorFlag = true;
break;
case kReF0axis:
tErrorFlag = true;
break;
case kImF0axis:
tErrorFlag = true;
break;
case kD0axis:
tErrorFlag = true;
break;
}
break;
case kHyperGeo1F1:
switch(aAxisType)
{
case kKaxis:
tNbins = d_fHyperGeo1F1Info->nBinsK;
tBinWidth = d_fHyperGeo1F1Info->binWidthK;
tMin = d_fHyperGeo1F1Info->minK;
tMax = d_fHyperGeo1F1Info->maxK;
break;
case kRaxis:
tNbins = d_fHyperGeo1F1Info->nBinsR;
tBinWidth = d_fHyperGeo1F1Info->binWidthR;
tMin = d_fHyperGeo1F1Info->minR;
tMax = d_fHyperGeo1F1Info->maxR;
break;
case kThetaaxis:
tNbins = d_fHyperGeo1F1Info->nBinsTheta;
tBinWidth = d_fHyperGeo1F1Info->binWidthTheta;
tMin = d_fHyperGeo1F1Info->minTheta;
tMax = d_fHyperGeo1F1Info->maxTheta;
break;
//Invalid axis selection
case kReF0axis:
tErrorFlag = true;
break;
case kImF0axis:
tErrorFlag = true;
break;
case kD0axis:
tErrorFlag = true;
break;
}
break;
case kScattLen:
switch(aAxisType)
{
case kReF0axis:
tNbins = d_fScattLenInfo->nBinsReF0;
tBinWidth = d_fScattLenInfo->binWidthReF0;
tMin = d_fScattLenInfo->minReF0;
tMax = d_fScattLenInfo->maxReF0;
break;
case kImF0axis:
tNbins = d_fScattLenInfo->nBinsImF0;
tBinWidth = d_fScattLenInfo->binWidthImF0;
tMin = d_fScattLenInfo->minImF0;
tMax = d_fScattLenInfo->maxImF0;
break;
case kD0axis:
tNbins = d_fScattLenInfo->nBinsD0;
tBinWidth = d_fScattLenInfo->binWidthD0;
tMin = d_fScattLenInfo->minD0;
tMax = d_fScattLenInfo->maxD0;
break;
case kKaxis:
tNbins = d_fScattLenInfo->nBinsK;
tBinWidth = d_fScattLenInfo->binWidthK;
tMin = d_fScattLenInfo->minK;
tMax = d_fScattLenInfo->maxK;
break;
//Invalid axis selection
case kRaxis:
tErrorFlag = true;
break;
case kThetaaxis:
tErrorFlag = true;
break;
}
break;
}
//Check error
if(tErrorFlag) return -2;
//---------------------------------
tBin = GetBinNumberTest(tNbins,tMin,tMax,aVal);
tBinCenter = tMin + (tBin+0.5)*tBinWidth;
if(aVal < tBinCenter) tReturnBin = tBin-1;
else tReturnBin = tBin;
if(tReturnBin<0 || tReturnBin >= tNbins) return -2;
tReturnValue = tMin + (tReturnBin+0.5)*tBinWidth;
return tReturnValue;
}
//________________________________________________________________________________________________________________
__device__ double GetInterpLowBinTestCenterKaxisTest(double aVal)
{
double tReturnValue;
int tReturnBin = -2;
int tNbins, tBin;
double tMin, tMax, tBinWidth, tBinCenter;
tNbins = 160;
tBinWidth = 0.0025;
tMin = 0.0;
tMax = 0.40;
//---------------------------------
tBin = GetBinNumberTest(tNbins,tMin,tMax,aVal);
tBinCenter = tMin + (tBin+0.5)*tBinWidth;
if(aVal < tBinCenter) tReturnBin = tBin-1;
else tReturnBin = tBin;
if(tReturnBin<0 || tReturnBin >= tNbins) return -2;
tReturnValue = tMin + (tReturnBin+0.5)*tBinWidth;
return tReturnValue;
}
//________________________________________________________________________________________________________________
__device__ double GetInterpLowBinTestCenterRaxisTest(double aVal)
{
double tReturnValue;
int tReturnBin = -2;
int tNbins, tBin;
double tMin, tMax, tBinWidth, tBinCenter;
tNbins = 100;
tBinWidth = 0.1;
tMin = 0.0;
tMax = 10.0;
//---------------------------------
tBin = GetBinNumberTest(tNbins,tMin,tMax,aVal);
tBinCenter = tMin + (tBin+0.5)*tBinWidth;
if(aVal < tBinCenter) tReturnBin = tBin-1;
else tReturnBin = tBin;
if(tReturnBin<0 || tReturnBin >= tNbins) return -2;
tReturnValue = tMin + (tReturnBin+0.5)*tBinWidth;
return tReturnValue;
}
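// Editor's note (assumption): the hard-coded constants in the *KaxisTest/*RaxisTest helpers above
// (160 k bins of width 0.0025 and 100 r bins of width 0.1) appear to mirror the default GTilde binning
// otherwise read from d_fGTildeInfo; they look like debugging fallbacks rather than independent settings.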
//________________________________________________________________________________________________________________
__global__ void GTildeInterpolate(double* aKStar, double* aRStar, double* aGTildeReal)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
double tResultReal = 0.;
// double tResultImag = 0.;
//----------------------------
int tNbinsR = d_fGTildeInfo->nBinsR;
int tNbinsK = d_fGTildeInfo->nBinsK;
//----------------------------
//TODO put in check to make sure GetInterpLowBinTestCenter does not return the error -2
double tBinWidthK = d_fGTildeInfo->binWidthK;
// double tBinWidthK = 0.0025;
int tBinLowK = GetInterpLowBinTest(kGTilde,kKaxis,aKStar[idx]);
// int tBinLowK = GetInterpLowBinTestKaxisTest(aKStar[idx]);
int tBinHighK = tBinLowK+1;
double tBinLowCenterK = GetInterpLowBinTestCenter(kGTilde,kKaxis,aKStar[idx]);
// double tBinLowCenterK = GetInterpLowBinTestCenterKaxisTest(aKStar[idx]);
double tBinHighCenterK = tBinLowCenterK+tBinWidthK;
double tBinWidthR = d_fGTildeInfo->binWidthR;
// double tBinWidthR = 0.1;
int tBinLowR = GetInterpLowBinTest(kGTilde,kRaxis,aRStar[idx]);
// int tBinLowR = GetInterpLowBinTestRaxisTest(aRStar[idx]);
int tBinHighR = tBinLowR+1;
double tBinLowCenterR = GetInterpLowBinTestCenter(kGTilde,kRaxis,aRStar[idx]);
// double tBinLowCenterR = GetInterpLowBinTestCenterRaxisTest(aRStar[idx]);
double tBinHighCenterR = tBinLowCenterR+tBinWidthR;
//--------------------------
assert(tBinLowK>=0);
assert(tBinHighK<tNbinsK);
assert(tBinLowCenterK>0);
assert(tBinHighCenterK>0);
assert(tBinLowR>-3);
assert(tBinLowR>-2);
assert(tBinLowR>-1);
assert(tBinLowR>=0);
assert(tBinHighR<tNbinsR);
assert(tBinLowCenterR>0);
assert(tBinHighCenterR>0);
double tQ11Real = d_fGTildeReal[tBinLowR + tBinLowK*tNbinsR];
double tQ12Real = d_fGTildeReal[tBinHighR + tBinLowK*tNbinsR];
double tQ21Real = d_fGTildeReal[tBinLowR + tBinHighK*tNbinsR];
double tQ22Real = d_fGTildeReal[tBinHighR + tBinHighK*tNbinsR];
/*
double tQ11Imag = d_fGTildeImag[tBinLowR + tBinLowK*tNbinsR];
double tQ12Imag = d_fGTildeImag[tBinHighR + tBinLowK*tNbinsR];
double tQ21Imag = d_fGTildeImag[tBinLowR + tBinHighK*tNbinsR];
double tQ22Imag = d_fGTildeImag[tBinHighR + tBinHighK*tNbinsR];
*/
//--------------------------
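//Standard bilinear interpolation over the four surrounding bin centers:
// f(k,r) ~ [Q11*(k2-k)*(r2-r) + Q21*(k-k1)*(r2-r) + Q12*(k2-k)*(r-r1) + Q22*(k-k1)*(r-r1)] / ((k2-k1)*(r2-r1))
//with (k1,r1)=(tBinLowCenterK,tBinLowCenterR), (k2,r2)=(tBinHighCenterK,tBinHighCenterR), and tD the denominator.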
double tD = 1.0*tBinWidthK*tBinWidthR;
tResultReal = (1.0/tD)*(tQ11Real*(tBinHighCenterK-aKStar[idx])*(tBinHighCenterR-aRStar[idx]) + tQ21Real*(aKStar[idx]-tBinLowCenterK)*(tBinHighCenterR-aRStar[idx]) + tQ12Real*(tBinHighCenterK-aKStar[idx])*(aRStar[idx]-tBinLowCenterR) + tQ22Real*(aKStar[idx]-tBinLowCenterK)*(aRStar[idx]-tBinLowCenterR));
// tResultImag = (1.0/tD)*(tQ11Imag*(tBinHighCenterK-aKStar)*(tBinHighCenterR-aRStar) + tQ21Imag*(aKStar-tBinLowCenterK)*(tBinHighCenterR-aRStar) + tQ12Imag*(tBinHighCenterK-aKStar)*(aRStar-tBinLowCenterR) + tQ22Imag*(aKStar-tBinLowCenterK)*(aRStar-tBinLowCenterR));
//--------------------------
aGTildeReal[idx] = tResultReal;
}
//________________________________________________________________________________________________________________
//****************************************************************************************************************
//________________________________________________________________________________________________________________
InterpolateGPU::InterpolateGPU(int aNThreadsPerBlock, int aNBlocks) :
fNThreadsPerBlock(aNThreadsPerBlock),
fNBlocks(aNBlocks)
{
cudaSetDeviceFlags(cudaDeviceMapHost);
}
//________________________________________________________________________________________________________________
InterpolateGPU::~InterpolateGPU()
{
}
//________________________________________________________________________________________________________________
void InterpolateGPU::LoadGTildeReal(td2dVec &aGTildeReal)
{
int tNbinsK = aGTildeReal.size();
int tNbinsR = aGTildeReal[0].size();
int tSize = tNbinsK*tNbinsR*sizeof(double);
// checkCudaErrors(cudaHostAlloc((void**) &fGTildeReal, tSize, cudaHostAllocMapped));
checkCudaErrors(cudaMallocManaged(&d_fGTildeReal, tSize));
int tIndex;
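//Flatten the 2D vector row-major as [iK][iR] so the kernel can index it as iR + iK*nBinsR.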
for(int iK=0; iK<tNbinsK; iK++)
{
for(int iR=0; iR<tNbinsR; iR++)
{
tIndex = iR + iK*tNbinsR;
d_fGTildeReal[tIndex] = aGTildeReal[iK][iR];
}
}
// checkCudaErrors(cudaHostGetDevicePointer(&d_fGTildeReal, fGTildeReal, 0));
}
//________________________________________________________________________________________________________________
void InterpolateGPU::LoadGTildeImag(td2dVec &aGTildeImag)
{
int tNbinsK = aGTildeImag.size();
int tNbinsR = aGTildeImag[0].size();
int tSize = tNbinsK*tNbinsR*sizeof(double);
// checkCudaErrors(cudaHostAlloc((void**) &fGTildeImag, tSize, cudaHostAllocMapped));
checkCudaErrors(cudaMallocManaged(&d_fGTildeImag, tSize));
int tIndex;
for(int iK=0; iK<tNbinsK; iK++)
{
for(int iR=0; iR<tNbinsR; iR++)
{
tIndex = iR + iK*tNbinsR;
d_fGTildeImag[tIndex] = aGTildeImag[iK][iR];
}
}
// checkCudaErrors(cudaHostGetDevicePointer(&d_fGTildeImag, fGTildeImag, 0));
}
//________________________________________________________________________________________________________________
void InterpolateGPU::LoadGTildeInfo(BinInfoGTilde &aBinInfo)
{
// checkCudaErrors(cudaHostAlloc((void**) &fGTildeInfo, sizeof(BinInfoGTilde), cudaHostAllocMapped));
checkCudaErrors(cudaMallocManaged(&d_fGTildeInfo, sizeof(BinInfoGTilde)));
d_fGTildeInfo->nBinsK = aBinInfo.nBinsK;
d_fGTildeInfo->nBinsR = aBinInfo.nBinsR;
d_fGTildeInfo->binWidthK = aBinInfo.binWidthK;
d_fGTildeInfo->binWidthR = aBinInfo.binWidthR;
d_fGTildeInfo->minK = aBinInfo.minK;
d_fGTildeInfo->maxK = aBinInfo.maxK;
d_fGTildeInfo->minR = aBinInfo.minR;
d_fGTildeInfo->maxR = aBinInfo.maxR;
d_fGTildeInfo->minInterpK = aBinInfo.minInterpK;
d_fGTildeInfo->maxInterpK = aBinInfo.maxInterpK;
d_fGTildeInfo->minInterpR = aBinInfo.minInterpR;
d_fGTildeInfo->maxInterpR = aBinInfo.maxInterpR;
// checkCudaErrors(cudaHostGetDevicePointer(&d_fGTildeInfo, fGTildeInfo, 0));
}
//________________________________________________________________________________________________________________
vector<double> InterpolateGPU::RunBilinearInterpolate(vector<vector<double> > &aPairsIn)
{
// cudaSetDeviceFlags(cudaDeviceMapHost);
int tNPairs = aPairsIn.size();
int tSize = tNPairs*sizeof(double);
//---Host array allocations
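//cudaHostAllocMapped requests pinned host memory that is also mapped into the device address space (zero-copy),
//so the kernel reads and writes these buffers directly through the device pointers obtained below.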
double * h_KStarMag;
double * h_RStarMag;
double * h_GTildeReal;
checkCudaErrors(cudaHostAlloc((void**) &h_KStarMag, tSize, cudaHostAllocMapped));
checkCudaErrors(cudaHostAlloc((void**) &h_RStarMag, tSize, cudaHostAllocMapped));
checkCudaErrors(cudaHostAlloc((void**) &h_GTildeReal, tSize, cudaHostAllocMapped));
for(int i=0; i<tNPairs; i++)
{
h_KStarMag[i] = aPairsIn[i][0];
h_RStarMag[i] = aPairsIn[i][1];
}
//---Device-side pointers (zero-copy views of the mapped host buffers; no separate device allocation)
double * d_KStarMag;
double * d_RStarMag;
double * d_GTildeReal;
checkCudaErrors(cudaHostGetDevicePointer(&d_KStarMag, h_KStarMag, 0));
checkCudaErrors(cudaHostGetDevicePointer(&d_RStarMag, h_RStarMag, 0));
checkCudaErrors(cudaHostGetDevicePointer(&d_GTildeReal, h_GTildeReal, 0));
//----------Run the kernel-----------------------------------------------
GpuTimer timer;
timer.Start();
GTildeInterpolate<<<fNBlocks,fNThreadsPerBlock>>>(d_KStarMag,d_RStarMag,d_GTildeReal);
timer.Stop();
std::cout << "GTildeInterpolate kernel finished in " << timer.Elapsed() << " ms" << std::endl;
//The following is necessary for the host to be able to "see" the changes that have been done
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaFreeHost(h_KStarMag));
checkCudaErrors(cudaFreeHost(h_RStarMag));
vector<double> tReturnVec(tNPairs);
for(int i=0; i<tNPairs; i++)
{
tReturnVec[i] = h_GTildeReal[i];
// cout << "i = " << i << endl;
// cout << "h_GTildeReal[i] = " << h_GTildeReal[i] << endl;
// cout << "tReturnVec[i] = " << tReturnVec[i] << endl << endl;
}
checkCudaErrors(cudaFreeHost(h_GTildeReal));
return tReturnVec;
}
/*
//________________________________________________________________________________________________________________
double* InterpolateGPU::RunBilinearInterpolate(double* host_out, double *aPairsIn, double *a2dVecIn)
{
cudaSetDeviceFlags(cudaDeviceMapHost);
int tNThreadsPerBlock = 1000;
int tNBlocks = 10;
double * device_out;
double * device_PairsIn;
double * device_2dVecIn;
GpuTimer timerCopy;
timerCopy.Start();
checkCudaErrors(cudaHostGetDevicePointer(&device_out, host_out, 0));
checkCudaErrors(cudaHostGetDevicePointer(&device_PairsIn, aPairsIn, 0));
checkCudaErrors(cudaHostGetDevicePointer(&device_2dVecIn, a2dVecIn, 0));
timerCopy.Stop();
cout << "Time to copy: " << timerCopy.Elapsed() << "ms" << endl;
//---------------------
GpuTimer timer;
timer.Start();
BilinearInterpolateVector<<<tNBlocks,tNThreadsPerBlock>>>(device_out,device_PairsIn,device_2dVecIn);
timer.Stop();
std::cout << "Kernel finished in " << timer.Elapsed() << "ms" << std::endl;
//The following is necessary for the host to be able to "see" the changes that have been done
GpuTimer timerSync;
timerSync.Start();
checkCudaErrors(cudaDeviceSynchronize());
timerSync.Stop();
cout << "Time to sync: " << timerSync.Elapsed() << "ms" << endl;
//-------------------------------------
// vector<double> ReturnVector(tNThreadsPerBlock);
// for(int i=0; i<tNThreadsPerBlock; i++)
// {
// ReturnVector[i] = host_out[i];
// }
// checkCudaErrors(cudaFreeHost(host_out));
checkCudaErrors(cudaFreeHost(aPairsIn));
checkCudaErrors(cudaFreeHost(a2dVecIn));
return host_out;
}
*/
|
9762628baed3a12cdf75d7aa3c8acd42bcffbfd1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <random>
#include <sys/time.h>
#include "DCT.h"
using namespace std;
#define MATRIX_SIZE 256
#define BLOCK_SIZE 8
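//The 2D DCT of each 8x8 block is computed separably as C * X * C^T:
//DCT1 applies the 8x8 DCT matrix on the left (temp = C * X), DCT2 applies its transpose on the right.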
__global__ void DCT1(float * inputImage, float * temp, float * dct8x8Mat, int matrixSize){
const int i = threadIdx.x + blockIdx.x * blockDim.x;
const int j = threadIdx.y + blockIdx.y * blockDim.y;
float sum = 0.0f;
int x = i % BLOCK_SIZE;
//int y = j % BLOCK_SIZE;
for (int k = 0;k < BLOCK_SIZE;k++) {
sum += dct8x8Mat[x*BLOCK_SIZE + k] * inputImage[k*matrixSize + j];
}
temp[i*matrixSize + j] = sum;
}
__global__ void DCT2(float * temp, float * dctCoeffMatrix, float * dct8x8TMat, int matrixSize){
const int i = threadIdx.x + blockIdx.x * blockDim.x;
const int j = threadIdx.y + blockIdx.y * blockDim.y;
float sum = 0.0f;
//int x = i % BLOCK_SIZE;
int y = j % BLOCK_SIZE;
for (int k = 0;k < BLOCK_SIZE;k++) {
sum += temp[i*matrixSize + k] * dct8x8TMat[k*BLOCK_SIZE + y];
}
dctCoeffMatrix[i*matrixSize + j] = sum;
}
int main(){
float *d_img, *d_temp, *d_dctCoeffMatrix, *d_dct8x8Mat, *d_dct8x8TMat;
struct timeval start,end;
double elapsedTime;
const int32_t matrixSize = MATRIX_SIZE;
mt19937 rng(time(NULL));
uniform_int_distribution<int> gen(0, 255);
float *h_img = (float *)malloc(matrixSize * matrixSize * sizeof(float));
float *h_temp = (float *)malloc(matrixSize * matrixSize * sizeof(float));
float *h_dctCoeffMatrix = (float *)malloc(matrixSize * matrixSize * sizeof(float));
for(int i=0;i<matrixSize;i++){
for(int j=0;j<matrixSize;j++){
*(h_img + i * matrixSize + j) = static_cast<float>(gen(rng));
*(h_img + i * matrixSize + j) = *(h_img + i * matrixSize + j) - 128;
}
}
hipMalloc(&d_img, MATRIX_SIZE * MATRIX_SIZE * sizeof(float));
hipMalloc(&d_temp, MATRIX_SIZE * MATRIX_SIZE * sizeof(float));
hipMalloc(&d_dctCoeffMatrix, MATRIX_SIZE * MATRIX_SIZE * sizeof(float));
hipMalloc(&d_dct8x8Mat, BLOCK_SIZE * BLOCK_SIZE * sizeof(float));
hipMalloc(&d_dct8x8TMat, BLOCK_SIZE * BLOCK_SIZE * sizeof(float));
// Keep track of when we start doing work
gettimeofday(&start, NULL);
hipMemcpy(d_img, h_img, MATRIX_SIZE * MATRIX_SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_dct8x8Mat, dct8x8Matrix, BLOCK_SIZE * BLOCK_SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_dct8x8TMat, dct8x8MatrixTranspose, BLOCK_SIZE * BLOCK_SIZE * sizeof(float), hipMemcpyHostToDevice);
dim3 threads(BLOCK_SIZE,BLOCK_SIZE);
dim3 grid(MATRIX_SIZE/threads.x,MATRIX_SIZE/threads.y);
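// One thread per pixel: each 8x8 thread block maps onto exactly one 8x8 DCT block of the image.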
hipLaunchKernelGGL(( DCT1), dim3(grid),dim3(threads), 0, 0, d_img, d_temp, d_dct8x8Mat, MATRIX_SIZE);
hipDeviceSynchronize();
hipLaunchKernelGGL(( DCT2), dim3(grid),dim3(threads), 0, 0, d_temp, d_dctCoeffMatrix, d_dct8x8TMat, MATRIX_SIZE);
hipDeviceSynchronize();
hipMemcpy(h_dctCoeffMatrix, d_dctCoeffMatrix, MATRIX_SIZE * MATRIX_SIZE * sizeof(float), hipMemcpyDeviceToHost);
// Keep track of when we finish our work
gettimeofday(&end, NULL);
// Calculate the time it took to do the above task
elapsedTime = (end.tv_sec - start.tv_sec) * 1000.0;
elapsedTime += (end.tv_usec - start.tv_usec) / 1000.0;
elapsedTime /= 1000;
if(elapsedTime >= 0.001)
printf("CUDA : %.3f seconds\n",elapsedTime);
else
printf("CUDA : %.4f seconds\n",elapsedTime);
hipFree(d_img);
hipFree(d_temp);
hipFree(d_dct8x8Mat);
hipFree(d_dct8x8TMat);
hipFree(d_dctCoeffMatrix);
return 0;
} | 9762628baed3a12cdf75d7aa3c8acd42bcffbfd1.cu | #include <iostream>
#include <stdio.h>
#include <random>
#include <sys/time.h>
#include "DCT.h"
using namespace std;
#define MATRIX_SIZE 256
#define BLOCK_SIZE 8
__global__ void DCT1(float * inputImage, float * temp, float * dct8x8Mat, int matrixSize){
const int i = threadIdx.x + blockIdx.x * blockDim.x;
const int j = threadIdx.y + blockIdx.y * blockDim.y;
float sum = 0.0f;
int x = i % BLOCK_SIZE;
//int y = j % BLOCK_SIZE;
for (int k = 0;k < BLOCK_SIZE;k++) {
sum += dct8x8Mat[x*BLOCK_SIZE + k] * inputImage[k*matrixSize + j];
}
temp[i*matrixSize + j] = sum;
}
__global__ void DCT2(float * temp, float * dctCoeffMatrix, float * dct8x8TMat, int matrixSize){
const int i = threadIdx.x + blockIdx.x * blockDim.x;
const int j = threadIdx.y + blockIdx.y * blockDim.y;
float sum = 0.0f;
//int x = i % BLOCK_SIZE;
int y = j % BLOCK_SIZE;
for (int k = 0;k < BLOCK_SIZE;k++) {
sum += temp[i*matrixSize + k] * dct8x8TMat[k*BLOCK_SIZE + y];
}
dctCoeffMatrix[i*matrixSize + j] = sum;
}
int main(){
float *d_img, *d_temp, *d_dctCoeffMatrix, *d_dct8x8Mat, *d_dct8x8TMat;
struct timeval start,end;
double elapsedTime;
const int32_t matrixSize = MATRIX_SIZE;
mt19937 rng(time(NULL));
uniform_int_distribution<int> gen(0, 255);
float *h_img = (float *)malloc(matrixSize * matrixSize * sizeof(float));
float *h_temp = (float *)malloc(matrixSize * matrixSize * sizeof(float));
float *h_dctCoeffMatrix = (float *)malloc(matrixSize * matrixSize * sizeof(float));
for(int i=0;i<matrixSize;i++){
for(int j=0;j<matrixSize;j++){
*(h_img + i * matrixSize + j) = static_cast<float>(gen(rng));
*(h_img + i * matrixSize + j) = *(h_img + i * matrixSize + j) - 128;
}
}
cudaMalloc(&d_img, MATRIX_SIZE * MATRIX_SIZE * sizeof(float));
cudaMalloc(&d_temp, MATRIX_SIZE * MATRIX_SIZE * sizeof(float));
cudaMalloc(&d_dctCoeffMatrix, MATRIX_SIZE * MATRIX_SIZE * sizeof(float));
cudaMalloc(&d_dct8x8Mat, BLOCK_SIZE * BLOCK_SIZE * sizeof(float));
cudaMalloc(&d_dct8x8TMat, BLOCK_SIZE * BLOCK_SIZE * sizeof(float));
// Keep track of when we start doing work
gettimeofday(&start, NULL);
cudaMemcpy(d_img, h_img, MATRIX_SIZE * MATRIX_SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_dct8x8Mat, dct8x8Matrix, BLOCK_SIZE * BLOCK_SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_dct8x8TMat, dct8x8MatrixTranspose, BLOCK_SIZE * BLOCK_SIZE * sizeof(float), cudaMemcpyHostToDevice);
dim3 threads(BLOCK_SIZE,BLOCK_SIZE);
dim3 grid(MATRIX_SIZE/threads.x,MATRIX_SIZE/threads.y);
DCT1<<<grid,threads>>>(d_img, d_temp, d_dct8x8Mat, MATRIX_SIZE);
cudaDeviceSynchronize();
DCT2<<<grid,threads>>>(d_temp, d_dctCoeffMatrix, d_dct8x8TMat, MATRIX_SIZE);
cudaDeviceSynchronize();
cudaMemcpy(h_dctCoeffMatrix, d_dctCoeffMatrix, MATRIX_SIZE * MATRIX_SIZE * sizeof(float), cudaMemcpyDeviceToHost);
// Keep track of when we finish our work
gettimeofday(&end, NULL);
// Calculate the time it took to do the above task
elapsedTime = (end.tv_sec - start.tv_sec) * 1000.0;
elapsedTime += (end.tv_usec - start.tv_usec) / 1000.0;
elapsedTime /= 1000;
if(elapsedTime >= 0.001)
printf("CUDA : %.3f seconds\n",elapsedTime);
else
printf("CUDA : %.4f seconds\n",elapsedTime);
cudaFree(d_img);
cudaFree(d_temp);
cudaFree(d_dct8x8Mat);
cudaFree(d_dct8x8TMat);
cudaFree(d_dctCoeffMatrix);
return 0;
} |
f27833abbd574cd3ee4011c2607e87c81eb3a60f.hip | // !!! This is a file automatically generated by hipify!!!
#include "matrix.hh"
#include "nn_exception.hh"
Matrix::Matrix(size_t x_dim, size_t y_dim) :
shape(x_dim, y_dim), data_device(nullptr), data_host(nullptr),
device_allocated(false), host_allocated(false)
{ }
Matrix::Matrix(Shape shape) :
Matrix(shape.x, shape.y)
{ }
void Matrix::allocateCudaMemory() {
if (!device_allocated) {
float* device_memory = nullptr;
hipMalloc(&device_memory, shape.x * shape.y * sizeof(float));
NNException::throwIfDeviceErrorsOccurred("Cannot allocate CUDA memory for Tensor3D.");
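// Hand the raw pointer to a shared_ptr whose custom deleter frees the device allocation
// once the last Matrix sharing it goes away.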
data_device = std::shared_ptr<float>(device_memory,
[&](float* ptr){ hipFree(ptr); });
device_allocated = true;
}
}
void Matrix::allocateHostMemory() {
if (!host_allocated) {
data_host = std::shared_ptr<float>(new float[shape.x * shape.y],
[&](float* ptr){ delete[] ptr; });
host_allocated = true;
}
}
void Matrix::allocateMemory() {
allocateCudaMemory();
allocateHostMemory();
}
void Matrix::allocateMemoryIfNotAllocated(Shape shape) {
if (!device_allocated && !host_allocated) {
this->shape = shape;
allocateMemory();
}
}
void Matrix::copyHostToDevice() {
if (device_allocated && host_allocated) {
hipMemcpy(data_device.get(), data_host.get(), shape.x * shape.y * sizeof(float), hipMemcpyHostToDevice);
NNException::throwIfDeviceErrorsOccurred("Cannot copy host data to CUDA device.");
}
else {
throw NNException("Cannot copy host data to not allocated memory on device.");
}
}
void Matrix::copyDeviceToHost() {
if (device_allocated && host_allocated) {
hipMemcpy(data_host.get(), data_device.get(), shape.x * shape.y * sizeof(float), hipMemcpyDeviceToHost);
NNException::throwIfDeviceErrorsOccurred("Cannot copy device data to host.");
}
else {
throw NNException("Cannot copy device data to not allocated memory on host.");
}
}
float& Matrix::operator[](const int index) {
return data_host.get()[index];
}
const float& Matrix::operator[](const int index) const {
return data_host.get()[index];
}
| f27833abbd574cd3ee4011c2607e87c81eb3a60f.cu | #include "matrix.hh"
#include "nn_exception.hh"
Matrix::Matrix(size_t x_dim, size_t y_dim) :
shape(x_dim, y_dim), data_device(nullptr), data_host(nullptr),
device_allocated(false), host_allocated(false)
{ }
Matrix::Matrix(Shape shape) :
Matrix(shape.x, shape.y)
{ }
void Matrix::allocateCudaMemory() {
if (!device_allocated) {
float* device_memory = nullptr;
cudaMalloc(&device_memory, shape.x * shape.y * sizeof(float));
NNException::throwIfDeviceErrorsOccurred("Cannot allocate CUDA memory for Tensor3D.");
data_device = std::shared_ptr<float>(device_memory,
[&](float* ptr){ cudaFree(ptr); });
device_allocated = true;
}
}
void Matrix::allocateHostMemory() {
if (!host_allocated) {
data_host = std::shared_ptr<float>(new float[shape.x * shape.y],
[&](float* ptr){ delete[] ptr; });
host_allocated = true;
}
}
void Matrix::allocateMemory() {
allocateCudaMemory();
allocateHostMemory();
}
void Matrix::allocateMemoryIfNotAllocated(Shape shape) {
if (!device_allocated && !host_allocated) {
this->shape = shape;
allocateMemory();
}
}
void Matrix::copyHostToDevice() {
if (device_allocated && host_allocated) {
cudaMemcpy(data_device.get(), data_host.get(), shape.x * shape.y * sizeof(float), cudaMemcpyHostToDevice);
NNException::throwIfDeviceErrorsOccurred("Cannot copy host data to CUDA device.");
}
else {
throw NNException("Cannot copy host data to not allocated memory on device.");
}
}
void Matrix::copyDeviceToHost() {
if (device_allocated && host_allocated) {
cudaMemcpy(data_host.get(), data_device.get(), shape.x * shape.y * sizeof(float), cudaMemcpyDeviceToHost);
NNException::throwIfDeviceErrorsOccurred("Cannot copy device data to host.");
}
else {
throw NNException("Cannot copy device data to not allocated memory on host.");
}
}
float& Matrix::operator[](const int index) {
return data_host.get()[index];
}
const float& Matrix::operator[](const int index) const {
return data_host.get()[index];
}
|
47f445f7895d85441ea8facf0666ad2a3ec9d93c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <string>
#include <stdio.h>
#include "dnn.hpp"
using namespace std;
//Define the parameters if not defined externally
#ifndef Sy
#define Sy 1
#define Sx 1
#endif
#ifndef Tnn
//Tiling Sizes
#define Tnn 32
#define Tn 16
#define Ti 16
#define Ty 8
#define Tx 8
#endif
#define NYPAD (Ny + Ky)
#define NXPAD (Nx + Kx)
#define NYSCL (Ny / Sy)
#define NXSCL (Nx / Sx)
#define SYNAPSE_SIZE (1L * Ky * Kx * Nn * Ni)
VTYPE (*synapse)
[Ky][Kx][Nn][Ni];
VTYPE (*neuron_i)
[NYPAD][NXPAD][Ni];
VTYPE (*neuron_n)
[NYSCL][NXSCL][Nn];
VTYPE (*neuron_n2)
[NYSCL][NXSCL][Nn];
void fill_convolution_shared_simple(VTYPE (&synapse)[Ky][Kx][Nn][Ni],
VTYPE (&neuron_i)[NYPAD][NXPAD][Ni])
{
for (int yy = 0; yy < Ky; ++yy)
{
for (int xx = 0; xx < Kx; ++xx)
{
for (int nn = 0; nn < Nn; ++nn)
{
for (int ni = 0; ni < Ni; ++ni)
{
synapse[yy][xx][nn][ni] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX) - 0.5f;
}
}
}
}
for (int yy = 0; yy < NYPAD; ++yy)
{
for (int xx = 0; xx < NXPAD; ++xx)
{
for (int ni = 0; ni < Ni; ++ni)
{
neuron_i[yy][xx][ni] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX) - 0.5f;
}
}
}
}
void convolution_layer_blocked(
VTYPE (&synapse)[Ky][Kx][Nn][Ni],
VTYPE (&neuron_i)[NYPAD][NXPAD][Ni],
VTYPE (&neuron_n)[NYSCL][NXSCL][Nn])
{
int c1 = 0, c2 = 0;
VTYPE sum[Nn] = {0};
for (int yy = 0; yy < Ny; yy += Ty)
{
for (int xx = 0; xx < Nx; xx += Tx)
{
for (int nnn = 0; nnn < Nn; nnn += Tnn)
{
int yout = yy / Sy;
for (int y = yy; y < yy + Ty; y += Sy)
{ // tiling for y;
int xout = xx / Sx;
for (int x = xx; x < xx + Tx; x += Sx)
{ // tiling for x;
for (int nn = nnn; nn < nnn + Tnn; nn += Tn)
{
for (int n = nn; n < nn + Tn; n++)
{
sum[n] = 0;
}
for (int ky = 0; ky < Ky; ky++)
{ // sliding window;
for (int kx = 0; kx < Kx; kx++)
{
int ii = 0;
VTYPE sum_sc;
for (; ii < Ni - Ti + 1; ii += Ti)
{
for (int n = nn; n < nn + Tn; n++)
{
sum_sc = 0;
for (int i = ii; i < ii + Ti; i++)
{
VTYPE sv = synapse[ky][kx][n][i];
VTYPE nv = neuron_i[ky + y][kx + x][i];
sum_sc += sv * nv;
}
sum[n] += sum_sc;
}
}
}
}
//transfer
for (int n = nn; n < nn + Tn; n++)
{
neuron_n[yout][xout][n] = transfer(sum[n]);
}
}
xout++;
}
yout++;
}
}
}
}
}
void convolution_layer(VTYPE (&synapse)[Ky][Kx][Nn][Ni],
VTYPE (&neuron_i)[NYPAD][NXPAD][Ni],
VTYPE (&neuron_n)[NYSCL][NXSCL][Nn])
{
VTYPE sum[Nn] = {0};
// Original code (excluding nn, ii loops)
int yout = 0;
for (int y = 0; y < Ny; y += Sy)
{ // tiling for y;
int xout = 0;
for (int x = 0; x < Ny; x += Sx)
{ // tiling for x;
for (int nn = 0; nn < Nn; nn += Tn)
{
for (int n = nn; n < nn + Tn; n++)
{
sum[n] = 0;
}
// sliding window;
for (int ky = 0; ky < Ky; ky++)
for (int kx = 0; kx < Kx; kx++)
for (int n = nn; n < nn + Tn; n++)
for (int i = 0; i < Ni; i++)
{
VTYPE sv = synapse[ky][kx][n][i];
VTYPE nv = neuron_i[ky + y][kx + x][i];
sum[n] += sv * nv;
}
for (int n = nn; n < nn + Tn; n++)
{
neuron_n[yout][xout][n] = transfer(sum[n]);
}
}
xout++;
}
yout++;
}
}
__global__ void convolution_layer_cuda(VTYPE* synapse, VTYPE* neuron_i, VTYPE* neuron_n)
{
VTYPE sum[Nn] = {0};
// Original code (excluding nn, ii loops)
int yout = 0;
int index = threadIdx.x;
int stride = blockDim.x;
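// Thread-stride loop: thread t handles output rows y = t, t + blockDim.x, ... so one block covers all Ny rows.
// Like the CPU reference above, the x loop runs to Ny rather than Nx, so the kernel assumes a square input.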
for (int y = index; y < Ny; y += stride)
{ // tiling for y;
for (int x = 0; x < Ny; x += Sx)
{ // tiling for x;
for (int nn = 0; nn < Nn; nn += Tn)
{
for (int n = nn; n < nn + Tn; n++)
{
sum[n] = 0;
}
// sliding window;
for (int ky = 0; ky < Ky; ky++)
for (int kx = 0; kx < Kx; kx++)
for (int n = nn; n < nn + Tn; n++)
for (int i = 0; i < Ni; i++)
{
VTYPE sv = synapse[ky * (Kx * Nn * Ni) + kx * (Nn * Ni) + n * Ni + i]; //[ky][kx][n][i];
VTYPE nv = neuron_i[(ky + y) * (NXPAD * Ni) + (kx + x) * Ni + i]; //[ky + y][kx + x][i];
sum[n] += sv * nv;
}
for (int n = nn; n < nn + Tn; n++)
{
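// Inline activation: positive sums pass through, negative sums are scaled by 1/4 (leaky-ReLU style).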
neuron_n[y * (NXSCL * Nn) + x * Nn + n] = (sum[n]>0) ? sum[n] : sum[n]/4;
//printf("yout: %d, xout: %d, index: %d, neuron:%f\n", y, x, index, neuron_n[y * (NXSCL * Nn) + x * Nn + n]);
}
}
}
}
}
int main(const int argc, const char **argv)
{
cout << "allocating memory\n";
synapse = (VTYPE(*)[Ky][Kx][Nn][Ni])malloc(SYNAPSE_SIZE * sizeof(VTYPE));
neuron_i = (VTYPE(*)[NYPAD][NXPAD][Ni])malloc(NYPAD * NXPAD * Ni * sizeof(VTYPE));
neuron_n = (VTYPE(*)[NYSCL][NXSCL][Nn])malloc(NYSCL * NXSCL * Nn * sizeof(VTYPE));
neuron_n2 = (VTYPE(*)[NYSCL][NXSCL][Nn])malloc(NYSCL * NXSCL * Nn * sizeof(VTYPE));
cout << "allocating memory for CUDA \n";
VTYPE* d_synapse = NULL;
VTYPE* d_neuron_i = NULL;
VTYPE* d_neuron_n = NULL;
VTYPE* d_neuron_n2 = NULL;
hipMalloc((void**) &d_synapse, SYNAPSE_SIZE * sizeof(VTYPE));
hipMalloc((void**) &d_neuron_i, NYPAD * NXPAD * Ni * sizeof(VTYPE));
hipMalloc((void**) &d_neuron_n, NYSCL * NXSCL * Nn * sizeof(VTYPE));
// reserved for blocked version of the conv layer
hipMalloc((void**) &d_neuron_n2, NYSCL * NXSCL * Nn * sizeof(VTYPE));
cout << "initializing arrays\n";
fill_convolution_shared_simple(*synapse, *neuron_i);
cout << "copying initialized arrays from host to device\n";
hipMemcpy(d_synapse, synapse, SYNAPSE_SIZE * sizeof(VTYPE), hipMemcpyHostToDevice);
hipMemcpy(d_neuron_i, neuron_i, NYPAD * NXPAD * Ni * sizeof(VTYPE), hipMemcpyHostToDevice);
cout << "starting computation\n";
// simple Version
convolution_layer(*synapse, *neuron_i, *neuron_n);
cout << "simple version complete!\n";
// simple CUDA version
hipLaunchKernelGGL(( convolution_layer_cuda), dim3(1),dim3(256), 0, 0, d_synapse, d_neuron_i, d_neuron_n);
hipDeviceSynchronize();
hipMemcpy(neuron_n2, d_neuron_n, NYSCL * NXSCL * Nn * sizeof(VTYPE), hipMemcpyDeviceToHost);
cout << "cuda simple version complete!\n";
// //Blocked Version
// convolution_layer_blocked(*synapse, *neuron_i, *neuron_n2);
// cout << "blocked computation complete!\n";
// verify the results
compare((VTYPE *)*neuron_n, (VTYPE *)*neuron_n2, NYSCL * NXSCL * Nn);
free(synapse);
free(neuron_i);
free(neuron_n);
free(neuron_n2);
hipFree(d_synapse);
hipFree(d_neuron_i);
hipFree(d_neuron_n);
hipFree(d_neuron_n2);
cout << "done\n";
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compile flags: nvcc convolution.cu -o conv -DNx=224 -DNy=224 -DKx=3 -DKy=3 -DNi=64 -DNn=64 -DTii=32 -DTi=16 -DTnn=32 -DTn=16 -DTx=7 -DTy=7
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// | 47f445f7895d85441ea8facf0666ad2a3ec9d93c.cu | #include <iostream>
#include <string>
#include <stdio.h>
#include "dnn.hpp"
using namespace std;
//Define the parameters if not defined externally
#ifndef Sy
#define Sy 1
#define Sx 1
#endif
#ifndef Tnn
//Tiling Sizes
#define Tnn 32
#define Tn 16
#define Ti 16
#define Ty 8
#define Tx 8
#endif
#define NYPAD (Ny + Ky)
#define NXPAD (Nx + Kx)
#define NYSCL (Ny / Sy)
#define NXSCL (Nx / Sx)
#define SYNAPSE_SIZE (1L * Ky * Kx * Nn * Ni)
VTYPE (*synapse)
[Ky][Kx][Nn][Ni];
VTYPE (*neuron_i)
[NYPAD][NXPAD][Ni];
VTYPE (*neuron_n)
[NYSCL][NXSCL][Nn];
VTYPE (*neuron_n2)
[NYSCL][NXSCL][Nn];
void fill_convolution_shared_simple(VTYPE (&synapse)[Ky][Kx][Nn][Ni],
VTYPE (&neuron_i)[NYPAD][NXPAD][Ni])
{
for (int yy = 0; yy < Ky; ++yy)
{
for (int xx = 0; xx < Kx; ++xx)
{
for (int nn = 0; nn < Nn; ++nn)
{
for (int ni = 0; ni < Ni; ++ni)
{
synapse[yy][xx][nn][ni] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX) - 0.5f;
}
}
}
}
for (int yy = 0; yy < NYPAD; ++yy)
{
for (int xx = 0; xx < NXPAD; ++xx)
{
for (int ni = 0; ni < Ni; ++ni)
{
neuron_i[yy][xx][ni] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX) - 0.5f;
}
}
}
}
void convolution_layer_blocked(
VTYPE (&synapse)[Ky][Kx][Nn][Ni],
VTYPE (&neuron_i)[NYPAD][NXPAD][Ni],
VTYPE (&neuron_n)[NYSCL][NXSCL][Nn])
{
int c1 = 0, c2 = 0;
VTYPE sum[Nn] = {0};
for (int yy = 0; yy < Ny; yy += Ty)
{
for (int xx = 0; xx < Nx; xx += Tx)
{
for (int nnn = 0; nnn < Nn; nnn += Tnn)
{
int yout = yy / Sy;
for (int y = yy; y < yy + Ty; y += Sy)
{ // tiling for y;
int xout = xx / Sx;
for (int x = xx; x < xx + Tx; x += Sx)
{ // tiling for x;
for (int nn = nnn; nn < nnn + Tnn; nn += Tn)
{
for (int n = nn; n < nn + Tn; n++)
{
sum[n] = 0;
}
for (int ky = 0; ky < Ky; ky++)
{ // sliding window;
for (int kx = 0; kx < Kx; kx++)
{
int ii = 0;
VTYPE sum_sc;
for (; ii < Ni - Ti + 1; ii += Ti)
{
for (int n = nn; n < nn + Tn; n++)
{
sum_sc = 0;
for (int i = ii; i < ii + Ti; i++)
{
VTYPE sv = synapse[ky][kx][n][i];
VTYPE nv = neuron_i[ky + y][kx + x][i];
sum_sc += sv * nv;
}
sum[n] += sum_sc;
}
}
}
}
//transfer
for (int n = nn; n < nn + Tn; n++)
{
neuron_n[yout][xout][n] = transfer(sum[n]);
}
}
xout++;
}
yout++;
}
}
}
}
}
void convolution_layer(VTYPE (&synapse)[Ky][Kx][Nn][Ni],
VTYPE (&neuron_i)[NYPAD][NXPAD][Ni],
VTYPE (&neuron_n)[NYSCL][NXSCL][Nn])
{
VTYPE sum[Nn] = {0};
// — Original code — (excluding nn, ii loops)
int yout = 0;
for (int y = 0; y < Ny; y += Sy)
{ // tiling for y;
int xout = 0;
for (int x = 0; x < Ny; x += Sx)
{ // tiling for x;
for (int nn = 0; nn < Nn; nn += Tn)
{
for (int n = nn; n < nn + Tn; n++)
{
sum[n] = 0;
}
// sliding window;
for (int ky = 0; ky < Ky; ky++)
for (int kx = 0; kx < Kx; kx++)
for (int n = nn; n < nn + Tn; n++)
for (int i = 0; i < Ni; i++)
{
VTYPE sv = synapse[ky][kx][n][i];
VTYPE nv = neuron_i[ky + y][kx + x][i];
sum[n] += sv * nv;
}
for (int n = nn; n < nn + Tn; n++)
{
neuron_n[yout][xout][n] = transfer(sum[n]);
}
}
xout++;
}
yout++;
}
}
__global__ void convolution_layer_cuda(VTYPE* synapse, VTYPE* neuron_i, VTYPE* neuron_n)
{
VTYPE sum[Nn] = {0};
// — Original code — (excluding nn, ii loops)
int yout = 0;
int index = threadIdx.x;
int stride = blockDim.x;
for (int y = index; y < Ny; y += stride)
{ // tiling for y;
for (int x = 0; x < Ny; x += Sx)
{ // tiling for x;
for (int nn = 0; nn < Nn; nn += Tn)
{
for (int n = nn; n < nn + Tn; n++)
{
sum[n] = 0;
}
// sliding window;
for (int ky = 0; ky < Ky; ky++)
for (int kx = 0; kx < Kx; kx++)
for (int n = nn; n < nn + Tn; n++)
for (int i = 0; i < Ni; i++)
{
VTYPE sv = synapse[ky * (Kx * Nn * Ni) + kx * (Nn * Ni) + n * Ni + i]; //[ky][kx][n][i];
VTYPE nv = neuron_i[(ky + y) * (NXPAD * Ni) + (kx + x) * Ni + i]; //[ky + y][kx + x][i];
sum[n] += sv * nv;
}
for (int n = nn; n < nn + Tn; n++)
{
neuron_n[y * (NXSCL * Nn) + x * Nn + n] = (sum[n]>0) ? sum[n] : sum[n]/4;
//printf("yout: %d, xout: %d, index: %d, neuron:%f\n", y, x, index, neuron_n[y * (NXSCL * Nn) + x * Nn + n]);
}
}
}
}
}
int main(const int argc, const char **argv)
{
cout << "allocating memory\n";
synapse = (VTYPE(*)[Ky][Kx][Nn][Ni])malloc(SYNAPSE_SIZE * sizeof(VTYPE));
neuron_i = (VTYPE(*)[NYPAD][NXPAD][Ni])malloc(NYPAD * NXPAD * Ni * sizeof(VTYPE));
neuron_n = (VTYPE(*)[NYSCL][NXSCL][Nn])malloc(NYSCL * NXSCL * Nn * sizeof(VTYPE));
neuron_n2 = (VTYPE(*)[NYSCL][NXSCL][Nn])malloc(NYSCL * NXSCL * Nn * sizeof(VTYPE));
cout << "allocating memory for CUDA \n";
VTYPE* d_synapse = NULL;
VTYPE* d_neuron_i = NULL;
VTYPE* d_neuron_n = NULL;
VTYPE* d_neuron_n2 = NULL;
cudaMalloc((void**) &d_synapse, SYNAPSE_SIZE * sizeof(VTYPE));
cudaMalloc((void**) &d_neuron_i, NYPAD * NXPAD * Ni * sizeof(VTYPE));
cudaMalloc((void**) &d_neuron_n, NYSCL * NXSCL * Nn * sizeof(VTYPE));
// reserved for blocked version of the conv layer
cudaMalloc((void**) &d_neuron_n2, NYSCL * NXSCL * Nn * sizeof(VTYPE));
cout << "initializing arrays\n";
fill_convolution_shared_simple(*synapse, *neuron_i);
cout << "copying initialized arrays from host to device\n";
cudaMemcpy(d_synapse, synapse, SYNAPSE_SIZE * sizeof(VTYPE), cudaMemcpyHostToDevice);
cudaMemcpy(d_neuron_i, neuron_i, NYPAD * NXPAD * Ni * sizeof(VTYPE), cudaMemcpyHostToDevice);
cout << "starting computation\n";
// simple Version
convolution_layer(*synapse, *neuron_i, *neuron_n);
cout << "simple version complete!\n";
// simple CUDA version
convolution_layer_cuda<<<1,256>>>(d_synapse, d_neuron_i, d_neuron_n);
cudaDeviceSynchronize();
cudaMemcpy(neuron_n2, d_neuron_n, NYSCL * NXSCL * Nn * sizeof(VTYPE), cudaMemcpyDeviceToHost);
cout << "cuda simple version complete!\n";
// //Blocked Version
// convolution_layer_blocked(*synapse, *neuron_i, *neuron_n2);
// cout << "blocked computation complete!\n";
// verify the results
compare((VTYPE *)*neuron_n, (VTYPE *)*neuron_n2, NYSCL * NXSCL * Nn);
free(synapse);
free(neuron_i);
free(neuron_n);
free(neuron_n2);
cudaFree(d_synapse);
cudaFree(d_neuron_i);
cudaFree(d_neuron_n);
cudaFree(d_neuron_n2);
cout << "done\n";
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compile flags: nvcc convolution.cu -o conv -DNx=224 -DNy=224 -DKx=3 -DKy=3 -DNi=64 -DNn=64 -DTii=32 -DTi=16 -DTnn=32 -DTn=16 -DTx=7 -DTy=7
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
8e353bf0cefd9b054141f8ec20307bb5d3d4ba9c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity HW5
Histogramming for Speed
The goal of this assignment is compute a histogram
as fast as possible. We have simplified the problem as much as
possible to allow you to focus solely on the histogramming algorithm.
The input values that you need to histogram are already the exact
bins that need to be updated. This is unlike in HW3 where you needed
to compute the range of the data and then do:
bin = (val - valMin) / valRange to determine the bin.
Here the bin is just:
bin = val
so the serial histogram calculation looks like:
for (i = 0; i < numElems; ++i)
histo[val[i]]++;
That's it! Your job is to make it run as fast as possible!
The values are normally distributed - you may take
advantage of this fact in your implementation.
*/
#include "utils.h"
#include "stdio.h"
int get_max_size(int n, int d) {
int size = n/d;
if (n%d !=0 )
{
size = size+1;
}
return size;
}
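//Stage 1: each block builds a private histogram of its slice of the input in shared memory
//(one bin per thread, updated with atomicAdd) and writes it to output[blockIdx.x*numBins + bin].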
__global__ void localHistograms (const unsigned int* const input,
unsigned int *output,
const int size,
int numBins,
int perThreadReads)
{
extern __shared__ unsigned int sdata2[];
int myX = blockDim.x*blockIdx.x + threadIdx.x;
//int tid = threadIdx.x;
//printf("myX is %d and myX*perThreadReads is %d and size is %d\n",myX,myX*perThreadReads,size);
sdata2[threadIdx.x] = 0;
__syncthreads(); //every bin must be zeroed before any thread starts accumulating
if(myX < size)
{
atomicAdd(&sdata2[input[myX]],1);
}
__syncthreads(); //wait for all atomic updates before writing the block-local histogram out
output[blockIdx.x*numBins+threadIdx.x] = sdata2[threadIdx.x];
}
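//Stage 2: each block pulls numHistReducePerBlock partial histograms into shared memory and folds them
//together bin-by-bin with a tree reduction; the host relaunches this kernel until one histogram remains.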
__global__
void histoReduce(int numBins,
unsigned int *input,
unsigned int *output,
int size,
int numHistReducePerBlock)
{
extern __shared__ unsigned int sdata3[];
int myX = blockDim.x*blockIdx.x + threadIdx.x;
int tid = threadIdx.x;
for (int i=0;i<numHistReducePerBlock;i++)
{
int index = (blockIdx.x*numHistReducePerBlock+i)*numBins+threadIdx.x;
if (blockIdx.x*numHistReducePerBlock+i >= size)
{
sdata3[i*numBins + tid] = 0;
}
else
{
sdata3[i*numBins + tid] = input[index];
}
}
__syncthreads();
if (myX >= size)
{
output[blockIdx.x*numBins+threadIdx.x] = 0;
}
for (unsigned int s = numHistReducePerBlock/2; s > 0; s/=2)
{
for (int i=0;i<s;i++)
{
sdata3[i*numBins+tid] = sdata3[i*numBins+tid] + sdata3[i*numBins+s*numBins+tid];
}
__syncthreads();
}
output[blockIdx.x*numBins+threadIdx.x] = sdata3[threadIdx.x];
}
void computeHistogram(const unsigned int* const d_vals, //INPUT
unsigned int* const d_histo, //OUTPUT
const unsigned int numBins,
const unsigned int numElems)
{
unsigned int *d_histBins;
int numThreadsPerBlock = 1024;
printf("Size is %d\n",numElems);
int perThreadReads = 1;
dim3 blockDim(numThreadsPerBlock);
dim3 gridDim(get_max_size(numElems,numThreadsPerBlock));
int sizeOfBins = numBins*sizeof(unsigned int)*gridDim.x;
printf("Grid Dimension %d\n",gridDim.x);
checkCudaErrors(hipMalloc(&d_histBins, sizeOfBins));
printf("Numbins - %d\n",numBins);
hipLaunchKernelGGL(( localHistograms), dim3(gridDim),dim3(blockDim),numBins*sizeof(unsigned int), 0, d_vals,d_histBins,numElems,numBins,perThreadReads);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
int size = gridDim.x;
int localHistThreadsPerBlock = 1024;
dim3 blockDimLocalHist(localHistThreadsPerBlock);
unsigned int* d_curr_in;
d_curr_in = d_histBins;
unsigned int* d_curr_out;
int numHistReducePerBlock = 8;
while (size != 1 )
{
dim3 gridDimLocalHist(get_max_size(size,numHistReducePerBlock));
int sharedMemorySize = numBins*sizeof(unsigned int)*numHistReducePerBlock;
printf("Histogram Reduce - Block Size - %d with size - %d and shmem - %d\n",gridDimLocalHist.x,size,sharedMemorySize);
//Allocate d_curr_out here
checkCudaErrors(hipMalloc(&d_curr_out, numBins*sizeof(unsigned int) * gridDimLocalHist.x));
//Call Kernel
hipLaunchKernelGGL(( histoReduce), dim3(gridDimLocalHist),dim3(blockDimLocalHist),sharedMemorySize, 0, numBins,d_curr_in,d_curr_out,size,numHistReducePerBlock);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(d_curr_in));
d_curr_in = d_curr_out;
//Update size here
size = get_max_size(size,numHistReducePerBlock);
}
//checkCudaErrors(hipFree(d_histBins));
checkCudaErrors(hipMemcpy(d_histo, d_curr_out, sizeof(unsigned int)*numBins, hipMemcpyDeviceToDevice));
checkCudaErrors(hipFree(d_curr_out));
}
| 8e353bf0cefd9b054141f8ec20307bb5d3d4ba9c.cu | /* Udacity HW5
Histogramming for Speed
The goal of this assignment is compute a histogram
as fast as possible. We have simplified the problem as much as
possible to allow you to focus solely on the histogramming algorithm.
The input values that you need to histogram are already the exact
bins that need to be updated. This is unlike in HW3 where you needed
to compute the range of the data and then do:
bin = (val - valMin) / valRange to determine the bin.
Here the bin is just:
bin = val
so the serial histogram calculation looks like:
for (i = 0; i < numElems; ++i)
histo[val[i]]++;
That's it! Your job is to make it run as fast as possible!
The values are normally distributed - you may take
advantage of this fact in your implementation.
*/
#include "utils.h"
#include "stdio.h"
int get_max_size(int n, int d) {
int size = n/d;
if (n%d !=0 )
{
size = size+1;
}
return size;
}
__global__ void localHistograms (const unsigned int* const input,
unsigned int *output,
const int size,
int numBins,
int perThreadReads)
{
extern __shared__ unsigned int sdata2[];
int myX = blockDim.x*blockIdx.x + threadIdx.x;
//int tid = threadIdx.x;
//printf("myX is %d and myX*perThreadReads is %d and size is %d\n",myX,myX*perThreadReads,size);
sdata2[threadIdx.x] = 0;
__syncthreads(); //every bin must be zeroed before any thread starts accumulating
if(myX < size)
{
atomicAdd(&sdata2[input[myX]],1);
}
__syncthreads(); //wait for all atomic updates before writing the block-local histogram out
output[blockIdx.x*numBins+threadIdx.x] = sdata2[threadIdx.x];
}
__global__
void histoReduce(int numBins,
unsigned int *input,
unsigned int *output,
int size,
int numHistReducePerBlock)
{
extern __shared__ unsigned int sdata3[];
int myX = blockDim.x*blockIdx.x + threadIdx.x;
int tid = threadIdx.x;
for (int i=0;i<numHistReducePerBlock;i++)
{
int index = (blockIdx.x*numHistReducePerBlock+i)*numBins+threadIdx.x;
if (blockIdx.x*numHistReducePerBlock+i >= size)
{
sdata3[i*numBins + tid] = 0;
}
else
{
sdata3[i*numBins + tid] = input[index];
}
}
__syncthreads();
if (myX >= size)
{
output[blockIdx.x*numBins+threadIdx.x] = 0;
}
for (unsigned int s = numHistReducePerBlock/2; s > 0; s/=2)
{
for (int i=0;i<s;i++)
{
sdata3[i*numBins+tid] = sdata3[i*numBins+tid] + sdata3[i*numBins+s*numBins+tid];
}
__syncthreads();
}
output[blockIdx.x*numBins+threadIdx.x] = sdata3[threadIdx.x];
}
void computeHistogram(const unsigned int* const d_vals, //INPUT
unsigned int* const d_histo, //OUTPUT
const unsigned int numBins,
const unsigned int numElems)
{
unsigned int *d_histBins;
int numThreadsPerBlock = 1024;
printf("Size is %d\n",numElems);
int perThreadReads = 1;
dim3 blockDim(numThreadsPerBlock);
dim3 gridDim(get_max_size(numElems,numThreadsPerBlock));
int sizeOfBins = numBins*sizeof(unsigned int)*gridDim.x;
printf("Grid Dimension %d\n",gridDim.x);
checkCudaErrors(cudaMalloc(&d_histBins, sizeOfBins));
printf("Numbins - %d\n",numBins);
localHistograms<<<gridDim,blockDim,numBins*sizeof(unsigned int)>>>(d_vals,d_histBins,numElems,numBins,perThreadReads);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
int size = gridDim.x;
int localHistThreadsPerBlock = 1024;
dim3 blockDimLocalHist(localHistThreadsPerBlock);
unsigned int* d_curr_in;
d_curr_in = d_histBins;
unsigned int* d_curr_out;
int numHistReducePerBlock = 8;
while (size != 1 )
{
dim3 gridDimLocalHist(get_max_size(size,numHistReducePerBlock));
int sharedMemorySize = numBins*sizeof(unsigned int)*numHistReducePerBlock;
printf("Histogram Reduce - Block Size - %d with size - %d and shmem - %d\n",gridDimLocalHist.x,size,sharedMemorySize);
//Allocate d_curr_out here
checkCudaErrors(cudaMalloc(&d_curr_out, numBins*sizeof(unsigned int) * gridDimLocalHist.x));
//Call Kernel
histoReduce<<<gridDimLocalHist,blockDimLocalHist,sharedMemorySize>>>(numBins,d_curr_in,d_curr_out,size,numHistReducePerBlock);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_curr_in));
d_curr_in = d_curr_out;
//Update size here
size = get_max_size(size,numHistReducePerBlock);
}
//checkCudaErrors(cudaFree(d_histBins));
checkCudaErrors(cudaMemcpy(d_histo, d_curr_out, sizeof(unsigned int)*numBins, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaFree(d_curr_out));
}
|
sync_all_gpus_functor.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
void SparseEmbeddingFunctors::sync_all_gpus(const ResourceManager& resource_manager) const {
CudaDeviceContext context;
size_t local_gpu_count = resource_manager.get_local_gpu_count();
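// Visit each GPU owned by this process and block until the work queued on its stream has drained.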
for (size_t id = 0; id < local_gpu_count; id++) {
const auto& local_gpu = resource_manager.get_local_gpu(id);
context.set_device(local_gpu->get_device_id());
CK_CUDA_THROW_(hipStreamSynchronize(local_gpu->get_stream()));
}
}
} // namespace HugeCTR | sync_all_gpus_functor.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
void SparseEmbeddingFunctors::sync_all_gpus(const ResourceManager& resource_manager) const {
CudaDeviceContext context;
size_t local_gpu_count = resource_manager.get_local_gpu_count();
for (size_t id = 0; id < local_gpu_count; id++) {
const auto& local_gpu = resource_manager.get_local_gpu(id);
context.set_device(local_gpu->get_device_id());
CK_CUDA_THROW_(cudaStreamSynchronize(local_gpu->get_stream()));
}
}
} // namespace HugeCTR |
878a8e05f341ba4a949f447ee20f9962d708a851.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void im2col_align_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, float *data_col, const int bit_align)
{
//__shared__ float tmp_s[1];
//#define SHRED_VALS ((BLOCK / 169) * )
//__shared__ float dst_s[1024];
//__shared__ float dst_s[1024];
//__shared__ uint32_t bit_s[32];
//__shared__ uint8_t bit_s[128];
int index = blockIdx.x*blockDim.x + threadIdx.x;
for (; index < n; index += blockDim.x*gridDim.x) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
//float* data_col_ptr = data_col;
//float* data_col_ptr_32 = data_col + (channel_out * bit_align + h_out * width_col + w_out) / 32;
//data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
//data_col_ptr += channel_out * bit_align + h_out * width_col + w_out;
float* data_col_ptr = &data_col[channel_out * bit_align + h_out * width_col + w_out];
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
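// Each (channel, ky, kx) combination becomes one output row of length bit_align: the width_col*height_col
// patch values are stored contiguously and padded up to bit_align so each row can later be bit-packed
// (see the commented-out ballot/bit-mask code below).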
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
float val = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
int pre_out_index = index % (width_col*height_col);
int out_index = (channel_out + i*ksize + j) * bit_align + pre_out_index;// h_out * width_col + w_out;
data_col[out_index] = val;
//*data_col_ptr = val;
//dst_s[threadIdx.x] = val;
//tmp_s[0] = val;
//*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
// data_im_ptr[i * width + j] : 0;
//float src_val = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0;
//unsigned int bit_mask = __ballot_sync(0xffffffff, src_val > 0);
//if (threadIdx.x % WARP_SIZE == 0) *((unsigned int*)data_col_ptr_32) = bit_mask;
// use atomicOr() // *dst_ptr |= (mask << (col_index % 8));
//data_col_ptr_32 += bit_align / 32;
//data_col_ptr += height_col * width_col;
data_col_ptr += bit_align;
}
}
}
} | 878a8e05f341ba4a949f447ee20f9962d708a851.cu | #include "includes.h"
__global__ void im2col_align_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, float *data_col, const int bit_align)
{
//__shared__ float tmp_s[1];
//#define SHRED_VALS ((BLOCK / 169) * )
//__shared__ float dst_s[1024];
//__shared__ float dst_s[1024];
//__shared__ uint32_t bit_s[32];
//__shared__ uint8_t bit_s[128];
int index = blockIdx.x*blockDim.x + threadIdx.x;
for (; index < n; index += blockDim.x*gridDim.x) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
//float* data_col_ptr = data_col;
//float* data_col_ptr_32 = data_col + (channel_out * bit_align + h_out * width_col + w_out) / 32;
//data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
//data_col_ptr += channel_out * bit_align + h_out * width_col + w_out;
float* data_col_ptr = &data_col[channel_out * bit_align + h_out * width_col + w_out];
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
float val = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
int pre_out_index = index % (width_col*height_col);
int out_index = (channel_out + i*ksize + j) * bit_align + pre_out_index;// h_out * width_col + w_out;
data_col[out_index] = val;
//*data_col_ptr = val;
//dst_s[threadIdx.x] = val;
//tmp_s[0] = val;
//*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
// data_im_ptr[i * width + j] : 0;
//float src_val = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0;
//unsigned int bit_mask = __ballot_sync(0xffffffff, src_val > 0);
//if (threadIdx.x % WARP_SIZE == 0) *((unsigned int*)data_col_ptr_32) = bit_mask;
// use atomicOr() // *dst_ptr |= (mask << (col_index % 8));
//data_col_ptr_32 += bit_align / 32;
//data_col_ptr += height_col * width_col;
data_col_ptr += bit_align;
}
}
}
} |
70ddd44267366ae5a93e7afecb2a3a31368e63a4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "differenceImg_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
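// Round the matrix dimensions up to the nearest multiple of the block dimensions so the launch grid
// covers the whole matrix.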
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
differenceImg_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, );
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
differenceImg_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
differenceImg_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 70ddd44267366ae5a93e7afecb2a3a31368e63a4.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "differenceImg_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
differenceImg_gpu<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
differenceImg_gpu<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
differenceImg_gpu<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
822e69b35f2a157e68c52cf78ba4aa46eb2f196b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/hip/NumericLimits.cuh>
#include <THH/THHNumerics.cuh>
#include <ATen/hip/HIPContext.h>
namespace at { namespace native {
template <typename integer>
constexpr inline integer ceil_div(integer n, integer m) {
return (n + m - 1) / m;
}
template<typename scalar_t, typename idx_t, typename BinaryOperation>
__device__ void binary_op_update(const scalar_t lhs, scalar_t& rhs, const idx_t lhs_idx, idx_t& rhs_idx, BinaryOperation binary_op) {
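// Update rule shared by cummax/cummin: once rhs is NaN it stays NaN; otherwise lhs replaces rhs (and its
// index is recorded) when lhs is NaN or when rhs does not win under binary_op, so NaNs propagate forward.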
if(!THCNumerics<scalar_t>::isnan(rhs) && (THCNumerics<scalar_t>::isnan(lhs) || !binary_op(rhs, lhs))) {
rhs = lhs;
rhs_idx = lhs_idx;
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
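// Within each chunk of 2*num_threads_x columns the block runs a work-efficient (Blelloch-style) scan in
// shared memory: an up-sweep builds partial aggregates, a down-sweep distributes them, and
// block_total/block_idx_final carry the running result across consecutive chunks of the row.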
template<typename scalar_t, int num_threads_x, int num_threads_y, class BinaryFunction>
__global__ void tensor_kernel_scan_innermost_dim_with_indices(const scalar_t *self_, scalar_t *values_, int64_t *indices_,
int num_rows, int row_size,
scalar_t init, BinaryFunction binary_op) {
__shared__ scalar_t vbuf[num_threads_y][2 * num_threads_x];
__shared__ int64_t ibuf[num_threads_y][2 * num_threads_x];
scalar_t* row_buf = vbuf[threadIdx.y];
int64_t* row_idx_buf = ibuf[threadIdx.y];
for (int block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
int row = block_row + threadIdx.y;
const scalar_t *row_self = self_ + row * row_size;
scalar_t *row_values = values_ + row * row_size;
int64_t *row_indices = indices_ + row * row_size;
scalar_t block_total = init;
int64_t block_idx_final = 0;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (int block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
int col1 = block_col + threadIdx.x;
int col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_self[col1];
row_idx_buf[threadIdx.x] = col1;
} else {
row_buf[threadIdx.x] = init;
// No need to set the index here as the value in init will never be selected
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_self[col2];
row_idx_buf[num_threads_x + threadIdx.x] = col2;
} else {
row_buf[num_threads_x + threadIdx.x] = init;
// No need to set the index here as the value in init will never be selected
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
binary_op_update(block_total, row_buf[0], block_idx_final, row_idx_buf[0], binary_op);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (int s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
int offset = (2 * threadIdx.x + 1) * d - 1;
binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op);
}
__syncthreads();
}
// Down-sweep.
for (int s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
int offset = 2 * (threadIdx.x + 1) * d - 1;
binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size){
row_values[col1] = row_buf[threadIdx.x];
row_indices[col1] = row_idx_buf[threadIdx.x];
}
if (col2 < row_size) {
row_values[col2] = row_buf[num_threads_x + threadIdx.x];
row_indices[col2] = row_idx_buf[num_threads_x + threadIdx.x];
}
}
block_total = row_buf[2 * num_threads_x - 1];
block_idx_final = row_idx_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to compute the variance;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<typename scalar_t, class BinaryFunction>
__global__ void tensor_kernel_scan_outer_dim_with_indices(scalar_t *self_, scalar_t *values_, int64_t *indices_,
int num_orows, int num_irows, int row_size, scalar_t init, BinaryFunction binary_op) {
for (int orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (int irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
scalar_t *self = self_ + orow * row_size * num_irows + irow;
scalar_t *values = values_ + orow * row_size * num_irows + irow;
int64_t *indices = indices_ + orow * row_size * num_irows + irow;
scalar_t out = init;
int64_t out_idx = 0;
for (int64_t col = 0; col < row_size; ++col) {
if(THCNumerics<scalar_t>::isnan(*self) || (!THCNumerics<scalar_t>::isnan(out) && binary_op(*self, out))) {
out = *self;
out_idx = col;
}
*values = out;
*indices = out_idx;
self += num_irows;
values += num_irows;
indices += num_irows;
}
}
}
}
template<typename scalar_t, class BinaryFunction>
__host__ void scan_outer_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices,
int dim, scalar_t init, BinaryFunction binary_op) {
int row_size = self.size(dim);
auto sizes = self.sizes();
// Treat all outer dimensions (i.e. dim_ < dim) as one.
int num_orows = std::accumulate(sizes.begin(), sizes.begin() + dim, 1, std::multiplies<int>());
  // Treat all inner dimensions (i.e. dim_ > dim) as one.
int num_irows = std::accumulate(sizes.begin() + dim + 1, sizes.end(), 1, std::multiplies<int>());
dim3 threads(::min(512, int(num_irows)));
int maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[0];
dim3 grid(::min(maxGridDim, num_orows), ::min(maxGridDim, ceil_div(num_irows, int(threads.x))));
hipLaunchKernelGGL(( tensor_kernel_scan_outer_dim_with_indices<scalar_t>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(),
num_orows, num_irows, row_size, init, binary_op);
AT_CUDA_CHECK(hipGetLastError());
}
template <typename scalar_t, class BinaryFunction>
__host__ void scan_innermost_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
// Treat all outer dimensions as a single dimension.
int row_size = self.size(ndim - 1);
int num_rows = self.numel() / row_size;
dim3 threads(16, 32);
dim3 grid(::min(at::cuda::getCurrentDeviceProperties()->maxGridSize[0], ceil_div(num_rows, int(threads.y))));
hipLaunchKernelGGL(( tensor_kernel_scan_innermost_dim_with_indices<scalar_t, 16, 32>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(),
num_rows, row_size, init, binary_op);
AT_CUDA_CHECK(hipGetLastError());
}
template<typename scalar_t, typename BinaryFunction>
void scan_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices,
int64_t dim, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
Tensor self_ = self.contiguous();
Tensor values_ = values.contiguous();
Tensor indices_ = indices.contiguous();
if (dim == ndim - 1) {
scan_innermost_dim_with_indices<scalar_t>(self, values, indices, init, binary_op);
} else {
scan_outer_dim_with_indices<scalar_t>(self, values, indices, dim, init, binary_op);
}
}
void cummax_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) {
TensorArg output_arg{ values, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ self, "input", 3 };
checkAllSameGPU("cummax", {output_arg, indices_arg, input_arg});
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Bool, at::ScalarType::Half,
self.scalar_type(), "cummax_cuda", [&]() {
scalar_t init = self.is_floating_point() ? (-1*std::numeric_limits<scalar_t>::infinity()) : std::numeric_limits<scalar_t>::lowest();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::greater_equal<scalar_t>());
});
}
void cummin_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) {
TensorArg output_arg{ values, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ self, "input", 3 };
checkAllSameGPU("cummin", {output_arg, indices_arg, input_arg});
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Bool, at::ScalarType::Half,
self.scalar_type(), "cummin_cuda", [&]() {
scalar_t init = self.is_floating_point() ? std::numeric_limits<scalar_t>::infinity() : std::numeric_limits<scalar_t>::max();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::less_equal<scalar_t>());
});
}
// TODO: The implementation of `tensor_kernel_scan_outer_dim` and
// `tensor_kernel_scan_innermost_dim` is similar to
// `tensor_kernel_scan_outer_dim_with_indices`
// `tensor_kernel_scan_outer_dim_with_indices` and should be refactored to
// remove the duplication.
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to scan;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
 * Thread blocks with the same blockIdx.x process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<typename scalar_t, class BinaryOp>
__global__ void tensor_kernel_scan_outer_dim(scalar_t *tgt_, scalar_t *src_,
unsigned num_orows, unsigned num_irows, unsigned row_size,
scalar_t init, BinaryOp binary_op)
{
for (unsigned orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (unsigned irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
scalar_t *src = src_ + orow * row_size * num_irows + irow;
scalar_t *tgt = tgt_ + orow * row_size * num_irows + irow;
scalar_t acc = init;
for (unsigned col = 0; col < row_size; ++col) {
acc = binary_op(acc, *src);
*tgt = acc;
src += num_irows;
tgt += num_irows;
}
}
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
template<typename T, int num_threads_x, int num_threads_y, class BinaryFunction>
__global__ void tensor_kernel_scan_innermost_dim(T *tgt_, T *src_,
unsigned num_rows, unsigned row_size,
T init, BinaryFunction binary_op)
{
__shared__ T sbuf[num_threads_y][2 * num_threads_x];
T* row_buf = sbuf[threadIdx.y];
for (unsigned block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
unsigned row = block_row + threadIdx.y;
T block_total = init;
T *row_src = src_ + row * row_size;
T *row_tgt = tgt_ + row * row_size;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (unsigned block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
unsigned col1 = block_col + threadIdx.x;
unsigned col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_src[col1];
} else {
row_buf[threadIdx.x] = init;
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_src[col2];
} else {
row_buf[num_threads_x + threadIdx.x] = init;
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
row_buf[0] = binary_op(row_buf[0], block_total);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (unsigned s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
unsigned offset = (2 * threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Down-sweep.
for (unsigned s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
unsigned offset = 2 * (threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size) row_tgt[col1] = row_buf[threadIdx.x];
if (col2 < row_size) row_tgt[col2] = row_buf[num_threads_x + threadIdx.x];
}
block_total = row_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
void check_fits_in_unsigned(int64_t val, const char* name) {
constexpr auto umax = std::numeric_limits<unsigned>::max();
TORCH_CHECK(
val >= 0 && val <= umax, name, " must fit in a 32-bit unsigned value");
}
template<typename scalar_t, class BinaryFunction>
__host__ void scan_outer_dim(const Tensor& self, Tensor& result,
int dim, scalar_t init, BinaryFunction binary_op) {
int64_t row_size = self.size(dim);
auto sizes = self.sizes();
// Treat all outer dimensions (i.e. dim_ < dim) as one.
int64_t num_orows = std::accumulate(sizes.begin(), sizes.begin() + dim, 1, std::multiplies<int64_t>());
  // Treat all inner dimensions (i.e. dim_ > dim) as one.
int64_t num_irows = std::accumulate(sizes.begin() + dim + 1, sizes.end(), 1, std::multiplies<int64_t>());
dim3 threads(::min(512, int(num_irows)));
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[0];
dim3 grid(::min(maxGridDim, num_orows), ::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x})));
check_fits_in_unsigned(num_irows, "num_irows");
check_fits_in_unsigned(num_orows, "num_orows");
check_fits_in_unsigned(row_size, "row_size");
hipLaunchKernelGGL(( tensor_kernel_scan_outer_dim<scalar_t>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(),
num_orows, num_irows, row_size, init, binary_op);
AT_CUDA_CHECK(hipGetLastError());
}
template <typename scalar_t, class BinaryFunction>
void scan_innermost_dim(const Tensor& self, Tensor& result, scalar_t init, BinaryFunction binary_op) {
int64_t ndim = self.dim();
// Treat all outer dimensions as a single dimension.
int64_t row_size = self.size(ndim - 1);
int64_t num_rows = self.numel() / row_size;
dim3 threads(16, 32);
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[0];
dim3 grid(::min(maxGridDim, ceil_div(num_rows, int64_t{threads.y})));
check_fits_in_unsigned(num_rows, "Number of rows (self.numel()/self.size(self.dim()-1))");
check_fits_in_unsigned(row_size, "row_size");
hipLaunchKernelGGL(( tensor_kernel_scan_innermost_dim<scalar_t, 16, 32>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(),
num_rows, row_size, init, binary_op);
AT_CUDA_CHECK(hipGetLastError());
}
template<typename scalar_t, typename BinaryFunction>
void scan_dim(const Tensor& self, Tensor& result,
int64_t dim, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
Tensor self_ = self.contiguous();
result = result.contiguous();
if (dim == ndim - 1) {
scan_innermost_dim<scalar_t>(self_, result, init, binary_op);
} else {
scan_outer_dim<scalar_t>(self_, result, dim, init, binary_op);
}
}
Tensor& _logcumsumexp_out_cuda(Tensor& result, const Tensor& self, int64_t dim) {
result.resize_(self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
TensorArg output_arg{ result, "output", 1 };
TensorArg input_arg{ self, "input", 2 };
checkAllSameGPU("logcumsumexp", {output_arg, input_arg});
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "logcumsumexp_cuda", [&]() {
scalar_t init = -std::numeric_limits<scalar_t>::infinity();
auto log_add_exp = [] __device__ (scalar_t x, scalar_t y) -> scalar_t {
return ::log1p(::exp(::min(x, y) - ::max(x, y))) +
::max(x, y);
};
scan_dim<scalar_t>(self, result, wrap_dim, init, log_add_exp);
});
return result;
}
Tensor _logcumsumexp_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return _logcumsumexp_out_cuda(result, self, dim);
}
Tensor& _cumsum_out_cuda(Tensor& result, const Tensor& self, int64_t dim) {
TensorArg output_arg{result, "output", 1};
TensorArg input_arg{self, "input", 2};
checkAllSameGPU("cumsum", {output_arg, input_arg});
checkSameType("cumsum", output_arg, input_arg);
result.resize_(self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, self.scalar_type(), "cumsum_cuda", [&]() {
scalar_t init = 0;
scan_dim<scalar_t>(
self,
result,
wrap_dim,
init,
std::plus<scalar_t>());
});
return result;
}
Tensor _cumsum_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return _cumsum_out_cuda(result, self, dim);
}
Tensor& _cumprod_out_cuda(Tensor& result, const Tensor& self, int64_t dim) {
TensorArg output_arg{result, "output", 1};
TensorArg input_arg{self, "input", 2};
checkAllSameGPU("cumprod", {output_arg, input_arg});
checkSameType("cumprod", output_arg, input_arg);
result.resize_(self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, self.scalar_type(), "cumprod_cuda", [&]() {
scalar_t init = 1;
scan_dim<scalar_t>(
self,
result,
wrap_dim,
init,
std::multiplies<scalar_t>());
});
return result;
}
Tensor _cumprod_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return _cumprod_out_cuda(result, self, dim);
}
}} // namespace at::native
| 822e69b35f2a157e68c52cf78ba4aa46eb2f196b.cu | #include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <THC/THCNumerics.cuh>
#include <ATen/cuda/CUDAContext.h>
namespace at { namespace native {
template <typename integer>
constexpr inline integer ceil_div(integer n, integer m) {
return (n + m - 1) / m;
}
template<typename scalar_t, typename idx_t, typename BinaryOperation>
__device__ void binary_op_update(const scalar_t lhs, scalar_t& rhs, const idx_t lhs_idx, idx_t& rhs_idx, BinaryOperation binary_op) {
if(!THCNumerics<scalar_t>::isnan(rhs) && (THCNumerics<scalar_t>::isnan(lhs) || !binary_op(rhs, lhs))) {
rhs = lhs;
rhs_idx = lhs_idx;
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
template<typename scalar_t, int num_threads_x, int num_threads_y, class BinaryFunction>
__global__ void tensor_kernel_scan_innermost_dim_with_indices(const scalar_t *self_, scalar_t *values_, int64_t *indices_,
int num_rows, int row_size,
scalar_t init, BinaryFunction binary_op) {
__shared__ scalar_t vbuf[num_threads_y][2 * num_threads_x];
__shared__ int64_t ibuf[num_threads_y][2 * num_threads_x];
scalar_t* row_buf = vbuf[threadIdx.y];
int64_t* row_idx_buf = ibuf[threadIdx.y];
for (int block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
int row = block_row + threadIdx.y;
const scalar_t *row_self = self_ + row * row_size;
scalar_t *row_values = values_ + row * row_size;
int64_t *row_indices = indices_ + row * row_size;
scalar_t block_total = init;
int64_t block_idx_final = 0;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (int block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
int col1 = block_col + threadIdx.x;
int col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_self[col1];
row_idx_buf[threadIdx.x] = col1;
} else {
row_buf[threadIdx.x] = init;
// No need to set the index here as the value in init will never be selected
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_self[col2];
row_idx_buf[num_threads_x + threadIdx.x] = col2;
} else {
row_buf[num_threads_x + threadIdx.x] = init;
// No need to set the index here as the value in init will never be selected
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
binary_op_update(block_total, row_buf[0], block_idx_final, row_idx_buf[0], binary_op);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (int s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
int offset = (2 * threadIdx.x + 1) * d - 1;
binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op);
}
__syncthreads();
}
// Down-sweep.
for (int s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
int offset = 2 * (threadIdx.x + 1) * d - 1;
binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size){
row_values[col1] = row_buf[threadIdx.x];
row_indices[col1] = row_idx_buf[threadIdx.x];
}
if (col2 < row_size) {
row_values[col2] = row_buf[num_threads_x + threadIdx.x];
row_indices[col2] = row_idx_buf[num_threads_x + threadIdx.x];
}
}
block_total = row_buf[2 * num_threads_x - 1];
block_idx_final = row_idx_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
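// Reference sketch (added for clarity; not part of the upstream source, and the
// helper name is illustrative): the same row-wise recurrence computed sequentially.
// THCNumerics<>::isnan comes from the THCNumerics.cuh header included above.
template <typename scalar_t, class BinaryFunction>
static void reference_row_scan_with_indices(const scalar_t* row, scalar_t* out_values,
                                             int64_t* out_indices, int row_size,
                                             scalar_t init, BinaryFunction binary_op) {
  scalar_t running = init;
  int64_t running_idx = 0;
  for (int col = 0; col < row_size; ++col) {
    // Same NaN-first rule as binary_op_update: a NaN input wins immediately;
    // otherwise binary_op decides whether this column replaces the running value.
    if (THCNumerics<scalar_t>::isnan(row[col]) ||
        (!THCNumerics<scalar_t>::isnan(running) && binary_op(row[col], running))) {
      running = row[col];
      running_idx = col;
    }
    out_values[col] = running;
    out_indices[col] = running_idx;
  }
}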
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
 * - row_size is the size of the dimension along which to scan;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
 * Thread blocks with the same blockIdx.x process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<typename scalar_t, class BinaryFunction>
__global__ void tensor_kernel_scan_outer_dim_with_indices(scalar_t *self_, scalar_t *values_, int64_t *indices_,
int num_orows, int num_irows, int row_size, scalar_t init, BinaryFunction binary_op) {
for (int orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (int irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
scalar_t *self = self_ + orow * row_size * num_irows + irow;
scalar_t *values = values_ + orow * row_size * num_irows + irow;
int64_t *indices = indices_ + orow * row_size * num_irows + irow;
scalar_t out = init;
int64_t out_idx = 0;
for (int64_t col = 0; col < row_size; ++col) {
if(THCNumerics<scalar_t>::isnan(*self) || (!THCNumerics<scalar_t>::isnan(out) && binary_op(*self, out))) {
out = *self;
out_idx = col;
}
*values = out;
*indices = out_idx;
self += num_irows;
values += num_irows;
indices += num_irows;
}
}
}
}
template<typename scalar_t, class BinaryFunction>
__host__ void scan_outer_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices,
int dim, scalar_t init, BinaryFunction binary_op) {
int row_size = self.size(dim);
auto sizes = self.sizes();
// Treat all outer dimensions (i.e. dim_ < dim) as one.
int num_orows = std::accumulate(sizes.begin(), sizes.begin() + dim, 1, std::multiplies<int>());
// Treat all inner dimensions (i.e. dim > dimension) as one.
int num_irows = std::accumulate(sizes.begin() + dim + 1, sizes.end(), 1, std::multiplies<int>());
dim3 threads(std::min(512, int(num_irows)));
int maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[0];
dim3 grid(std::min(maxGridDim, num_orows), std::min(maxGridDim, ceil_div(num_irows, int(threads.x))));
tensor_kernel_scan_outer_dim_with_indices<scalar_t><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(),
num_orows, num_irows, row_size, init, binary_op);
AT_CUDA_CHECK(cudaGetLastError());
}
template <typename scalar_t, class BinaryFunction>
__host__ void scan_innermost_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
// Treat all outer dimensions as a single dimension.
int row_size = self.size(ndim - 1);
int num_rows = self.numel() / row_size;
dim3 threads(16, 32);
dim3 grid(std::min(at::cuda::getCurrentDeviceProperties()->maxGridSize[0], ceil_div(num_rows, int(threads.y))));
tensor_kernel_scan_innermost_dim_with_indices<scalar_t, 16, 32><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(),
num_rows, row_size, init, binary_op);
AT_CUDA_CHECK(cudaGetLastError());
}
template<typename scalar_t, typename BinaryFunction>
void scan_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices,
int64_t dim, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
Tensor self_ = self.contiguous();
Tensor values_ = values.contiguous();
Tensor indices_ = indices.contiguous();
if (dim == ndim - 1) {
scan_innermost_dim_with_indices<scalar_t>(self, values, indices, init, binary_op);
} else {
scan_outer_dim_with_indices<scalar_t>(self, values, indices, dim, init, binary_op);
}
}
void cummax_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) {
TensorArg output_arg{ values, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ self, "input", 3 };
checkAllSameGPU("cummax", {output_arg, indices_arg, input_arg});
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Bool, at::ScalarType::Half,
self.scalar_type(), "cummax_cuda", [&]() {
scalar_t init = self.is_floating_point() ? (-1*std::numeric_limits<scalar_t>::infinity()) : std::numeric_limits<scalar_t>::lowest();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::greater_equal<scalar_t>());
});
}
void cummin_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) {
TensorArg output_arg{ values, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ self, "input", 3 };
checkAllSameGPU("cummin", {output_arg, indices_arg, input_arg});
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Bool, at::ScalarType::Half,
self.scalar_type(), "cummin_cuda", [&]() {
scalar_t init = self.is_floating_point() ? std::numeric_limits<scalar_t>::infinity() : std::numeric_limits<scalar_t>::max();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::less_equal<scalar_t>());
});
}
// TODO: The implementation of `tensor_kernel_scan_outer_dim` and
// `tensor_kernel_scan_innermost_dim` is similar to
// `tensor_kernel_scan_outer_dim_with_indices`
// `tensor_kernel_scan_outer_dim_with_indices` and should be refactored to
// remove the duplication.
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to scan;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
 * Thread blocks with the same blockIdx.x process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<typename scalar_t, class BinaryOp>
__global__ void tensor_kernel_scan_outer_dim(scalar_t *tgt_, scalar_t *src_,
unsigned num_orows, unsigned num_irows, unsigned row_size,
scalar_t init, BinaryOp binary_op)
{
for (unsigned orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (unsigned irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
scalar_t *src = src_ + orow * row_size * num_irows + irow;
scalar_t *tgt = tgt_ + orow * row_size * num_irows + irow;
scalar_t acc = init;
for (unsigned col = 0; col < row_size; ++col) {
acc = binary_op(acc, *src);
*tgt = acc;
src += num_irows;
tgt += num_irows;
}
}
}
}
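// Added illustration (a sketch, not from the upstream file; the helper name is
// hypothetical): the flattened offset the outer-dim kernels walk. Viewing the tensor
// as [num_orows, row_size, num_irows], element (orow, col, irow) sits at
//   orow * row_size * num_irows + col * num_irows + irow
// which is exactly the base pointer above plus col steps of num_irows.
static inline int64_t outer_dim_scan_offset(int64_t orow, int64_t col, int64_t irow,
                                            int64_t row_size, int64_t num_irows) {
  return orow * row_size * num_irows + col * num_irows + irow;
}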
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
template<typename T, int num_threads_x, int num_threads_y, class BinaryFunction>
__global__ void tensor_kernel_scan_innermost_dim(T *tgt_, T *src_,
unsigned num_rows, unsigned row_size,
T init, BinaryFunction binary_op)
{
__shared__ T sbuf[num_threads_y][2 * num_threads_x];
T* row_buf = sbuf[threadIdx.y];
for (unsigned block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
unsigned row = block_row + threadIdx.y;
T block_total = init;
T *row_src = src_ + row * row_size;
T *row_tgt = tgt_ + row * row_size;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (unsigned block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
unsigned col1 = block_col + threadIdx.x;
unsigned col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_src[col1];
} else {
row_buf[threadIdx.x] = init;
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_src[col2];
} else {
row_buf[num_threads_x + threadIdx.x] = init;
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
row_buf[0] = binary_op(row_buf[0], block_total);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (unsigned s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
unsigned offset = (2 * threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Down-sweep.
for (unsigned s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
unsigned offset = 2 * (threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size) row_tgt[col1] = row_buf[threadIdx.x];
if (col2 < row_size) row_tgt[col2] = row_buf[num_threads_x + threadIdx.x];
}
block_total = row_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
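// Added sketch (not part of the original file; name and placement are illustrative):
// what one 2*num_threads_x-wide chunk of the up-/down-sweep above computes, written
// sequentially. Folding block_total into element 0 first and then taking a running
// binary_op left-to-right yields the same inclusive result the shared-memory scan
// produces for that chunk.
template <typename T, class BinaryFunction>
static void reference_chunk_inclusive_scan(T* buf, int n /* == 2 * num_threads_x */,
                                           T block_total, BinaryFunction binary_op) {
  buf[0] = binary_op(buf[0], block_total);
  for (int i = 1; i < n; ++i) {
    buf[i] = binary_op(buf[i - 1], buf[i]);
  }
}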
void check_fits_in_unsigned(int64_t val, const char* name) {
constexpr auto umax = std::numeric_limits<unsigned>::max();
TORCH_CHECK(
val >= 0 && val <= umax, name, " must fit in a 32-bit unsigned value");
}
template<typename scalar_t, class BinaryFunction>
__host__ void scan_outer_dim(const Tensor& self, Tensor& result,
int dim, scalar_t init, BinaryFunction binary_op) {
int64_t row_size = self.size(dim);
auto sizes = self.sizes();
// Treat all outer dimensions (i.e. dim_ < dim) as one.
int64_t num_orows = std::accumulate(sizes.begin(), sizes.begin() + dim, 1, std::multiplies<int64_t>());
  // Treat all inner dimensions (i.e. dim_ > dim) as one.
int64_t num_irows = std::accumulate(sizes.begin() + dim + 1, sizes.end(), 1, std::multiplies<int64_t>());
dim3 threads(std::min(512, int(num_irows)));
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[0];
dim3 grid(std::min(maxGridDim, num_orows), std::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x})));
check_fits_in_unsigned(num_irows, "num_irows");
check_fits_in_unsigned(num_orows, "num_orows");
check_fits_in_unsigned(row_size, "row_size");
tensor_kernel_scan_outer_dim<scalar_t><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(),
num_orows, num_irows, row_size, init, binary_op);
AT_CUDA_CHECK(cudaGetLastError());
}
template <typename scalar_t, class BinaryFunction>
void scan_innermost_dim(const Tensor& self, Tensor& result, scalar_t init, BinaryFunction binary_op) {
int64_t ndim = self.dim();
// Treat all outer dimensions as a single dimension.
int64_t row_size = self.size(ndim - 1);
int64_t num_rows = self.numel() / row_size;
dim3 threads(16, 32);
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[0];
dim3 grid(std::min(maxGridDim, ceil_div(num_rows, int64_t{threads.y})));
check_fits_in_unsigned(num_rows, "Number of rows (self.numel()/self.size(self.dim()-1))");
check_fits_in_unsigned(row_size, "row_size");
tensor_kernel_scan_innermost_dim<scalar_t, 16, 32><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(),
num_rows, row_size, init, binary_op);
AT_CUDA_CHECK(cudaGetLastError());
}
template<typename scalar_t, typename BinaryFunction>
void scan_dim(const Tensor& self, Tensor& result,
int64_t dim, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
Tensor self_ = self.contiguous();
result = result.contiguous();
if (dim == ndim - 1) {
scan_innermost_dim<scalar_t>(self_, result, init, binary_op);
} else {
scan_outer_dim<scalar_t>(self_, result, dim, init, binary_op);
}
}
Tensor& _logcumsumexp_out_cuda(Tensor& result, const Tensor& self, int64_t dim) {
result.resize_(self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
TensorArg output_arg{ result, "output", 1 };
TensorArg input_arg{ self, "input", 2 };
checkAllSameGPU("logcumsumexp", {output_arg, input_arg});
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "logcumsumexp_cuda", [&]() {
scalar_t init = -std::numeric_limits<scalar_t>::infinity();
auto log_add_exp = [] __device__ (scalar_t x, scalar_t y) -> scalar_t {
return ::log1p(std::exp(std::min(x, y) - std::max(x, y))) +
std::max(x, y);
};
scan_dim<scalar_t>(self, result, wrap_dim, init, log_add_exp);
});
return result;
}
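// Note (an added sketch, not from the upstream source): the log_add_exp lambda above
// is the overflow-safe rewrite
//   log(exp(x) + exp(y)) = max(x, y) + log1p(exp(min(x, y) - max(x, y)))
// so exp() only ever sees a non-positive argument. A host-side double-precision
// equivalent for checking small inputs (assumes <cmath>/<algorithm> are already
// pulled in, as the std:: calls above suggest):
static inline double log_add_exp_reference(double x, double y) {
  double hi = std::max(x, y);
  double lo = std::min(x, y);
  return hi + std::log1p(std::exp(lo - hi));
}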
Tensor _logcumsumexp_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return _logcumsumexp_out_cuda(result, self, dim);
}
Tensor& _cumsum_out_cuda(Tensor& result, const Tensor& self, int64_t dim) {
TensorArg output_arg{result, "output", 1};
TensorArg input_arg{self, "input", 2};
checkAllSameGPU("cumsum", {output_arg, input_arg});
checkSameType("cumsum", output_arg, input_arg);
result.resize_(self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, self.scalar_type(), "cumsum_cuda", [&]() {
scalar_t init = 0;
scan_dim<scalar_t>(
self,
result,
wrap_dim,
init,
std::plus<scalar_t>());
});
return result;
}
Tensor _cumsum_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return _cumsum_out_cuda(result, self, dim);
}
Tensor& _cumprod_out_cuda(Tensor& result, const Tensor& self, int64_t dim) {
TensorArg output_arg{result, "output", 1};
TensorArg input_arg{self, "input", 2};
checkAllSameGPU("cumprod", {output_arg, input_arg});
checkSameType("cumprod", output_arg, input_arg);
result.resize_(self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, self.scalar_type(), "cumprod_cuda", [&]() {
scalar_t init = 1;
scan_dim<scalar_t>(
self,
result,
wrap_dim,
init,
std::multiplies<scalar_t>());
});
return result;
}
Tensor _cumprod_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return _cumprod_out_cuda(result, self, dim);
}
}} // namespace at::native
|
1600bdf85c1a9cbabcf1868abe52dbaccbdab739.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, false>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 1600bdf85c1a9cbabcf1868abe52dbaccbdab739.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, false>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
229a3093df2d447e836f9c081f34bce91e90af9c.hip | // !!! This is a file automatically generated by hipify!!!
#include "local_config.h"
#include "common_hip.cuh"
#include <string.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t twoLayerBP(Matrix &pGradW1, Matrix &pGradW2, Matrix &pGradb1, Matrix &pGradb2, const Matrix &input, const Matrix &delta3, const Matrix &W1, const Matrix &W2, const Matrix &a2, const Matrix &rho);
int main_2bp()
{
const char *base_dir = BASE_DIR;
const char *test_name = "test_2bp";
char filename[256];
char *input_suffiex = "";
char *res_suffiex = "res";
Matrix pGradW1, pGradW2, pGradb1, pGradb2, input, delta3, W1, W2, a2, rho;
int res = 0;
//'pGradW1', 'pGradW2', 'pGradb1', 'pGradb2', 'input', ...
// 'delta3', 'W1', 'W2', 'a2'
hipError_t cudaStatus;
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, pGradW1); // just for a2's size
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, pGradW2); // just for a3's size
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, pGradb1);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, pGradb2);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, input);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, delta3);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, W1);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, W2);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, a2);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, rho);
// Add vectors in parallel.
cudaStatus = twoLayerBP(pGradW1, pGradW2, pGradb1, pGradb2, input, delta3, W1, W2, a2, rho);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "twoLayerBP failed!\n");
res = -1;
goto real_exit;
}
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!\n");
res = -1;
goto real_exit;
}
IO_MATRIX_WRAPPER(filename, base_dir, test_name, res_suffiex, write_matrix, pGradW1);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, res_suffiex, write_matrix, pGradW2);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, res_suffiex, write_matrix, pGradb1);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, res_suffiex, write_matrix, pGradb2);
real_exit:
free_matrix(pGradW1);
free_matrix(pGradW2);
free_matrix(pGradb1);
free_matrix(pGradb2);
free_matrix(input);
free_matrix(delta3);
free_matrix(W1);
free_matrix(W2);
free_matrix(a2);
return res;
}
/*
int gpu_twolayer_bp(const Matrix &d_input, // INPUT
const Matrix &d_rho, // INPUT
const Matrix &d_W1, // INPUT
const Matrix &d_W2, // INPUT
const Matrix &d_a2, // INPUT
const Matrix &d_a3, // INPUT
Matrix &d_sparsity_der, // AUX
Matrix &d_delta2, // AUX
Matrix &d_delta3, // INPUT
Matrix &d_pGradW1, // OUTPUT
Matrix &d_pGradW2, // OUTPUT
Matrix &d_pGradb1, // OUTPUT
Matrix &d_pGradb2, // OUTPUT
gHandler_t *handle) // INPUT
*/
// Helper function for using CUDA to add vectors in parallel.
hipError_t twoLayerBP(Matrix &pGradW1, Matrix &pGradW2, Matrix &pGradb1, Matrix &pGradb2, const Matrix &input, const Matrix &delta3, const Matrix &W1, const Matrix &W2, const Matrix &a2, const Matrix &rho)
{
hipError_t cudaStatus;
gHandler_t * handle = NULL;
hipblasStatus_t status;
int dInput = input.row;
int nSamples = input.col;
int dHidden = a2.row;
int dOutput = dInput;
int i, niter = 500;
clock_t startTime, stopTime, elapsedTime;
Matrix d_input, d_rho, d_W1, d_W2, d_a2, d_a3, d_sparsity_der, d_delta2, d_delta3, d_pGradW1, d_pGradW2, d_pGradb1, d_pGradb2;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
goto Error;
}
// Allocate GPU buffers
CUDA_ZERO_MATRIX(d_pGradW1, pGradW1);
CUDA_ZERO_MATRIX(d_pGradW2, pGradW2);
CUDA_ZERO_MATRIX(d_pGradb1, pGradb1);
CUDA_ZERO_MATRIX(d_pGradb2, pGradb2);
CUDA_CLONE_MATRIX(d_input, input);
CUDA_CLONE_MATRIX(d_delta3, delta3);
CUDA_CLONE_MATRIX(d_W1, W1);
CUDA_CLONE_MATRIX(d_W2, W2);
CUDA_CLONE_MATRIX(d_a2, a2);
CUDA_CLONE_MATRIX(d_rho, rho);
CUDA_ZEROS(d_a3, dOutput, nSamples);
CUDA_ZEROS(d_sparsity_der, a2.row, 1);
CUDA_ZEROS(d_delta2, dHidden, nSamples); // % dHidden * nSamples
handle = createGlobalHandle(nSamples, dInput, dHidden);
fprintf(stderr, "gpu_twolayer_bp\n");
fflush(stderr);
startTime = clock();
for (i = 0; i < niter; ++i)
if (gpu_twolayer_bp(d_input, d_rho, d_W1, d_W2, d_a2, d_a3, d_sparsity_der, d_delta2, d_delta3, d_pGradW1, d_pGradW2, d_pGradb1, d_pGradb2, handle) == -1)
{
cudaStatus = hipErrorLaunchFailure;
fprintf(stderr, "gpu_twolayer_bp error\n");
}
else
{
CUDA_FETCH_MATRIX(pGradW1, d_pGradW1);
CUDA_FETCH_MATRIX(pGradW2, d_pGradW2);
CUDA_FETCH_MATRIX(pGradb1, d_pGradb1);
CUDA_FETCH_MATRIX(pGradb2, d_pGradb2);
}
stopTime = clock();
elapsedTime = stopTime - startTime;
printf("OWLQN Optimization takes: %5.2f s \n", ((float)elapsedTime/CLOCKS_PER_SEC));
printf("Number of Evaluation: %d\n", niter);
Error:
destroyGlobalHandle(&handle);
hipFree(d_rho.elements);
hipFree(d_sparsity_der.elements);
hipFree(d_delta2.elements);
hipFree(d_delta3.elements);
hipFree(d_a2.elements);
hipFree(d_a3.elements);
hipFree(d_input.elements);
hipFree(d_W1.elements);
hipFree(d_W2.elements);
hipFree(d_pGradW1.elements);
hipFree(d_pGradW2.elements);
hipFree(d_pGradb1.elements);
hipFree(d_pGradb2.elements);
return cudaStatus;
}
| 229a3093df2d447e836f9c081f34bce91e90af9c.cu | #include "local_config.h"
#include "common.cuh"
#include <string.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t twoLayerBP(Matrix &pGradW1, Matrix &pGradW2, Matrix &pGradb1, Matrix &pGradb2, const Matrix &input, const Matrix &delta3, const Matrix &W1, const Matrix &W2, const Matrix &a2, const Matrix &rho);
int main_2bp()
{
const char *base_dir = BASE_DIR;
const char *test_name = "test_2bp";
char filename[256];
char *input_suffiex = "";
char *res_suffiex = "res";
Matrix pGradW1, pGradW2, pGradb1, pGradb2, input, delta3, W1, W2, a2, rho;
int res = 0;
//'pGradW1', 'pGradW2', 'pGradb1', 'pGradb2', 'input', ...
// 'delta3', 'W1', 'W2', 'a2'
cudaError_t cudaStatus;
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, pGradW1); // just for a2's size
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, pGradW2); // just for a3's size
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, pGradb1);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, pGradb2);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, input);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, delta3);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, W1);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, W2);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, a2);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, rho);
// Add vectors in parallel.
cudaStatus = twoLayerBP(pGradW1, pGradW2, pGradb1, pGradb2, input, delta3, W1, W2, a2, rho);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "twoLayerBP failed!\n");
res = -1;
goto real_exit;
}
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!\n");
res = -1;
goto real_exit;
}
IO_MATRIX_WRAPPER(filename, base_dir, test_name, res_suffiex, write_matrix, pGradW1);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, res_suffiex, write_matrix, pGradW2);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, res_suffiex, write_matrix, pGradb1);
IO_MATRIX_WRAPPER(filename, base_dir, test_name, res_suffiex, write_matrix, pGradb2);
real_exit:
free_matrix(pGradW1);
free_matrix(pGradW2);
free_matrix(pGradb1);
free_matrix(pGradb2);
free_matrix(input);
free_matrix(delta3);
free_matrix(W1);
free_matrix(W2);
free_matrix(a2);
return res;
}
/*
int gpu_twolayer_bp(const Matrix &d_input, // INPUT
const Matrix &d_rho, // INPUT
const Matrix &d_W1, // INPUT
const Matrix &d_W2, // INPUT
const Matrix &d_a2, // INPUT
const Matrix &d_a3, // INPUT
Matrix &d_sparsity_der, // AUX
Matrix &d_delta2, // AUX
Matrix &d_delta3, // INPUT
Matrix &d_pGradW1, // OUTPUT
Matrix &d_pGradW2, // OUTPUT
Matrix &d_pGradb1, // OUTPUT
Matrix &d_pGradb2, // OUTPUT
gHandler_t *handle) // INPUT
*/
// Helper function for using CUDA to add vectors in parallel.
cudaError_t twoLayerBP(Matrix &pGradW1, Matrix &pGradW2, Matrix &pGradb1, Matrix &pGradb2, const Matrix &input, const Matrix &delta3, const Matrix &W1, const Matrix &W2, const Matrix &a2, const Matrix &rho)
{
cudaError_t cudaStatus;
gHandler_t * handle = NULL;
cublasStatus_t status;
int dInput = input.row;
int nSamples = input.col;
int dHidden = a2.row;
int dOutput = dInput;
int i, niter = 500;
clock_t startTime, stopTime, elapsedTime;
Matrix d_input, d_rho, d_W1, d_W2, d_a2, d_a3, d_sparsity_der, d_delta2, d_delta3, d_pGradW1, d_pGradW2, d_pGradb1, d_pGradb2;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
goto Error;
}
// Allocate GPU buffers
CUDA_ZERO_MATRIX(d_pGradW1, pGradW1);
CUDA_ZERO_MATRIX(d_pGradW2, pGradW2);
CUDA_ZERO_MATRIX(d_pGradb1, pGradb1);
CUDA_ZERO_MATRIX(d_pGradb2, pGradb2);
CUDA_CLONE_MATRIX(d_input, input);
CUDA_CLONE_MATRIX(d_delta3, delta3);
CUDA_CLONE_MATRIX(d_W1, W1);
CUDA_CLONE_MATRIX(d_W2, W2);
CUDA_CLONE_MATRIX(d_a2, a2);
CUDA_CLONE_MATRIX(d_rho, rho);
CUDA_ZEROS(d_a3, dOutput, nSamples);
CUDA_ZEROS(d_sparsity_der, a2.row, 1);
CUDA_ZEROS(d_delta2, dHidden, nSamples); // % dHidden * nSamples
handle = createGlobalHandle(nSamples, dInput, dHidden);
fprintf(stderr, "gpu_twolayer_bp\n");
fflush(stderr);
startTime = clock();
for (i = 0; i < niter; ++i)
if (gpu_twolayer_bp(d_input, d_rho, d_W1, d_W2, d_a2, d_a3, d_sparsity_der, d_delta2, d_delta3, d_pGradW1, d_pGradW2, d_pGradb1, d_pGradb2, handle) == -1)
{
cudaStatus = cudaErrorLaunchFailure;
fprintf(stderr, "gpu_twolayer_bp error\n");
}
else
{
CUDA_FETCH_MATRIX(pGradW1, d_pGradW1);
CUDA_FETCH_MATRIX(pGradW2, d_pGradW2);
CUDA_FETCH_MATRIX(pGradb1, d_pGradb1);
CUDA_FETCH_MATRIX(pGradb2, d_pGradb2);
}
stopTime = clock();
elapsedTime = stopTime - startTime;
printf("OWLQN Optimization takes: %5.2f s \n", ((float)elapsedTime/CLOCKS_PER_SEC));
printf("Number of Evaluation: %d\n", niter);
Error:
destroyGlobalHandle(&handle);
cudaFree(d_rho.elements);
cudaFree(d_sparsity_der.elements);
cudaFree(d_delta2.elements);
cudaFree(d_delta3.elements);
cudaFree(d_a2.elements);
cudaFree(d_a3.elements);
cudaFree(d_input.elements);
cudaFree(d_W1.elements);
cudaFree(d_W2.elements);
cudaFree(d_pGradW1.elements);
cudaFree(d_pGradW2.elements);
cudaFree(d_pGradb1.elements);
cudaFree(d_pGradb2.elements);
return cudaStatus;
}
|
fc715505122c3dcce7de09c03f972a8c43b05502.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "repeat.h"
#define CARRAY_SIZE 16384
__constant__ unsigned int d_carray[CARRAY_SIZE];
unsigned int h_test[CARRAY_SIZE];
__global__ void kclat (unsigned int *ts, unsigned int *out, int p1, int p2, int its2)
{
int t1 = p1; int t2 = p1*p1; int t3 = p1*p1+p1; int t4 = p1*p1+p2;
int t5 = p1*p2; int t6 = p1*p2+p1; int t7 = p1*p2+p2; int t8 = p2*p1*p2;
int start_time, end_time;
unsigned int p;
int p_start = (blockIdx.x == 0) ? 0 : 8256;
if (((1<<blockIdx.x) & p1) == 0) return;
for (int j=0;j<2;j++)
{
p = p_start;
int its = (j==0)? 2 : its2;
start_time = clock();
for (int i=0;i<its;i++)
{
repeat256(p = d_carray[p];)
}
end_time = clock();
}
t1 = p;
out[0] = t1+t2+t3+t4+t5+t6+t7+t8;
if ((threadIdx.x & 31) == 0)
{
ts[(((blockIdx.x)*((blockDim.x+31)/32))+(threadIdx.x/32))*2] = start_time;
ts[(((blockIdx.x)*((blockDim.x+31)/32))+(threadIdx.x/32))*2 + 1] = end_time;
}
}
__global__ void kcicache_interfere (unsigned int *ts, unsigned int *out, int p1, int p2, int its2)
{
int t1 = p1; int t2 = p1*p1; int t3 = p1*p1+p1; int t4 = p1*p1+p2;
int t5 = p1*p2; int t6 = p1*p2+p1; int t7 = p1*p2+p2; int t8 = p2*p1*p2;
int start_time, end_time;
unsigned int p;
if (((1<<blockIdx.x) & p1) == 0) return;
if (blockIdx.x == 0)
{
for (int j=0;j<2;j++)
{
p = 0;
int its = (j==0)? 2 : its2;
start_time = clock();
for (int i=0;i<its;i++)
{
repeat256(p = d_carray[p];)
}
end_time = clock();
}
}
else
{
int its = its2 * 4;
for (int i=0;i<its;i++) {
repeat159(t1 = abs(t2); t2 = abs(t3); t3=abs(t4); t4=abs(t5); t5=abs(t6); t6=abs(t7); t7=abs(t8);t8=abs(t1);)
}
}
t1 = p;
out[0] = t1+t2+t3+t4+t5+t6+t7+t8;
if ((threadIdx.x & 31) == 0)
{
ts[(((blockIdx.x)*((blockDim.x+31)/32))+(threadIdx.x/32))*2] = start_time;
ts[(((blockIdx.x)*((blockDim.x+31)/32))+(threadIdx.x/32))*2 + 1] = end_time;
}
}
__global__ void kcbw (unsigned int *ts, unsigned int *out, int p1, int p2, int its2)
{
int t1 = p1; int t2 = p1*p1; int t3 = p1*p1+p1; int t4 = p1*p1+p2;
int t5 = p1*p2; int t6 = p1*p2+p1; int t7 = p1*p2+p2; int t8 = p2*p1*p2;
int start_time, end_time;
volatile int p;
if (blockIdx.x != 0) its2 *= 1.5f;
for (int j=0;j<2;j++)
{
p = ((blockIdx.x)&4095)*64;
int its = (j==0)? 2 : its2;
start_time = clock();
for (int i=0;i<its;i++)
{
repeat32(t1 += d_carray[p]; t2+=d_carray[p+512]; t3+=d_carray[p+1024]; t4+=d_carray[p+1536];
t5+=d_carray[p+2048]; t6+=d_carray[p+2560]; t7+=d_carray[p+3072]; t8+=d_carray[p+3584];
)
}
end_time = clock();
}
t1 += p;
out[0] = t1+t2+t3+t4+t5+t6+t7+t8;
if ((threadIdx.x & 31) == 0)
{
ts[(((blockIdx.x)*((blockDim.x+31)/32))+(threadIdx.x/32))*2] = start_time;
ts[(((blockIdx.x)*((blockDim.x+31)/32))+(threadIdx.x/32))*2 + 1] = end_time;
}
}
__global__ void kcbw_8t (unsigned int *ts, unsigned int *out, int p1, int p2, int its2)
{
int t1 = p1; int t2 = p1*p1; int t3 = p1*p1+p1; int t4 = p1*p1+p2;
int t5 = p1*p2; int t6 = p1*p2+p1; int t7 = p1*p2+p2; int t8 = p2*p1*p2;
int start_time, end_time;
volatile int p;
if (blockIdx.x != 0) its2 *= 1.5f;
for (int j=0;j<2;j++)
{
p = threadIdx.x*64+((blockIdx.x/10)&1)*4096;
int its = (j==0)? 2 : its2;
start_time = clock();
for (int i=0;i<its;i++)
{
repeat32(t1 += d_carray[p]; t2+=d_carray[p+512]; t3+=d_carray[p+1024]; t4+=d_carray[p+1536];
t5+=d_carray[p+2048]; t6+=d_carray[p+2560]; t7+=d_carray[p+3072]; t8+=d_carray[p+3584];
)
}
end_time = clock();
}
t1 += p;
out[0] = t1+t2+t3+t4+t5+t6+t7+t8;
if ((threadIdx.x & 31) == 0)
{
ts[(((blockIdx.x)*((blockDim.x+31)/32))+(threadIdx.x/32))*2] = start_time;
ts[(((blockIdx.x)*((blockDim.x+31)/32))+(threadIdx.x/32))*2 + 1] = end_time;
}
}
void cmem_stride(unsigned int *h_carray, unsigned int *d_ts, unsigned int *d_out, unsigned int *ts, int stride, int min_size, int max_size, int step_size)
{
dim3 Db = dim3(1);
dim3 Dg = dim3(1,1,1);
hipError_t errcode;
printf ("Constant memory, %d-byte stride\n", stride*4);
printf (" [array size]: [clocks per read], [max], [min]\n");
for (int size = min_size; size <= max_size; size+=step_size)
{
// Set up array contents
for (int i=0;i<size;i++)
{
h_carray[i] = i+stride;
if (h_carray[i] >= size)
h_carray[i] %= stride;
}
hipMemcpyToSymbol(d_carray, h_carray, CARRAY_SIZE*4);
unsigned long long sum_time = {0};
unsigned int max_time=0, min_time=(unsigned)-1;
int kits = 20;
int its = 30;
for (int k = 0; k < kits; k++)
{
// Launch kernel
hipLaunchKernelGGL(( kclat), dim3(Dg), dim3(Db), 0, 0, d_ts, d_out, 1,3, its);
errcode = hipGetLastError();
if (errcode != hipSuccess)
{
printf ("Failed: %s\n", hipGetErrorString(errcode));
}
hipDeviceSynchronize();
hipMemcpy(ts, d_ts, 16, hipMemcpyDeviceToHost);
sum_time += ts[1]-ts[0];
if (ts[1]-ts[0] > max_time) max_time = ts[1]-ts[0];
if (ts[1]-ts[0] < min_time) min_time = ts[1]-ts[0];
}
printf (" %d: %.3f, %.3f, %.3f clk\n", size*4,
sum_time/(kits*its*256.0),
min_time/(its*256.0),
max_time/(its*256.0));
}
printf ("\n");
}
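/* Worked example (an added note, not from the original benchmark): for size = 8 and
 * stride = 2 the setup loop builds h_carray = {2, 3, 4, 5, 6, 7, 0, 1}, so the timed
 * chase p = d_carray[p] starting at 0 walks 0 -> 2 -> 4 -> 6 -> 0 -> ... Each load
 * depends on the previous one, so the loop exposes raw constant-cache latency, and
 * the printed figure is (end - start) / (iterations * 256 unrolled loads).
 */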
void cmem_stride_2 (unsigned int *h_carray, unsigned int *d_ts, unsigned int *d_out, unsigned int *ts, int stride, int min_size, int max_size, int step_size, unsigned int exec_mask)
{
dim3 Db = dim3(1);
dim3 Dg = dim3(31,1,1);
hipError_t errcode;
if (max_size > 8128)
{
printf ("Size %d too big. Must be <= 8128 elements\n", max_size);
return;
}
printf ("Constant memory, %d-byte stride, blocks [", stride);
for (int i=0;i<31;i++)
if ((1<<i)&exec_mask) printf (" %d", i);
printf (" ]\n");
printf (" [array size]: [clocks per read], [max], [min]\n");
for (int size = min_size; size <= max_size; size+=step_size)
{
// Set up array contents
for (int i=0;i<size;i++)
{
h_carray[i] = i+stride;
if (h_carray[i] >= size)
h_carray[i] %= stride;
h_carray[8256+i] = 8256+i+stride;
if (h_carray[8256+i] >= 8256+size)
h_carray[8256+i] = 8256 + (h_carray[8256+i]%stride);
}
hipMemcpyToSymbol(d_carray, h_carray, CARRAY_SIZE*4);
unsigned long long sum_time = {0};
unsigned int max_time=0, min_time=(unsigned)-1;
int kits = 20;
int its = 30;
for (int k = 0; k < kits; k++)
{
// Launch kernel
hipLaunchKernelGGL(( kclat), dim3(Dg), dim3(Db), 0, 0, d_ts, d_out, exec_mask, 3, its);
errcode = hipGetLastError();
if (errcode != hipSuccess)
{
printf ("Failed: %s\n", hipGetErrorString(errcode));
}
hipDeviceSynchronize();
hipMemcpy(ts, d_ts, 16, hipMemcpyDeviceToHost);
sum_time += ts[1]-ts[0];
if (ts[1]-ts[0] > max_time) max_time = ts[1]-ts[0];
if (ts[1]-ts[0] < min_time) min_time = ts[1]-ts[0];
}
printf (" %d: %.3f, %.3f, %.3f clk\n", size*4,
sum_time/(kits*its*256.0),
min_time/(its*256.0),
max_time/(its*256.0));
}
printf ("\n");
}
void cmem_icache_sharing (unsigned int *h_carray, unsigned int *d_ts, unsigned int *d_out, unsigned int *ts, int stride, int min_size, int max_size, int step_size, unsigned int exec_mask)
{
dim3 Db = dim3(1);
dim3 Dg = dim3(31,1,1);
hipError_t errcode;
if (max_size > 16384)
{
printf ("Size %d too big. Must be <= 16384 elements\n", max_size);
return;
}
printf ("Constant memory and Icache sharing, %d-byte stride, blocks [", stride);
for (int i=0;i<31;i++)
if ((1<<i)&exec_mask) printf (" %d", i);
printf (" ]\n");
printf (" [array size]: [clocks per read], [max], [min]\n");
for (int size = min_size; size <= max_size; size+=step_size)
{
// Set up array contents
for (int i=0;i<size;i++)
{
h_carray[i] = i+stride;
if (h_carray[i] >= size)
h_carray[i] %= stride;
}
hipMemcpyToSymbol(d_carray, h_carray, CARRAY_SIZE*4);
unsigned long long sum_time = {0};
unsigned int max_time=0, min_time=(unsigned)-1;
int kits = 20;
int its = 30;
for (int k = 0; k < kits; k++)
{
// Launch kernel
hipLaunchKernelGGL(( kcicache_interfere), dim3(Dg), dim3(Db), 0, 0, d_ts, d_out, exec_mask, 3, its);
errcode = hipGetLastError();
if (errcode != hipSuccess)
{
printf ("Failed: %s\n", hipGetErrorString(errcode));
}
hipDeviceSynchronize();
hipMemcpy(ts, d_ts, 16, hipMemcpyDeviceToHost);
sum_time += ts[1]-ts[0];
if (ts[1]-ts[0] > max_time) max_time = ts[1]-ts[0];
if (ts[1]-ts[0] < min_time) min_time = ts[1]-ts[0];
}
printf (" %d: %.3f, %.3f, %.3f clk\n", size*4,
sum_time/(kits*its*256.0),
min_time/(its*256.0),
max_time/(its*256.0));
}
printf ("\n");
}
void cmem_bandwidth (unsigned int *d_ts, unsigned int *d_out, unsigned int *ts, unsigned int nblocks, int nthreads)
{
dim3 Db = dim3(nthreads);
dim3 Dg = dim3(nblocks,1,1);
dim3 Dgpad = dim3(0,1,1);
while ((Dg.x+Dgpad.x)%2 == 0 || (Dg.x+Dgpad.x)%5 == 0) Dgpad.x++;
hipError_t errcode;
unsigned long long sum_time = {0};
unsigned int max_time=0, min_time=(unsigned)-1;
int kits = 20;
int its = 30;
for (int k = 0; k < kits; k++)
{
// Launch kernel
if (nthreads == 1)
hipLaunchKernelGGL(( kcbw), dim3(Dg), dim3(Db), 0, 0, d_ts, d_out, 0, 0, its);
else
hipLaunchKernelGGL(( kcbw_8t), dim3(Dg), dim3(Db), 0, 0, d_ts, d_out, 0, 0, its);
errcode = hipGetLastError();
if (errcode != hipSuccess)
{
printf ("Failed: %s\n", hipGetErrorString(errcode));
}
hipDeviceSynchronize();
hipMemcpy(ts, d_ts, 16, hipMemcpyDeviceToHost);
sum_time += ts[1]-ts[0];
if (ts[1]-ts[0] > max_time) max_time = ts[1]-ts[0];
if (ts[1]-ts[0] < min_time) min_time = ts[1]-ts[0];
if (Dgpad.x > 0)
{
hipLaunchKernelGGL(( kcbw), dim3(Dgpad), dim3(Db), 0, 0, d_ts, d_out, 0, 0, 1);
hipDeviceSynchronize();
}
}
printf (" %d: %.3f, %.3f, %.3f bytes/clk\n", nblocks,
(nblocks*kits*its*256.0*256.0*Db.x)/sum_time,
(nblocks*its*256.0*256.0*Db.x)/max_time,
(nblocks*its*256.0*256.0*Db.x)/min_time);
}
int main()
{
unsigned int ts[4096]; // ts, output from kernel. Two elements used per thread.
unsigned int *d_ts;
unsigned int *d_out; // Unused memory for storing output
unsigned int *h_carray;
// Allocate device array.
hipError_t errcode;
if (hipSuccess != (errcode = hipMalloc((void**)&d_ts, sizeof(ts))))
{
printf ("hipMalloc failed %s:%d\n", __FILE__, __LINE__);
printf (" %s\n", hipGetErrorString(errcode));
return -1;
}
if (hipSuccess != hipMalloc((void**)&d_out, 4))
{
printf ("hipMalloc failed %s:%d\n", __FILE__, __LINE__);
return -1;
}
h_carray = (unsigned int*)malloc(CARRAY_SIZE*4);
// Stride 256 overview
cmem_stride(h_carray, d_ts, d_out, ts, 256/4, 16, 16384, 256/4);
// Stride 64 L2 and L3
cmem_stride(h_carray, d_ts, d_out, ts, 64/4, 6144/4, 40960/4, 64/4);
// Stride 16 L1
cmem_stride(h_carray, d_ts, d_out, ts, 16/4, 512-64, 512+192, 16/4);
// Different-TPC Sharing
printf ("Different TPC Testing ");
cmem_stride_2(h_carray, d_ts, d_out, ts, 256/4, 16, 8128, 256/4, 0x81);
// Same-TPC Sharing
printf ("Shared TPC Testing ");
cmem_stride_2(h_carray, d_ts, d_out, ts, 256/4, 16, 8128, 256/4, 0x401);
// Same-SM sharing, L1
printf ("Shared SM Testing ");
cmem_stride_2(h_carray, d_ts, d_out, ts, 256/4, 16, 8128, 256/4, 0x40000001);
// Instruction cache sharing
printf ("Different TPC ");
cmem_icache_sharing(h_carray, d_ts, d_out, ts, 256/4, 16, 16384, 256/4, 0x81);
printf ("Shared TPC ");
cmem_icache_sharing(h_carray, d_ts, d_out, ts, 256/4, 16, 16384, 256/4, 0x401);
printf ("Shared SM ");
cmem_icache_sharing(h_carray, d_ts, d_out, ts, 256/4, 16, 16384, 256/4, 0x40000001);
// Constant cache bandwidth
printf ("Constant cache L3 bandwidth, blocks touching addresses 2KB apart\n");
for (int nblocks = 1; nblocks <= 60; nblocks++)
cmem_bandwidth (d_ts, d_out, ts, nblocks, 1);
printf ("\n");
printf ("Constant cache L3 bandwidth, 8 threads/warp\n");
for (int nblocks = 1; nblocks <= 60; nblocks++)
cmem_bandwidth (d_ts, d_out, ts, nblocks, 8);
printf ("\n");
hipFree(d_ts);
hipFree(d_out);
free(h_carray);
return 0;
}
| fc715505122c3dcce7de09c03f972a8c43b05502.cu | #include <stdio.h>
#include "repeat.h"
#define CARRAY_SIZE 16384
__constant__ unsigned int d_carray[CARRAY_SIZE];
unsigned int h_test[CARRAY_SIZE];
__global__ void kclat (unsigned int *ts, unsigned int *out, int p1, int p2, int its2)
{
int t1 = p1; int t2 = p1*p1; int t3 = p1*p1+p1; int t4 = p1*p1+p2;
int t5 = p1*p2; int t6 = p1*p2+p1; int t7 = p1*p2+p2; int t8 = p2*p1*p2;
int start_time, end_time;
unsigned int p;
int p_start = (blockIdx.x == 0) ? 0 : 8256;
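    // Block 0 chases the chain that starts at element 0; every other enabled block chases a second,
    // independent chain starting at element 8256 (filled in by cmem_stride_2).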
if (((1<<blockIdx.x) & p1) == 0) return;
for (int j=0;j<2;j++)
{
p = p_start;
int its = (j==0)? 2 : its2;
start_time = clock();
for (int i=0;i<its;i++)
{
repeat256(p = d_carray[p];)
}
end_time = clock();
}
t1 = p;
out[0] = t1+t2+t3+t4+t5+t6+t7+t8;
if ((threadIdx.x & 31) == 0)
{
ts[(((blockIdx.x)*((blockDim.x+31)/32))+(threadIdx.x/32))*2] = start_time;
ts[(((blockIdx.x)*((blockDim.x+31)/32))+(threadIdx.x/32))*2 + 1] = end_time;
}
}
__global__ void kcicache_interfere (unsigned int *ts, unsigned int *out, int p1, int p2, int its2)
{
int t1 = p1; int t2 = p1*p1; int t3 = p1*p1+p1; int t4 = p1*p1+p2;
int t5 = p1*p2; int t6 = p1*p2+p1; int t7 = p1*p2+p2; int t8 = p2*p1*p2;
int start_time, end_time;
unsigned int p;
if (((1<<blockIdx.x) & p1) == 0) return;
if (blockIdx.x == 0)
{
for (int j=0;j<2;j++)
{
p = 0;
int its = (j==0)? 2 : its2;
start_time = clock();
for (int i=0;i<its;i++)
{
repeat256(p = d_carray[p];)
}
end_time = clock();
}
}
else
{
int its = its2 * 4;
for (int i=0;i<its;i++) {
repeat159(t1 = abs(t2); t2 = abs(t3); t3=abs(t4); t4=abs(t5); t5=abs(t6); t6=abs(t7); t7=abs(t8);t8=abs(t1);)
}
}
t1 = p;
out[0] = t1+t2+t3+t4+t5+t6+t7+t8;
if ((threadIdx.x & 31) == 0)
{
ts[(((blockIdx.x)*((blockDim.x+31)/32))+(threadIdx.x/32))*2] = start_time;
ts[(((blockIdx.x)*((blockDim.x+31)/32))+(threadIdx.x/32))*2 + 1] = end_time;
}
}
__global__ void kcbw (unsigned int *ts, unsigned int *out, int p1, int p2, int its2)
{
int t1 = p1; int t2 = p1*p1; int t3 = p1*p1+p1; int t4 = p1*p1+p2;
int t5 = p1*p2; int t6 = p1*p2+p1; int t7 = p1*p2+p2; int t8 = p2*p1*p2;
int start_time, end_time;
volatile int p;
if (blockIdx.x != 0) its2 *= 1.5f;
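    // Blocks other than block 0 run 1.5x the iterations, presumably so their traffic covers block 0's
    // whole timed window (only block 0's timestamps are read back on the host).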
for (int j=0;j<2;j++)
{
p = ((blockIdx.x)&4095)*64;
int its = (j==0)? 2 : its2;
start_time = clock();
for (int i=0;i<its;i++)
{
repeat32(t1 += d_carray[p]; t2+=d_carray[p+512]; t3+=d_carray[p+1024]; t4+=d_carray[p+1536];
t5+=d_carray[p+2048]; t6+=d_carray[p+2560]; t7+=d_carray[p+3072]; t8+=d_carray[p+3584];
)
}
end_time = clock();
}
t1 += p;
out[0] = t1+t2+t3+t4+t5+t6+t7+t8;
if ((threadIdx.x & 31) == 0)
{
ts[(((blockIdx.x)*((blockDim.x+31)/32))+(threadIdx.x/32))*2] = start_time;
ts[(((blockIdx.x)*((blockDim.x+31)/32))+(threadIdx.x/32))*2 + 1] = end_time;
}
}
__global__ void kcbw_8t (unsigned int *ts, unsigned int *out, int p1, int p2, int its2)
{
int t1 = p1; int t2 = p1*p1; int t3 = p1*p1+p1; int t4 = p1*p1+p2;
int t5 = p1*p2; int t6 = p1*p2+p1; int t7 = p1*p2+p2; int t8 = p2*p1*p2;
int start_time, end_time;
volatile int p;
if (blockIdx.x != 0) its2 *= 1.5f;
for (int j=0;j<2;j++)
{
p = threadIdx.x*64+((blockIdx.x/10)&1)*4096;
int its = (j==0)? 2 : its2;
start_time = clock();
for (int i=0;i<its;i++)
{
repeat32(t1 += d_carray[p]; t2+=d_carray[p+512]; t3+=d_carray[p+1024]; t4+=d_carray[p+1536];
t5+=d_carray[p+2048]; t6+=d_carray[p+2560]; t7+=d_carray[p+3072]; t8+=d_carray[p+3584];
)
}
end_time = clock();
}
t1 += p;
out[0] = t1+t2+t3+t4+t5+t6+t7+t8;
if ((threadIdx.x & 31) == 0)
{
ts[(((blockIdx.x)*((blockDim.x+31)/32))+(threadIdx.x/32))*2] = start_time;
ts[(((blockIdx.x)*((blockDim.x+31)/32))+(threadIdx.x/32))*2 + 1] = end_time;
}
}
void cmem_stride(unsigned int *h_carray, unsigned int *d_ts, unsigned int *d_out, unsigned int *ts, int stride, int min_size, int max_size, int step_size)
{
dim3 Db = dim3(1);
dim3 Dg = dim3(1,1,1);
cudaError_t errcode;
printf ("Constant memory, %d-byte stride\n", stride*4);
printf (" [array size]: [clocks per read], [max], [min]\n");
for (int size = min_size; size <= max_size; size+=step_size)
{
// Set up array contents
for (int i=0;i<size;i++)
{
h_carray[i] = i+stride;
if (h_carray[i] >= size)
h_carray[i] %= stride;
}
cudaMemcpyToSymbol(d_carray, h_carray, CARRAY_SIZE*4);
unsigned long long sum_time = {0};
unsigned int max_time=0, min_time=(unsigned)-1;
int kits = 20;
int its = 30;
for (int k = 0; k < kits; k++)
{
// Launch kernel
kclat<<<Dg, Db>>> (d_ts, d_out, 1,3, its);
errcode = cudaGetLastError();
if (errcode != cudaSuccess)
{
printf ("Failed: %s\n", cudaGetErrorString(errcode));
}
cudaThreadSynchronize();
cudaMemcpy(ts, d_ts, 16, cudaMemcpyDeviceToHost);
sum_time += ts[1]-ts[0];
if (ts[1]-ts[0] > max_time) max_time = ts[1]-ts[0];
if (ts[1]-ts[0] < min_time) min_time = ts[1]-ts[0];
}
printf (" %d: %.3f, %.3f, %.3f clk\n", size*4,
sum_time/(kits*its*256.0),
min_time/(its*256.0),
max_time/(its*256.0));
}
printf ("\n");
}
void cmem_stride_2 (unsigned int *h_carray, unsigned int *d_ts, unsigned int *d_out, unsigned int *ts, int stride, int min_size, int max_size, int step_size, unsigned int exec_mask)
{
dim3 Db = dim3(1);
dim3 Dg = dim3(31,1,1);
cudaError_t errcode;
if (max_size > 8128)
{
printf ("Size %d too big. Must be <= 8128 elements\n", max_size);
return;
}
printf ("Constant memory, %d-byte stride, blocks [", stride);
for (int i=0;i<31;i++)
if ((1<<i)&exec_mask) printf (" %d", i);
printf (" ]\n");
printf (" [array size]: [clocks per read], [max], [min]\n");
for (int size = min_size; size <= max_size; size+=step_size)
{
// Set up array contents
for (int i=0;i<size;i++)
{
h_carray[i] = i+stride;
if (h_carray[i] >= size)
h_carray[i] %= stride;
h_carray[8256+i] = 8256+i+stride;
if (h_carray[8256+i] >= 8256+size)
h_carray[8256+i] = 8256 + (h_carray[8256+i]%stride);
}
cudaMemcpyToSymbol(d_carray, h_carray, CARRAY_SIZE*4);
unsigned long long sum_time = {0};
unsigned int max_time=0, min_time=(unsigned)-1;
int kits = 20;
int its = 30;
for (int k = 0; k < kits; k++)
{
// Launch kernel
kclat<<<Dg, Db>>> (d_ts, d_out, exec_mask, 3, its);
errcode = cudaGetLastError();
if (errcode != cudaSuccess)
{
printf ("Failed: %s\n", cudaGetErrorString(errcode));
}
cudaThreadSynchronize();
cudaMemcpy(ts, d_ts, 16, cudaMemcpyDeviceToHost);
sum_time += ts[1]-ts[0];
if (ts[1]-ts[0] > max_time) max_time = ts[1]-ts[0];
if (ts[1]-ts[0] < min_time) min_time = ts[1]-ts[0];
}
printf (" %d: %.3f, %.3f, %.3f clk\n", size*4,
sum_time/(kits*its*256.0),
min_time/(its*256.0),
max_time/(its*256.0));
}
printf ("\n");
}
void cmem_icache_sharing (unsigned int *h_carray, unsigned int *d_ts, unsigned int *d_out, unsigned int *ts, int stride, int min_size, int max_size, int step_size, unsigned int exec_mask)
{
dim3 Db = dim3(1);
dim3 Dg = dim3(31,1,1);
cudaError_t errcode;
if (max_size > 16384)
{
printf ("Size %d too big. Must be <= 16384 elements\n", max_size);
return;
}
printf ("Constant memory and Icache sharing, %d-byte stride, blocks [", stride);
for (int i=0;i<31;i++)
if ((1<<i)&exec_mask) printf (" %d", i);
printf (" ]\n");
printf (" [array size]: [clocks per read], [max], [min]\n");
for (int size = min_size; size <= max_size; size+=step_size)
{
// Set up array contents
for (int i=0;i<size;i++)
{
h_carray[i] = i+stride;
if (h_carray[i] >= size)
h_carray[i] %= stride;
}
cudaMemcpyToSymbol(d_carray, h_carray, CARRAY_SIZE*4);
unsigned long long sum_time = {0};
unsigned int max_time=0, min_time=(unsigned)-1;
int kits = 20;
int its = 30;
for (int k = 0; k < kits; k++)
{
// Launch kernel
kcicache_interfere<<<Dg, Db>>> (d_ts, d_out, exec_mask, 3, its);
errcode = cudaGetLastError();
if (errcode != cudaSuccess)
{
printf ("Failed: %s\n", cudaGetErrorString(errcode));
}
cudaThreadSynchronize();
cudaMemcpy(ts, d_ts, 16, cudaMemcpyDeviceToHost);
sum_time += ts[1]-ts[0];
if (ts[1]-ts[0] > max_time) max_time = ts[1]-ts[0];
if (ts[1]-ts[0] < min_time) min_time = ts[1]-ts[0];
}
printf (" %d: %.3f, %.3f, %.3f clk\n", size*4,
sum_time/(kits*its*256.0),
min_time/(its*256.0),
max_time/(its*256.0));
}
printf ("\n");
}
void cmem_bandwidth (unsigned int *d_ts, unsigned int *d_out, unsigned int *ts, unsigned int nblocks, int nthreads)
{
dim3 Db = dim3(nthreads);
dim3 Dg = dim3(nblocks,1,1);
dim3 Dgpad = dim3(0,1,1);
while ((Dg.x+Dgpad.x)%2 == 0 || (Dg.x+Dgpad.x)%5 == 0) Dgpad.x++;
cudaError_t errcode;
unsigned long long sum_time = {0};
unsigned int max_time=0, min_time=(unsigned)-1;
int kits = 20;
int its = 30;
for (int k = 0; k < kits; k++)
{
// Launch kernel
if (nthreads == 1)
kcbw<<<Dg, Db>>> (d_ts, d_out, 0, 0, its);
else
kcbw_8t<<<Dg, Db>>> (d_ts, d_out, 0, 0, its);
errcode = cudaGetLastError();
if (errcode != cudaSuccess)
{
printf ("Failed: %s\n", cudaGetErrorString(errcode));
}
cudaThreadSynchronize();
cudaMemcpy(ts, d_ts, 16, cudaMemcpyDeviceToHost);
sum_time += ts[1]-ts[0];
if (ts[1]-ts[0] > max_time) max_time = ts[1]-ts[0];
if (ts[1]-ts[0] < min_time) min_time = ts[1]-ts[0];
if (Dgpad.x > 0)
{
kcbw<<<Dgpad, Db>>> (d_ts, d_out, 0, 0, 1);
cudaThreadSynchronize();
}
}
printf (" %d: %.3f, %.3f, %.3f bytes/clk\n", nblocks,
(nblocks*kits*its*256.0*256.0*Db.x)/sum_time,
(nblocks*its*256.0*256.0*Db.x)/max_time,
(nblocks*its*256.0*256.0*Db.x)/min_time);
}
int main()
{
unsigned int ts[4096]; // ts, output from kernel. Two elements used per thread.
unsigned int *d_ts;
unsigned int *d_out; // Unused memory for storing output
unsigned int *h_carray;
// Allocate device array.
cudaError_t errcode;
if (cudaSuccess != (errcode = cudaMalloc((void**)&d_ts, sizeof(ts))))
{
printf ("cudaMalloc failed %s:%d\n", __FILE__, __LINE__);
printf (" %s\n", cudaGetErrorString(errcode));
return -1;
}
if (cudaSuccess != cudaMalloc((void**)&d_out, 4))
{
printf ("cudaMalloc failed %s:%d\n", __FILE__, __LINE__);
return -1;
}
h_carray = (unsigned int*)malloc(CARRAY_SIZE*4);
// Stride 256 overview
cmem_stride(h_carray, d_ts, d_out, ts, 256/4, 16, 16384, 256/4);
// Stride 64 L2 and L3
cmem_stride(h_carray, d_ts, d_out, ts, 64/4, 6144/4, 40960/4, 64/4);
// Stride 16 L1
cmem_stride(h_carray, d_ts, d_out, ts, 16/4, 512-64, 512+192, 16/4);
// Different-TPC Sharing
printf ("Different TPC Testing ");
cmem_stride_2(h_carray, d_ts, d_out, ts, 256/4, 16, 8128, 256/4, 0x81);
// Same-TPC Sharing
printf ("Shared TPC Testing ");
cmem_stride_2(h_carray, d_ts, d_out, ts, 256/4, 16, 8128, 256/4, 0x401);
// Same-SM sharing, L1
printf ("Shared SM Testing ");
cmem_stride_2(h_carray, d_ts, d_out, ts, 256/4, 16, 8128, 256/4, 0x40000001);
// Instruction cache sharing
printf ("Different TPC ");
cmem_icache_sharing(h_carray, d_ts, d_out, ts, 256/4, 16, 16384, 256/4, 0x81);
printf ("Shared TPC ");
cmem_icache_sharing(h_carray, d_ts, d_out, ts, 256/4, 16, 16384, 256/4, 0x401);
printf ("Shared SM ");
cmem_icache_sharing(h_carray, d_ts, d_out, ts, 256/4, 16, 16384, 256/4, 0x40000001);
// Constant cache bandwidth
printf ("Constant cache L3 bandwidth, blocks touching addresses 2KB apart\n");
for (int nblocks = 1; nblocks <= 60; nblocks++)
cmem_bandwidth (d_ts, d_out, ts, nblocks, 1);
printf ("\n");
printf ("Constant cache L3 bandwidth, 8 threads/warp\n");
for (int nblocks = 1; nblocks <= 60; nblocks++)
cmem_bandwidth (d_ts, d_out, ts, nblocks, 8);
printf ("\n");
cudaFree(d_ts);
cudaFree(d_out);
free(h_carray);
return 0;
}
|
f8790313243f42c9f215d249f5228a107d29e9ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* The matrix addition example on CUDA
*/
#include <stdio.h>
#include "helpers.cuh"
//define the sizes of the matrices here
#define ROWS 10000
#define COLS 10000
#define SIZE ROWS*COLS
//kernel that does the matrix addition. Just add each element to the respective one
__global__ void addMatrix(int *ans_cuda,int *matA_cuda,int *matB_cuda){
/*blockDim.y gives the height of a block along y axis
blockDim.x gives the width of a block along x axis
blockIdx.y gives the index of the current block along the y axis
blockIdx.x gives the index of the current block along the x axis
threadIdx.y gives the index of the current thread in the current block along y axis
threadIdx.x gives the index of the current thread in the current block along x axis
*/
//calculate the row number based on block IDs and thread IDs
int row = blockDim.y * blockIdx.y + threadIdx.y;
//calculate the column number based on block IDs and thread IDs
int col = blockDim.x * blockIdx.x + threadIdx.x;
//to remove any indices beyond the size of the array
if (row<ROWS && col <COLS){
//conversion of 2 dimensional indices to single dimension
int position = row*COLS + col;
//do the calculation
int i,j=0;
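            // Note: despite the file's "matrix addition" title, each element is assigned the arithmetic
            // expression below; the 10x10 loop just recomputes it to add work per element.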
for(i=0;i<10;i++){
for(j=0;j<10;j++){
int a=matA_cuda[position];
int b=matB_cuda[position];
ans_cuda[position]=(int)a*b-a-b-a/(double)b-b/(double)a;
}
}
}
}
int main(){
//allocate matrices
int *matA = (int *)malloc(sizeof(int)*SIZE);
int *matB = (int *)malloc(sizeof(int)*SIZE);
int *ans =(int *)malloc(sizeof(int)*SIZE);
if(matA==NULL || matB==NULL || ans==NULL){
perror("Mem full");
exit(1);
}
//generate
int row,col;
for(row=0;row<ROWS;row++){
for(col=0;col<COLS;col++){
int position = row*COLS + col;
matA[position]=row+col;
matB[position]=row*col;
}
}
/*************************CUDA STUFF STARTS HERE************************/
//variables for time measurements
hipEvent_t start,stop;
float elapsedtime;
//pointers for cuda memory locations
int *matA_cuda;
int *matB_cuda;
int *ans_cuda;
//allocate memory in cuda
checkCuda(hipMalloc((void **)&matA_cuda,sizeof(int)*SIZE));
checkCuda(hipMalloc((void **)&matB_cuda,sizeof(int)*SIZE));
checkCuda(hipMalloc((void **)&ans_cuda,sizeof(int)*SIZE));
//copy contents from ram to cuda
checkCuda(hipMemcpy(matA_cuda, matA, sizeof(int)*SIZE, hipMemcpyHostToDevice));
checkCuda(hipMemcpy(matB_cuda, matB, sizeof(int)*SIZE, hipMemcpyHostToDevice));
//thread configuration
int blockwidth=1;
dim3 numBlocks(ceil(COLS/(float)blockwidth),ceil(ROWS/(float)blockwidth));
dim3 threadsPerBlock(blockwidth,blockwidth);
//do the matrix addition on CUDA
//the moment at which we start measuring the time
hipEventCreate(&start);
hipEventRecord(start,0);
hipLaunchKernelGGL(( addMatrix), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, ans_cuda,matA_cuda,matB_cuda);
checkCuda(hipGetLastError());
checkCuda(hipDeviceSynchronize());
//the moment at which we stop measuring time
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
//copy the answer back
checkCuda(hipMemcpy(ans, ans_cuda, sizeof(int)*SIZE, hipMemcpyDeviceToHost));
//free the memory we allocated on CUDA
checkCuda(hipFree(matA_cuda));
checkCuda(hipFree(matB_cuda));
checkCuda(hipFree(ans_cuda));
/*************************CUDA STUFF ENDS HERE************************/
/*write the answer
for(row=0;row<ROWS;row++){
for(col=0;col<COLS;col++){
int position = row*COLS + col;
printf("%5d ", ans[position]);
}
printf("\n");
}*/
//Find and print the elapsed time
hipEventElapsedTime(&elapsedtime,start,stop);
fprintf(stderr,"Time spent for operation is %.10f seconds\n",elapsedtime/(float)1000);
return 0;
} | f8790313243f42c9f215d249f5228a107d29e9ea.cu | /* The matrix addition example on CUDA
*/
#include <stdio.h>
#include "helpers.cuh"
//define the sizes of the matrices here
#define ROWS 10000
#define COLS 10000
#define SIZE ROWS*COLS
//kernel that does the matrix addition. Just add each element to the respective one
__global__ void addMatrix(int *ans_cuda,int *matA_cuda,int *matB_cuda){
/*blockDim.y gives the height of a block along y axis
blockDim.x gives the width of a block along x axis
blockIdx.y gives the index of the current block along the y axis
blockIdx.x gives the index of the current block along the x axis
threadIdx.y gives the index of the current thread in the current block along y axis
threadIdx.x gives the index of the current thread in the current block along x axis
*/
//calculate the row number based on block IDs and thread IDs
int row = blockDim.y * blockIdx.y + threadIdx.y;
//calculate the column number based on block IDs and thread IDs
int col = blockDim.x * blockIdx.x + threadIdx.x;
//to remove any indices beyond the size of the array
if (row<ROWS && col <COLS){
//conversion of 2 dimensional indices to single dimension
int position = row*COLS + col;
//do the calculation
int i,j=0;
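            // Note: despite the file's "matrix addition" title, each element is assigned the arithmetic
            // expression below; the 10x10 loop just recomputes it to add work per element.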
for(i=0;i<10;i++){
for(j=0;j<10;j++){
int a=matA_cuda[position];
int b=matB_cuda[position];
ans_cuda[position]=(int)a*b-a-b-a/(double)b-b/(double)a;
}
}
}
}
int main(){
//allocate matrices
int *matA = (int *)malloc(sizeof(int)*SIZE);
int *matB = (int *)malloc(sizeof(int)*SIZE);
int *ans =(int *)malloc(sizeof(int)*SIZE);
if(matA==NULL || matB==NULL || ans==NULL){
perror("Mem full");
exit(1);
}
//generate
int row,col;
for(row=0;row<ROWS;row++){
for(col=0;col<COLS;col++){
int position = row*COLS + col;
matA[position]=row+col;
matB[position]=row*col;
}
}
/*************************CUDA STUFF STARTS HERE************************/
//variables for time measurements
cudaEvent_t start,stop;
float elapsedtime;
//pointers for cuda memory locations
int *matA_cuda;
int *matB_cuda;
int *ans_cuda;
//allocate memory in cuda
checkCuda(cudaMalloc((void **)&matA_cuda,sizeof(int)*SIZE));
checkCuda(cudaMalloc((void **)&matB_cuda,sizeof(int)*SIZE));
checkCuda(cudaMalloc((void **)&ans_cuda,sizeof(int)*SIZE));
//copy contents from ram to cuda
checkCuda(cudaMemcpy(matA_cuda, matA, sizeof(int)*SIZE, cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(matB_cuda, matB, sizeof(int)*SIZE, cudaMemcpyHostToDevice));
//thread configuration
int blockwidth=1;
dim3 numBlocks(ceil(COLS/(float)blockwidth),ceil(ROWS/(float)blockwidth));
dim3 threadsPerBlock(blockwidth,blockwidth);
//do the matrix addition on CUDA
//the moment at which we start measuring the time
cudaEventCreate(&start);
cudaEventRecord(start,0);
addMatrix<<<numBlocks,threadsPerBlock>>>(ans_cuda,matA_cuda,matB_cuda);
checkCuda(cudaGetLastError());
checkCuda(cudaDeviceSynchronize());
//the moment at which we stop measuring time
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
//copy the answer back
checkCuda(cudaMemcpy(ans, ans_cuda, sizeof(int)*SIZE, cudaMemcpyDeviceToHost));
//free the memory we allocated on CUDA
checkCuda(cudaFree(matA_cuda));
checkCuda(cudaFree(matB_cuda));
checkCuda(cudaFree(ans_cuda));
/*************************CUDA STUFF ENDS HERE************************/
/*write the answer
for(row=0;row<ROWS;row++){
for(col=0;col<COLS;col++){
int position = row*COLS + col;
printf("%5d ", ans[position]);
}
printf("\n");
}*/
//Find and print the elapsed time
cudaEventElapsedTime(&elapsedtime,start,stop);
fprintf(stderr,"Time spent for operation is %.10f seconds\n",elapsedtime/(float)1000);
return 0;
} |
13755440122c77572d48d3e03fa94da6fc39fd11.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
#define BASETYPE float
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
__global__ void matDet(BASETYPE *d_matA, BASETYPE *detM, int desp){
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ BASETYPE datos[];
BASETYPE *s_mat = &datos[0];
BASETYPE *s_detAux = &datos[desp];
int offset = (threadIdx.x)*16;
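    // Each thread handles one 4x4 matrix: its 16 floats are staged in shared memory and the determinant
    // is computed by cofactor expansion along the first row (the four signed cofactor terms land in s_detAux).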
unsigned int i;
for(i = 0; i < 16; i++){
s_mat[(threadIdx.x) * 16 + i]=d_matA[global_id * 16 + i];
}
__syncthreads();
for(i = 0; i < 4; i++){
s_detAux[(threadIdx.x) * 4+i]=0;
}
__syncthreads();
// printf("globalId:%d|%d|%d|%d|%d\n",global_id,(threadIdx.x)*4,(threadIdx.x)*4+1,(threadIdx.x)*4+2,(threadIdx.x)*4+3);
s_detAux[(threadIdx.x)*4] += s_mat[offset] * ( (s_mat[offset+5]*s_mat[offset+10]*s_mat[offset+15])+(s_mat[offset+6]*s_mat[offset+11]*s_mat[offset+13])+(s_mat[offset+7]*s_mat[offset+9]*s_mat[offset+14]) + (-1*(s_mat[offset+7]*s_mat[offset+10]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+11]*s_mat[offset+14])) + (-1*(s_mat[offset+6]*s_mat[offset+9]*s_mat[offset+15])) );
s_detAux[(threadIdx.x)*4+1] += (-1*s_mat[offset+1]) * ( (s_mat[offset+4]*s_mat[offset+10]*s_mat[offset+15])+(s_mat[offset+6]*s_mat[offset+11]*s_mat[offset+12])+(s_mat[offset+7]*s_mat[offset+8]*s_mat[offset+14]) + (-1*(s_mat[offset+7]*s_mat[offset+10]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+11]*s_mat[offset+14])) + (-1*(s_mat[offset+6]*s_mat[offset+8]*s_mat[offset+15])) );
s_detAux[(threadIdx.x)*4+2] += s_mat[offset+2] * ( (s_mat[offset+4]*s_mat[offset+9]*s_mat[offset+15])+(s_mat[offset+5]*s_mat[offset+11]*s_mat[offset+12])+(s_mat[offset+7]*s_mat[offset+8]*s_mat[offset+13]) + (-1*(s_mat[offset+7]*s_mat[offset+9]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+11]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+8]*s_mat[offset+15])) );
s_detAux[(threadIdx.x)*4+3] += (-1*s_mat[offset+3]) * ( (s_mat[offset+4]*s_mat[offset+9]*s_mat[offset+14])+(s_mat[offset+5]*s_mat[offset+10]*s_mat[offset+12])+(s_mat[offset+6]*s_mat[offset+8]*s_mat[offset+13]) + (-1*(s_mat[offset+6]*s_mat[offset+9]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+10]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+8]*s_mat[offset+14])) );
detM[blockIdx.x * blockDim.x + (threadIdx.x)] = s_detAux[(threadIdx.x)*4] + s_detAux[(threadIdx.x)*4+1] + s_detAux[(threadIdx.x)*4+2] + s_detAux[(threadIdx.x)*4+3];
__syncthreads();
}
__global__ void vecMult(BASETYPE *d_matA,unsigned long n){
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ BASETYPE s_mat[];
unsigned int i,j;
for(i = 0; i < 16; i++){
s_mat[threadIdx.x * 16 + i]=d_matA[global_id * 16 + i];
}
__syncthreads();
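    // Block-wide tree reduction: each step sums pairs of 4x4 matrices element-wise in shared memory;
    // thread 0 then writes the block's 16-element partial sum back to d_matA.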
for( j = 1; j < blockDim.x; j *= 2 ){
if( threadIdx.x < blockDim.x / (j * 2)){
for( i = 0; i < 16; i++) {
s_mat[(threadIdx.x) * 16 + i] += s_mat[((threadIdx.x) * 16 + i) + (blockDim.x / (j * 2)) * 16]; // 2 * 16 = 32
}
}
__syncthreads();
}
if ((threadIdx.x) == 0){
for (i = 0; i < 16; i++){
d_matA[(blockIdx.x * 16) + i] = s_mat[i];
}
}
}
__global__ void vecMult2(BASETYPE *d_matA,unsigned long n,int offset_m,int cant_m ){
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ BASETYPE s_mat[];
unsigned int i,j;
if( global_id < n){
for(i = 0; i < 16; i++){
s_mat[threadIdx.x * 16 + i]=d_matA[(offset_m * 16) +( global_id * 16 + i) ];
// printf("%d|||%.2lf||%d\n",global_id,s_mat[threadIdx.x * 16 + i],offset_m);
}
__syncthreads();
for( j = 1; j < cant_m; j *= 2 ){
if( threadIdx.x < cant_m / (j * 2)){
for( i = 0; i < 16; i++) {
s_mat[(threadIdx.x) * 16 + i] += s_mat[((threadIdx.x) * 16 + i) + (cant_m / (j * 2)) * 16]; // 2 * 16 = 32
}
}
__syncthreads();
}
if ((threadIdx.x) == 0){
for (i = 0; i < 16; i++){
// printf("%d|||%.2lf|||%d\n",global_id,s_mat[i],(offset_m / blockDim.x) + ((blockIdx.x * 16) + i));
d_matA[(offset_m / blockDim.x) * 16 + ((blockIdx.x * 16) + i)] = s_mat[i];
}
}
}
}
int main(int argc, char *argv[]){
if (argc != 3){
printf("Falta argumento: N\n");
printf("Falta argumento: CUDA_BLK \n");
return 0;
}
    //variable declarations
hipError_t error;
unsigned long N = atoi (argv[1]);
unsigned long CUDA_BLK = atoi(argv[2]),GRID_BLK,cant_blk;
unsigned long numBytes = sizeof(BASETYPE)*4*4;
BASETYPE *matrices,*d_matrices,*d_detM,*detM;
double timetick;
unsigned long i,j;
int datos_matDet,datos_vecMult,matDet_desp;
matrices = (BASETYPE *)malloc(numBytes*N);
detM = (BASETYPE *)malloc(sizeof(BASETYPE)*N);
for (i = 0; i < 4*4*N; i++){
matrices[i] = 1;
}
for (i = 0; i < N; i++){
detM[i] = 0;
}
matrices[2] = 220;
matrices[13] = 220;
matrices[7] = 6;
matrices[14] = 6;
//comment
hipMalloc((void **) &d_matrices, numBytes*N);
hipMalloc((void **) &d_detM, sizeof(BASETYPE)*N);
datos_matDet = numBytes * CUDA_BLK + sizeof(BASETYPE) * 4 * CUDA_BLK;
datos_vecMult = numBytes * CUDA_BLK;
matDet_desp = CUDA_BLK * 16;
cant_blk = N / CUDA_BLK;
dim3 dimBlock(CUDA_BLK);
dim3 dimGrid(cant_blk);
timetick = dwalltime();
hipMemcpy(d_matrices, matrices, numBytes*N, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_detM, detM, sizeof(BASETYPE)*N, hipMemcpyHostToDevice); // CPU -> GPU
hipLaunchKernelGGL(( matDet), dim3(dimGrid), dim3(dimBlock),datos_matDet, 0, d_matrices,d_detM,matDet_desp);
hipDeviceSynchronize();
for(i = N ; i > 1; i = i / CUDA_BLK){
GRID_BLK = i / CUDA_BLK;
if ((i % CUDA_BLK) == 0){
// printf("primero---------------------------------\n");
dim3 dimGrid(GRID_BLK);
hipLaunchKernelGGL(( vecMult), dim3(dimGrid), dim3(dimBlock),datos_vecMult, 0, d_matrices,i);
hipDeviceSynchronize();
} else{
if(GRID_BLK != 0){
hipLaunchKernelGGL(( vecMult), dim3(dimGrid), dim3(dimBlock),datos_vecMult, 0, d_matrices,i);
hipDeviceSynchronize();
}
// printf("segundo---------------------------------\n");
dim3 dimGrid2(1);
hipLaunchKernelGGL(( vecMult2), dim3(dimGrid2), dim3(dimBlock),datos_vecMult, 0, d_matrices,(i % CUDA_BLK),GRID_BLK * CUDA_BLK,(i % CUDA_BLK));
hipDeviceSynchronize();
i = i + (i % CUDA_BLK);
}
}
/* for(i = N ; i > 1; i = i / CUDA_BLK){
GRID_BLK = i / CUDA_BLK;
dim3 dimGrid(GRID_BLK);
hipLaunchKernelGGL(( vecMult), dim3(dimGrid), dim3(dimBlock),datos_vecMult, 0, d_matrices,i);
hipDeviceSynchronize();
}*/
hipMemcpy(matrices, d_matrices, numBytes, hipMemcpyDeviceToHost); // GPU -> CPU
hipMemcpy(detM, d_detM, sizeof(BASETYPE)*N, hipMemcpyDeviceToHost); // GPU -> CPU
for(i = 1; i < N ; i++){
detM[0] += detM[i];
}
detM[0] = detM[0] / N;
for (i = 0; i < 4*4; i++){
matrices[i] *= detM[0];
}
printf("Tiempo para la GPU: %f\n",dwalltime() - timetick);
error = hipGetLastError();
printf("error: %d\n",error);
printf("%.2lf|\n",detM[0]);
for(i=0; i < 4; i++){
for(j=0; j < 4; j++){
printf("%.2lf|",matrices[i*4+j]);
}
printf("\n");
}
hipFree(d_matrices);
hipFree(d_detM);
free(matrices);
free(detM);
return 0;
}
| 13755440122c77572d48d3e03fa94da6fc39fd11.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
#define BASETYPE float
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
__global__ void matDet(BASETYPE *d_matA, BASETYPE *detM, int desp){
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ BASETYPE datos[];
BASETYPE *s_mat = &datos[0];
BASETYPE *s_detAux = &datos[desp];
int offset = (threadIdx.x)*16;
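    // Each thread handles one 4x4 matrix: its 16 floats are staged in shared memory and the determinant
    // is computed by cofactor expansion along the first row (the four signed cofactor terms land in s_detAux).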
unsigned int i;
for(i = 0; i < 16; i++){
s_mat[(threadIdx.x) * 16 + i]=d_matA[global_id * 16 + i];
}
__syncthreads();
for(i = 0; i < 4; i++){
s_detAux[(threadIdx.x) * 4+i]=0;
}
__syncthreads();
// printf("globalId:%d|%d|%d|%d|%d\n",global_id,(threadIdx.x)*4,(threadIdx.x)*4+1,(threadIdx.x)*4+2,(threadIdx.x)*4+3);
s_detAux[(threadIdx.x)*4] += s_mat[offset] * ( (s_mat[offset+5]*s_mat[offset+10]*s_mat[offset+15])+(s_mat[offset+6]*s_mat[offset+11]*s_mat[offset+13])+(s_mat[offset+7]*s_mat[offset+9]*s_mat[offset+14]) + (-1*(s_mat[offset+7]*s_mat[offset+10]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+11]*s_mat[offset+14])) + (-1*(s_mat[offset+6]*s_mat[offset+9]*s_mat[offset+15])) );
s_detAux[(threadIdx.x)*4+1] += (-1*s_mat[offset+1]) * ( (s_mat[offset+4]*s_mat[offset+10]*s_mat[offset+15])+(s_mat[offset+6]*s_mat[offset+11]*s_mat[offset+12])+(s_mat[offset+7]*s_mat[offset+8]*s_mat[offset+14]) + (-1*(s_mat[offset+7]*s_mat[offset+10]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+11]*s_mat[offset+14])) + (-1*(s_mat[offset+6]*s_mat[offset+8]*s_mat[offset+15])) );
s_detAux[(threadIdx.x)*4+2] += s_mat[offset+2] * ( (s_mat[offset+4]*s_mat[offset+9]*s_mat[offset+15])+(s_mat[offset+5]*s_mat[offset+11]*s_mat[offset+12])+(s_mat[offset+7]*s_mat[offset+8]*s_mat[offset+13]) + (-1*(s_mat[offset+7]*s_mat[offset+9]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+11]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+8]*s_mat[offset+15])) );
s_detAux[(threadIdx.x)*4+3] += (-1*s_mat[offset+3]) * ( (s_mat[offset+4]*s_mat[offset+9]*s_mat[offset+14])+(s_mat[offset+5]*s_mat[offset+10]*s_mat[offset+12])+(s_mat[offset+6]*s_mat[offset+8]*s_mat[offset+13]) + (-1*(s_mat[offset+6]*s_mat[offset+9]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+10]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+8]*s_mat[offset+14])) );
detM[blockIdx.x * blockDim.x + (threadIdx.x)] = s_detAux[(threadIdx.x)*4] + s_detAux[(threadIdx.x)*4+1] + s_detAux[(threadIdx.x)*4+2] + s_detAux[(threadIdx.x)*4+3];
__syncthreads();
}
__global__ void vecMult(BASETYPE *d_matA,unsigned long n){
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ BASETYPE s_mat[];
unsigned int i,j;
for(i = 0; i < 16; i++){
s_mat[threadIdx.x * 16 + i]=d_matA[global_id * 16 + i];
}
__syncthreads();
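    // Block-wide tree reduction: each step sums pairs of 4x4 matrices element-wise in shared memory;
    // thread 0 then writes the block's 16-element partial sum back to d_matA.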
for( j = 1; j < blockDim.x; j *= 2 ){
if( threadIdx.x < blockDim.x / (j * 2)){
for( i = 0; i < 16; i++) {
s_mat[(threadIdx.x) * 16 + i] += s_mat[((threadIdx.x) * 16 + i) + (blockDim.x / (j * 2)) * 16]; // 2 * 16 = 32
}
}
__syncthreads();
}
if ((threadIdx.x) == 0){
for (i = 0; i < 16; i++){
d_matA[(blockIdx.x * 16) + i] = s_mat[i];
}
}
}
__global__ void vecMult2(BASETYPE *d_matA,unsigned long n,int offset_m,int cant_m ){
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ BASETYPE s_mat[];
unsigned int i,j;
if( global_id < n){
for(i = 0; i < 16; i++){
s_mat[threadIdx.x * 16 + i]=d_matA[(offset_m * 16) +( global_id * 16 + i) ];
// printf("%d|||%.2lf||%d\n",global_id,s_mat[threadIdx.x * 16 + i],offset_m);
}
__syncthreads();
for( j = 1; j < cant_m; j *= 2 ){
if( threadIdx.x < cant_m / (j * 2)){
for( i = 0; i < 16; i++) {
s_mat[(threadIdx.x) * 16 + i] += s_mat[((threadIdx.x) * 16 + i) + (cant_m / (j * 2)) * 16]; // 2 * 16 = 32
}
}
__syncthreads();
}
if ((threadIdx.x) == 0){
for (i = 0; i < 16; i++){
// printf("%d|||%.2lf|||%d\n",global_id,s_mat[i],(offset_m / blockDim.x) + ((blockIdx.x * 16) + i));
d_matA[(offset_m / blockDim.x) * 16 + ((blockIdx.x * 16) + i)] = s_mat[i];
}
}
}
}
int main(int argc, char *argv[]){
if (argc != 3){
printf("Falta argumento: N\n");
printf("Falta argumento: CUDA_BLK \n");
return 0;
}
    //variable declarations
cudaError_t error;
unsigned long N = atoi (argv[1]);
unsigned long CUDA_BLK = atoi(argv[2]),GRID_BLK,cant_blk;
unsigned long numBytes = sizeof(BASETYPE)*4*4;
BASETYPE *matrices,*d_matrices,*d_detM,*detM;
double timetick;
unsigned long i,j;
int datos_matDet,datos_vecMult,matDet_desp;
matrices = (BASETYPE *)malloc(numBytes*N);
detM = (BASETYPE *)malloc(sizeof(BASETYPE)*N);
for (i = 0; i < 4*4*N; i++){
matrices[i] = 1;
}
for (i = 0; i < N; i++){
detM[i] = 0;
}
matrices[2] = 220;
matrices[13] = 220;
matrices[7] = 6;
matrices[14] = 6;
//comment
cudaMalloc((void **) &d_matrices, numBytes*N);
cudaMalloc((void **) &d_detM, sizeof(BASETYPE)*N);
datos_matDet = numBytes * CUDA_BLK + sizeof(BASETYPE) * 4 * CUDA_BLK;
datos_vecMult = numBytes * CUDA_BLK;
matDet_desp = CUDA_BLK * 16;
cant_blk = N / CUDA_BLK;
dim3 dimBlock(CUDA_BLK);
dim3 dimGrid(cant_blk);
timetick = dwalltime();
cudaMemcpy(d_matrices, matrices, numBytes*N, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpy(d_detM, detM, sizeof(BASETYPE)*N, cudaMemcpyHostToDevice); // CPU -> GPU
matDet<<<dimGrid, dimBlock,datos_matDet>>>(d_matrices,d_detM,matDet_desp);
cudaThreadSynchronize();
for(i = N ; i > 1; i = i / CUDA_BLK){
GRID_BLK = i / CUDA_BLK;
if ((i % CUDA_BLK) == 0){
// printf("primero---------------------------------\n");
dim3 dimGrid(GRID_BLK);
vecMult<<<dimGrid, dimBlock,datos_vecMult>>>(d_matrices,i);
cudaThreadSynchronize();
} else{
if(GRID_BLK != 0){
vecMult<<<dimGrid, dimBlock,datos_vecMult>>>(d_matrices,i);
cudaThreadSynchronize();
}
// printf("segundo---------------------------------\n");
dim3 dimGrid2(1);
vecMult2<<<dimGrid2, dimBlock,datos_vecMult>>>(d_matrices,(i % CUDA_BLK),GRID_BLK * CUDA_BLK,(i % CUDA_BLK));
cudaThreadSynchronize();
i = i + (i % CUDA_BLK);
}
}
/* for(i = N ; i > 1; i = i / CUDA_BLK){
GRID_BLK = i / CUDA_BLK;
dim3 dimGrid(GRID_BLK);
vecMult<<<dimGrid, dimBlock,datos_vecMult>>>(d_matrices,i);
cudaThreadSynchronize();
}*/
cudaMemcpy(matrices, d_matrices, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
cudaMemcpy(detM, d_detM, sizeof(BASETYPE)*N, cudaMemcpyDeviceToHost); // GPU -> CPU
for(i = 1; i < N ; i++){
detM[0] += detM[i];
}
detM[0] = detM[0] / N;
for (i = 0; i < 4*4; i++){
matrices[i] *= detM[0];
}
printf("Tiempo para la GPU: %f\n",dwalltime() - timetick);
error = cudaGetLastError();
printf("error: %d\n",error);
printf("%.2lf|\n",detM[0]);
for(i=0; i < 4; i++){
for(j=0; j < 4; j++){
printf("%.2lf|",matrices[i*4+j]);
}
printf("\n");
}
cudaFree(d_matrices);
cudaFree(d_detM);
free(matrices);
free(detM);
return 0;
}
|
d0779c22e99b70a5ead6586f23ee2e45124cb694.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <stdio.h>
#include <fstream>
#include <cstdio>
#include <cstdint>
#include <vector>
using namespace std;
//----------------------------------------------------------------------------
// Vec.h
//----------------------------------------------------------------------------
struct Vec3i{
union {
struct {
int x, y, z;
};
int data[3];
};
__host__ __device__ Vec3i() = default;
__host__ __device__ Vec3i(int x, int y, int z): x(x), y(y), z(z) {}
__host__ __device__ Vec3i(int v): x(v), y(v), z(v) {}
__host__ __device__ Vec3i &operator+=(const Vec3i &r) { x+=r.x; y+=r.y; z+=r.z; return *this; }
__host__ __device__ Vec3i &operator-=(const Vec3i &r) { x-=r.x; y-=r.y; z-=r.z; return *this; }
__host__ __device__ Vec3i &operator*=(const Vec3i &r) { x*=r.x; y*=r.y; z*=r.z; return *this; }
__host__ __device__ Vec3i &operator/=(const Vec3i &r) { x/=r.x; y/=r.y; z/=r.z; return *this; }
int &operator[](int i) { return data[i]; }
int operator[](int i) const { return data[i]; }
};
struct Vec3f{
union {
struct {
float x, y, z;
};
float data[3];
};
__host__ __device__ Vec3f() = default;
__host__ __device__ Vec3f(float x, float y, float z): x(x), y(y), z(z) {}
__host__ __device__ Vec3f(float v): x(v), y(v), z(v) {}
__host__ __device__ Vec3f &operator+=(const Vec3f &r) { x+=r.x; y+=r.y; z+=r.z; return *this; }
__host__ __device__ Vec3f &operator-=(const Vec3f &r) { x-=r.x; y-=r.y; z-=r.z; return *this; }
__host__ __device__ Vec3f &operator*=(const Vec3f &r) { x*=r.x; y*=r.y; z*=r.z; return *this; }
__host__ __device__ Vec3f &operator/=(const Vec3f &r) { x/=r.x; y/=r.y; z/=r.z; return *this; }
__host__ __device__ float &operator[](int i) { return data[i]; }
__host__ __device__ float operator[](int i) const { return data[i]; }
};
static inline Vec3f operator-(const Vec3f &l, const Vec3f &r) { return {l.x - r.x, l.y - r.y, l.z - r.z}; }
static inline Vec3f operator/(const Vec3f &l, const Vec3f &r) { return {l.x / r.x, l.y / r.y, l.z / r.z}; }
static inline Vec3f cross(const Vec3f &v1, const Vec3f &v2) { return Vec3f(v1.y * v2.z - v1.z * v2.y, v1.z * v2.x - v1.x * v2.z, v1.x * v2.y - v1.y * v2.x); }
//static inline Vec3f normalize(const Vec3f &v) { return v / Vec3f(sqrtf(v.x*v.x + v.y+v.y + v.z*v.z)); }
__host__ __device__ Vec3f normalize(const Vec3f &v) { return v / Vec3f(sqrtf(v.x*v.x + v.y*v.y + v.z*v.z)); }
//static inline Vec3f operator-(const Vec3f &l, const Vec3f &r) { return {l.x - r.x, l.y - r.y, l.z - r.z}; }
//static inline Vec3f operator/(const Vec3f &l, const Vec3f &r) { return {l.x / r.x, l.y / r.y, l.z / r.z}; }
//static inline Vec3f cross(const Vec3f &v1, const Vec3f &v2) { return Vec3f(v1.y * v2.z - v1.z * v2.y, v1.z * v2.x - v1.x * v2.z, v1.x * v2.y - v1.y * v2.x); }
//static inline Vec3f normalize(const Vec3f &v) { return v / Vec3f(std::sqrt(v.x*v.x + v.y+v.y + v.z*v.z)); }
//----------------------------------------------------------------------------
// Geometry
//----------------------------------------------------------------------------
struct Vertex {
Vec3f position;
Vec3f normal;
Vec3f color;
};
//float* voxels;
//Vertex vertices[MAXIMUM_NUM];
//unsigned int vertices_num = 0;
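// Packed triangulation table: for each of the 256 corner-sign configurations, the low 4 bits hold the
// triangle count and each following 4-bit nibble holds a cube-edge index (decoded in the kernel as
// config & 0xF and (config >> offset) & 0xF).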
__constant__ uint64_t marching_cube_tris[256] = {
0ULL, 33793ULL, 36945ULL, 159668546ULL,
18961ULL, 144771090ULL, 5851666ULL, 595283255635ULL,
20913ULL, 67640146ULL, 193993474ULL, 655980856339ULL,
88782242ULL, 736732689667ULL, 797430812739ULL, 194554754ULL,
26657ULL, 104867330ULL, 136709522ULL, 298069416227ULL,
109224258ULL, 8877909667ULL, 318136408323ULL, 1567994331701604ULL,
189884450ULL, 350847647843ULL, 559958167731ULL, 3256298596865604ULL,
447393122899ULL, 651646838401572ULL, 2538311371089956ULL, 737032694307ULL,
29329ULL, 43484162ULL, 91358498ULL, 374810899075ULL,
158485010ULL, 178117478419ULL, 88675058979ULL, 433581536604804ULL,
158486962ULL, 649105605635ULL, 4866906995ULL, 3220959471609924ULL,
649165714851ULL, 3184943915608436ULL, 570691368417972ULL, 595804498035ULL,
124295042ULL, 431498018963ULL, 508238522371ULL, 91518530ULL,
318240155763ULL, 291789778348404ULL, 1830001131721892ULL, 375363605923ULL,
777781811075ULL, 1136111028516116ULL, 3097834205243396ULL, 508001629971ULL,
2663607373704004ULL, 680242583802939237ULL, 333380770766129845ULL, 179746658ULL,
42545ULL, 138437538ULL, 93365810ULL, 713842853011ULL,
73602098ULL, 69575510115ULL, 23964357683ULL, 868078761575828ULL,
28681778ULL, 713778574611ULL, 250912709379ULL, 2323825233181284ULL,
302080811955ULL, 3184439127991172ULL, 1694042660682596ULL, 796909779811ULL,
176306722ULL, 150327278147ULL, 619854856867ULL, 1005252473234484ULL,
211025400963ULL, 36712706ULL, 360743481544788ULL, 150627258963ULL,
117482600995ULL, 1024968212107700ULL, 2535169275963444ULL, 4734473194086550421ULL,
628107696687956ULL, 9399128243ULL, 5198438490361643573ULL, 194220594ULL,
104474994ULL, 566996932387ULL, 427920028243ULL, 2014821863433780ULL,
492093858627ULL, 147361150235284ULL, 2005882975110676ULL, 9671606099636618005ULL,
777701008947ULL, 3185463219618820ULL, 482784926917540ULL, 2900953068249785909ULL,
1754182023747364ULL, 4274848857537943333ULL, 13198752741767688709ULL, 2015093490989156ULL,
591272318771ULL, 2659758091419812ULL, 1531044293118596ULL, 298306479155ULL,
408509245114388ULL, 210504348563ULL, 9248164405801223541ULL, 91321106ULL,
2660352816454484ULL, 680170263324308757ULL, 8333659837799955077ULL, 482966828984116ULL,
4274926723105633605ULL, 3184439197724820ULL, 192104450ULL, 15217ULL,
45937ULL, 129205250ULL, 129208402ULL, 529245952323ULL,
169097138ULL, 770695537027ULL, 382310500883ULL, 2838550742137652ULL,
122763026ULL, 277045793139ULL, 81608128403ULL, 1991870397907988ULL,
362778151475ULL, 2059003085103236ULL, 2132572377842852ULL, 655681091891ULL,
58419234ULL, 239280858627ULL, 529092143139ULL, 1568257451898804ULL,
447235128115ULL, 679678845236084ULL, 2167161349491220ULL, 1554184567314086709ULL,
165479003923ULL, 1428768988226596ULL, 977710670185060ULL, 10550024711307499077ULL,
1305410032576132ULL, 11779770265620358997ULL, 333446212255967269ULL, 978168444447012ULL,
162736434ULL, 35596216627ULL, 138295313843ULL, 891861543990356ULL,
692616541075ULL, 3151866750863876ULL, 100103641866564ULL, 6572336607016932133ULL,
215036012883ULL, 726936420696196ULL, 52433666ULL, 82160664963ULL,
2588613720361524ULL, 5802089162353039525ULL, 214799000387ULL, 144876322ULL,
668013605731ULL, 110616894681956ULL, 1601657732871812ULL, 430945547955ULL,
3156382366321172ULL, 7644494644932993285ULL, 3928124806469601813ULL, 3155990846772900ULL,
339991010498708ULL, 10743689387941597493ULL, 5103845475ULL, 105070898ULL,
3928064910068824213ULL, 156265010ULL, 1305138421793636ULL, 27185ULL,
195459938ULL, 567044449971ULL, 382447549283ULL, 2175279159592324ULL,
443529919251ULL, 195059004769796ULL, 2165424908404116ULL, 1554158691063110021ULL,
504228368803ULL, 1436350466655236ULL, 27584723588724ULL, 1900945754488837749ULL,
122971970ULL, 443829749251ULL, 302601798803ULL, 108558722ULL,
724700725875ULL, 43570095105972ULL, 2295263717447940ULL, 2860446751369014181ULL,
2165106202149444ULL, 69275726195ULL, 2860543885641537797ULL, 2165106320445780ULL,
2280890014640004ULL, 11820349930268368933ULL, 8721082628082003989ULL, 127050770ULL,
503707084675ULL, 122834978ULL, 2538193642857604ULL, 10129ULL,
801441490467ULL, 2923200302876740ULL, 1443359556281892ULL, 2901063790822564949ULL,
2728339631923524ULL, 7103874718248233397ULL, 12775311047932294245ULL, 95520290ULL,
2623783208098404ULL, 1900908618382410757ULL, 137742672547ULL, 2323440239468964ULL,
362478212387ULL, 727199575803140ULL, 73425410ULL, 34337ULL,
163101314ULL, 668566030659ULL, 801204361987ULL, 73030562ULL,
591509145619ULL, 162574594ULL, 100608342969108ULL, 5553ULL,
724147968595ULL, 1436604830452292ULL, 176259090ULL, 42001ULL,
143955266ULL, 2385ULL, 18433ULL, 0ULL,
};
__host__ __device__ int offset_3d(const Vec3i &p, const Vec3i &size)
{
return (p.z * size.y + p.y) * size.x + p.x;
}
__host__ __device__ int offset_3d(const int x, const int y, const int z, const int sizex, const int sizey, const int sizez)
{
// return (p.z * size.y + p.y) * size.x + p.x;
return (z*sizey + y)*sizex + x;
}
__host__ __device__ bool valid(int x, int y, int z, int dimx, int dimy, int dimz)
{
if ( x >= 0 && x < dimx)
if ( y >= 0 && y < dimy)
if ( z >= 0 && z < dimz)
return true;
return false;
}
__host__ __device__ void triangle(Vertex &va, Vertex &vb, Vertex &vc)
{
const Vec3f ab = va.position - vb.position;
const Vec3f cb = vc.position - vb.position;
const Vec3f n = cross(cb, ab);
va.normal += n;
vb.normal += n;
vc.normal += n;
}
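// Emit a vertex on the cube edge that runs along 'axis' from 'base': the crossing point is found by
// linear interpolation between the corner values va and vb; nothing is written when both values have
// the same sign.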
__host__ __device__ void do_edge (Vertex* edge_indices, int n_edge, float va, float vb, int axis, const Vec3f &base) {
if ((va < 0.0) == (vb < 0.0))
return;
Vec3f v = base;
v[axis] += va / (va - vb);
edge_indices[n_edge] = {v,Vec3f(0)};
//edge_indices[n_edge] = {v};
};
__device__ void do_edge (float* edge_indices, int n_edge, float va, float vb, int axis, int x, int y, int z) {
if ((va < 0.0) == (vb < 0.0))
return;
float v[3];
v[0] = x*1.0;
v[1] = y*1.0;
v[2] = z*1.0;
v[axis] += va / (va - vb);
edge_indices[6*n_edge+0] = v[0];
edge_indices[6*n_edge+1] = v[1];
edge_indices[6*n_edge+2] = v[2];
edge_indices[6*n_edge+3] = 0.0f;
edge_indices[6*n_edge+4] = 0.0f;
edge_indices[6*n_edge+5] = 0.0f;
};
//__device__ unsigned int num_vert[1] = {0};
//generate_geometry_kernel<<<grid,block>>>(d_voxels,d_vertices,marching_cube_tris_gpu,num_vert);
//__global__ void generate_geometry_kernel(float* voxels, Vertex* vertices)
extern "C"{
__global__ void mc_kernel(Vertex* vertices,float* voxels, int bidx, int boff, int dimx, int dimy, int dimz, unsigned int* num_vert,int* isolist, int start, int end, int N, float* color_table, int table_num)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
int z = threadIdx.z + blockDim.z * blockIdx.z;
//printf("Call kernel [%d,%d,%d] \n",x,y,z);
if ( x >= 0 && y >= 0 && z > 0 &&
x < dimx && y < dimy && z < dimz+1){
float vs[8];
for (int j = start ; j < end ; j++)
{
int isovalue = isolist[j];
if (isovalue == 0)
continue;
for (int i =0 ; i <8;i++)
vs[i] =0;
if (valid(x ,y ,z ,dimx,dimy,dimz+2))
vs[0] = voxels[offset_3d({x, y, z}, Vec3i((dimx)))];
if (valid(x+1,y ,z ,dimx,dimy,dimz+2))
vs[1] = voxels[offset_3d({x+1, y, z}, Vec3i((dimx)))];
if (valid(x ,y+1,z ,dimx,dimy,dimz+2))
vs[2] = voxels[offset_3d({x, y+1, z}, Vec3i((dimx)))];
if (valid(x+1,y+1,z ,dimx,dimy,dimz+2))
vs[3] = voxels[offset_3d({x+1, y+1, z}, Vec3i((dimx)))];
if (valid(x ,y ,z+1,dimx,dimy,dimz+2))
vs[4] = voxels[offset_3d({x, y, z+1}, Vec3i((dimx)))];
if (valid(x+1,y ,z+1,dimx,dimy,dimz+2))
vs[5] = voxels[offset_3d({x+1, y, z+1}, Vec3i((dimx)))];
if (valid(x ,y+1,z+1,dimx,dimy,dimz+2))
vs[6] = voxels[offset_3d({x, y+1, z+1}, Vec3i((dimx)))];
if (valid(x+1,y+1,z+1,dimx,dimy,dimz+2))
vs[7] = voxels[offset_3d({x+1, y+1, z+1}, Vec3i((dimx)))];
int check = 0;
for (int i = 0 ; i < 8 ; i++)
if ((int)vs[i] == isovalue)
check++;
if (isovalue == 0 || check ==0) continue;
for (int i = 0 ; i < 8 ; i++){
vs[i] = abs(vs[i] - isovalue) -0.5;
}
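            // Turn the integer labels into a signed field: negative where the voxel label matches the
            // current isovalue (|v - iso| < 0.5), positive elsewhere, so the usual marching-cubes sign
            // test below applies.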
const int config_n =
((vs[0] < 0.0f) << 0) | // *1
((vs[1] < 0.0f) << 1) | // *2
((vs[2] < 0.0f) << 2) | // *4
((vs[3] < 0.0f) << 3) | // *8
((vs[4] < 0.0f) << 4) | // *16
((vs[5] < 0.0f) << 5) | // *32
((vs[6] < 0.0f) << 6) | // *64
((vs[7] < 0.0f) << 7); // *128
if (config_n == 0 || config_n == 255)
continue;
//int index_base1 = atomicAdd(num_vert,1);
// vector<Vertex> vert;
// int edge_indices[12];
//float edge_indices[12*6];
num_vert[1] = bidx;
z -= 1;
z += bidx*boff;
x -= N/2;
y -= N/2;
z -= N/2;
Vertex edge_indices[12];
do_edge(edge_indices, 0, vs[0], vs[1], 0, Vec3f(x, y, z));
do_edge(edge_indices, 1, vs[2], vs[3], 0, Vec3f(x, y+1, z));
do_edge(edge_indices, 2, vs[4], vs[5], 0, Vec3f(x, y, z+1));
do_edge(edge_indices, 3, vs[6], vs[7], 0, Vec3f(x, y+1, z+1));
do_edge(edge_indices, 4, vs[0], vs[2], 1, Vec3f(x, y, z));
do_edge(edge_indices, 5, vs[1], vs[3], 1, Vec3f(x+1, y, z));
do_edge(edge_indices, 6, vs[4], vs[6], 1, Vec3f(x, y, z+1));
do_edge(edge_indices, 7, vs[5], vs[7], 1, Vec3f(x+1, y, z+1));
do_edge(edge_indices, 8, vs[0], vs[4], 2, Vec3f(x, y, z));
do_edge(edge_indices, 9, vs[1], vs[5], 2, Vec3f(x+1, y, z));
do_edge(edge_indices, 10, vs[2], vs[6], 2, Vec3f(x, y+1, z));
do_edge(edge_indices, 11, vs[3], vs[7], 2, Vec3f(x+1, y+1, z));
/* */
/* do_edge(edge_indices, 0, vs[0], vs[1], 0, x, y, z); */
/* do_edge(edge_indices, 1, vs[2], vs[3], 0, x, y+1, z); */
/* do_edge(edge_indices, 2, vs[4], vs[5], 0, x, y, z+1); */
/* do_edge(edge_indices, 3, vs[6], vs[7], 0, x, y+1, z+1); */
/* */
/* do_edge(edge_indices, 4, vs[0], vs[2], 1, x, y, z); */
/* do_edge(edge_indices, 5, vs[1], vs[3], 1, x+1, y, z); */
/* do_edge(edge_indices, 6, vs[4], vs[6], 1, x, y, z+1); */
/* do_edge(edge_indices, 7, vs[5], vs[7], 1, x+1, y, z+1); */
/* */
/* do_edge(edge_indices, 8, vs[0], vs[4], 2, x, y, z); */
/* do_edge(edge_indices, 9, vs[1], vs[5], 2, x+1, y, z); */
/* do_edge(edge_indices, 10, vs[2], vs[6], 2, x, y+1, z); */
/* do_edge(edge_indices, 11, vs[3], vs[7], 2, x+1, y+1, z); */
const uint64_t config = marching_cube_tris[config_n];
const int n_triangles = config & 0xF; // Maximum 15
            const int n_indices = n_triangles * 3; // Maximum 45
int offset = 4;
int index_base = atomicAdd(num_vert,n_indices);
//num_vert+= n_indices;
/* */
/* for (int i = 0; i < n_indices; i++) { */
/* const int edge = (config >> offset) & 0xF; */
/* //vertices.push_back(edge_indices[edge]); */
/* // vertices[3*index_base+0] = bidx*boff; */
/* // vertices[3*index_base+1] = bidx*boff; */
/* // vertices[3*index_base+2] = bidx*boff; */
/* vertices[3*index_base+0] = edge_indices[3*edge+0]; */
/* vertices[3*index_base+1] = edge_indices[3*edge+1]; */
/* vertices[3*index_base+2] = edge_indices[3*edge+2] + bidx*boff; */
/* index_base ++; */
/* offset += 4; */
/* } */
/* */
//if (index_base + n_indices< MAXIMUM_NUM){
if (true){
int index = index_base;
for (int i = 0; i < n_indices; i++) {
const int edge = (config >> offset) & 0xF;
//vertices.push_back(edge_indices[edge]);
vertices[index++] = edge_indices[edge];
offset += 4;
}
for (int i = 0; i < n_triangles; i++) {
triangle(vertices[index_base+i*3+2],
vertices[index_base+i*3+1],
vertices[index_base+i*3+0]);
}
for (int i = 0; i < n_indices; i++) {
vertices[index_base+i].normal = normalize(vertices[index_base+i].normal);
vertices[index_base+i].position.data[0] /= N;
vertices[index_base+i].position.data[1] /= N;
vertices[index_base+i].position.data[2] /= N;
int color_num = j%table_num;
vertices[index_base+i].color.data[0] = color_table[3*color_num+0];
vertices[index_base+i].color.data[1] = color_table[3*color_num+1];
vertices[index_base+i].color.data[2] = color_table[3*color_num+2];
/*
            if ( j % 4 == 0){
vertices[index_base+i].color.data[0] = 0.2;
vertices[index_base+i].color.data[1] = 1.0;
vertices[index_base+i].color.data[2] = 0.2;
}
else if ( j % 4 == 1){
vertices[index_base+i].color.data[0] = 1.0;
vertices[index_base+i].color.data[1] = 0.2;
vertices[index_base+i].color.data[2] = 0.2;
}
else if ( j % 4 == 2){
vertices[index_base+i].color.data[0] = 0.2;
vertices[index_base+i].color.data[1] = 0.2;
vertices[index_base+i].color.data[2] = 1.0;
}else {
vertices[index_base+i].color.data[0] = 0.2;
vertices[index_base+i].color.data[1] = 1.0;
vertices[index_base+i].color.data[2] = 1.0;
}*/
}
}
}
}
}
//__global__ void mc_kernel(Vertex* vertices,float* voxels, int bidx, int boff, int dimx, int dimy, int dimz, unsigned int* num_vert,int* isolist, int start, int end, int N)
__global__ void composite(char* output, char* input, int num_img, int dim_x, int dim_y, int dummy1, int dummy2, unsigned int* num_vert) {
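    // Depth compositing: each input image is packed as dim_x*dim_y*3 bytes of RGB followed by
    // dim_x*dim_y floats of depth; for every pixel the color from the image with the smallest depth
    // value is kept.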
int idx_x = threadIdx.x + blockDim.x * blockIdx.x;
int idx_y = threadIdx.y + blockDim.y * blockIdx.y;
if(dim_x <= idx_x || dim_y <= idx_y) return;
int index = (idx_y * dim_x + idx_x);
char* img1 = input;
float *depth_table1 = (float*) (img1 + dim_x*dim_y*3);
float val1 = depth_table1[index];
char* img = img1;
//RGB Copy
output[3*index+0] = img[3*index+0];
output[3*index+1] = img[3*index+1];
output[3*index+2] = img[3*index+2];
for (int i = 1 ; i< num_img ;i++)
{
char* img2 = input+(dim_x*dim_y*7)*i;
float *depth_table2 = (float*) (img2 + dim_x*dim_y*3);
float val2 = depth_table2[index];
// unsigned char* image = (unsigned char*) input;
if (val1 > val2){
val1 = val2;
//RGB Copy
output[3*index+0] = img2[3*index+0];
output[3*index+1] = img2[3*index+1];
output[3*index+2] = img2[3*index+2];
}
}
return ;
}
}
| d0779c22e99b70a5ead6586f23ee2e45124cb694.cu |
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <stdio.h>
#include <fstream>
#include <cstdio>
#include <cstdint>
#include <vector>
using namespace std;
//----------------------------------------------------------------------------
// Vec.h
//----------------------------------------------------------------------------
struct Vec3i{
union {
struct {
int x, y, z;
};
int data[3];
};
__host__ __device__ Vec3i() = default;
__host__ __device__ Vec3i(int x, int y, int z): x(x), y(y), z(z) {}
__host__ __device__ Vec3i(int v): x(v), y(v), z(v) {}
__host__ __device__ Vec3i &operator+=(const Vec3i &r) { x+=r.x; y+=r.y; z+=r.z; return *this; }
__host__ __device__ Vec3i &operator-=(const Vec3i &r) { x-=r.x; y-=r.y; z-=r.z; return *this; }
__host__ __device__ Vec3i &operator*=(const Vec3i &r) { x*=r.x; y*=r.y; z*=r.z; return *this; }
__host__ __device__ Vec3i &operator/=(const Vec3i &r) { x/=r.x; y/=r.y; z/=r.z; return *this; }
int &operator[](int i) { return data[i]; }
int operator[](int i) const { return data[i]; }
};
struct Vec3f{
union {
struct {
float x, y, z;
};
float data[3];
};
__host__ __device__ Vec3f() = default;
__host__ __device__ Vec3f(float x, float y, float z): x(x), y(y), z(z) {}
__host__ __device__ Vec3f(float v): x(v), y(v), z(v) {}
__host__ __device__ Vec3f &operator+=(const Vec3f &r) { x+=r.x; y+=r.y; z+=r.z; return *this; }
__host__ __device__ Vec3f &operator-=(const Vec3f &r) { x-=r.x; y-=r.y; z-=r.z; return *this; }
__host__ __device__ Vec3f &operator*=(const Vec3f &r) { x*=r.x; y*=r.y; z*=r.z; return *this; }
__host__ __device__ Vec3f &operator/=(const Vec3f &r) { x/=r.x; y/=r.y; z/=r.z; return *this; }
__host__ __device__ float &operator[](int i) { return data[i]; }
__host__ __device__ float operator[](int i) const { return data[i]; }
};
static inline Vec3f operator-(const Vec3f &l, const Vec3f &r) { return {l.x - r.x, l.y - r.y, l.z - r.z}; }
static inline Vec3f operator/(const Vec3f &l, const Vec3f &r) { return {l.x / r.x, l.y / r.y, l.z / r.z}; }
static inline Vec3f cross(const Vec3f &v1, const Vec3f &v2) { return Vec3f(v1.y * v2.z - v1.z * v2.y, v1.z * v2.x - v1.x * v2.z, v1.x * v2.y - v1.y * v2.x); }
//static inline Vec3f normalize(const Vec3f &v) { return v / Vec3f(sqrtf(v.x*v.x + v.y+v.y + v.z*v.z)); }
__host__ __device__ Vec3f normalize(const Vec3f &v) { return v / Vec3f(sqrtf(v.x*v.x + v.y*v.y + v.z*v.z)); }
//static inline Vec3f operator-(const Vec3f &l, const Vec3f &r) { return {l.x - r.x, l.y - r.y, l.z - r.z}; }
//static inline Vec3f operator/(const Vec3f &l, const Vec3f &r) { return {l.x / r.x, l.y / r.y, l.z / r.z}; }
//static inline Vec3f cross(const Vec3f &v1, const Vec3f &v2) { return Vec3f(v1.y * v2.z - v1.z * v2.y, v1.z * v2.x - v1.x * v2.z, v1.x * v2.y - v1.y * v2.x); }
//static inline Vec3f normalize(const Vec3f &v) { return v / Vec3f(std::sqrt(v.x*v.x + v.y+v.y + v.z*v.z)); }
//----------------------------------------------------------------------------
// Geometry
//----------------------------------------------------------------------------
struct Vertex {
Vec3f position;
Vec3f normal;
Vec3f color;
};
//float* voxels;
//Vertex vertices[MAXIMUM_NUM];
//unsigned int vertices_num = 0;
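// Packed triangulation table: for each of the 256 corner-sign configurations, the low 4 bits hold the
// triangle count and each following 4-bit nibble holds a cube-edge index (decoded in the kernel as
// config & 0xF and (config >> offset) & 0xF).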
__constant__ uint64_t marching_cube_tris[256] = {
0ULL, 33793ULL, 36945ULL, 159668546ULL,
18961ULL, 144771090ULL, 5851666ULL, 595283255635ULL,
20913ULL, 67640146ULL, 193993474ULL, 655980856339ULL,
88782242ULL, 736732689667ULL, 797430812739ULL, 194554754ULL,
26657ULL, 104867330ULL, 136709522ULL, 298069416227ULL,
109224258ULL, 8877909667ULL, 318136408323ULL, 1567994331701604ULL,
189884450ULL, 350847647843ULL, 559958167731ULL, 3256298596865604ULL,
447393122899ULL, 651646838401572ULL, 2538311371089956ULL, 737032694307ULL,
29329ULL, 43484162ULL, 91358498ULL, 374810899075ULL,
158485010ULL, 178117478419ULL, 88675058979ULL, 433581536604804ULL,
158486962ULL, 649105605635ULL, 4866906995ULL, 3220959471609924ULL,
649165714851ULL, 3184943915608436ULL, 570691368417972ULL, 595804498035ULL,
124295042ULL, 431498018963ULL, 508238522371ULL, 91518530ULL,
318240155763ULL, 291789778348404ULL, 1830001131721892ULL, 375363605923ULL,
777781811075ULL, 1136111028516116ULL, 3097834205243396ULL, 508001629971ULL,
2663607373704004ULL, 680242583802939237ULL, 333380770766129845ULL, 179746658ULL,
42545ULL, 138437538ULL, 93365810ULL, 713842853011ULL,
73602098ULL, 69575510115ULL, 23964357683ULL, 868078761575828ULL,
28681778ULL, 713778574611ULL, 250912709379ULL, 2323825233181284ULL,
302080811955ULL, 3184439127991172ULL, 1694042660682596ULL, 796909779811ULL,
176306722ULL, 150327278147ULL, 619854856867ULL, 1005252473234484ULL,
211025400963ULL, 36712706ULL, 360743481544788ULL, 150627258963ULL,
117482600995ULL, 1024968212107700ULL, 2535169275963444ULL, 4734473194086550421ULL,
628107696687956ULL, 9399128243ULL, 5198438490361643573ULL, 194220594ULL,
104474994ULL, 566996932387ULL, 427920028243ULL, 2014821863433780ULL,
492093858627ULL, 147361150235284ULL, 2005882975110676ULL, 9671606099636618005ULL,
777701008947ULL, 3185463219618820ULL, 482784926917540ULL, 2900953068249785909ULL,
1754182023747364ULL, 4274848857537943333ULL, 13198752741767688709ULL, 2015093490989156ULL,
591272318771ULL, 2659758091419812ULL, 1531044293118596ULL, 298306479155ULL,
408509245114388ULL, 210504348563ULL, 9248164405801223541ULL, 91321106ULL,
2660352816454484ULL, 680170263324308757ULL, 8333659837799955077ULL, 482966828984116ULL,
4274926723105633605ULL, 3184439197724820ULL, 192104450ULL, 15217ULL,
45937ULL, 129205250ULL, 129208402ULL, 529245952323ULL,
169097138ULL, 770695537027ULL, 382310500883ULL, 2838550742137652ULL,
122763026ULL, 277045793139ULL, 81608128403ULL, 1991870397907988ULL,
362778151475ULL, 2059003085103236ULL, 2132572377842852ULL, 655681091891ULL,
58419234ULL, 239280858627ULL, 529092143139ULL, 1568257451898804ULL,
447235128115ULL, 679678845236084ULL, 2167161349491220ULL, 1554184567314086709ULL,
165479003923ULL, 1428768988226596ULL, 977710670185060ULL, 10550024711307499077ULL,
1305410032576132ULL, 11779770265620358997ULL, 333446212255967269ULL, 978168444447012ULL,
162736434ULL, 35596216627ULL, 138295313843ULL, 891861543990356ULL,
692616541075ULL, 3151866750863876ULL, 100103641866564ULL, 6572336607016932133ULL,
215036012883ULL, 726936420696196ULL, 52433666ULL, 82160664963ULL,
2588613720361524ULL, 5802089162353039525ULL, 214799000387ULL, 144876322ULL,
668013605731ULL, 110616894681956ULL, 1601657732871812ULL, 430945547955ULL,
3156382366321172ULL, 7644494644932993285ULL, 3928124806469601813ULL, 3155990846772900ULL,
339991010498708ULL, 10743689387941597493ULL, 5103845475ULL, 105070898ULL,
3928064910068824213ULL, 156265010ULL, 1305138421793636ULL, 27185ULL,
195459938ULL, 567044449971ULL, 382447549283ULL, 2175279159592324ULL,
443529919251ULL, 195059004769796ULL, 2165424908404116ULL, 1554158691063110021ULL,
504228368803ULL, 1436350466655236ULL, 27584723588724ULL, 1900945754488837749ULL,
122971970ULL, 443829749251ULL, 302601798803ULL, 108558722ULL,
724700725875ULL, 43570095105972ULL, 2295263717447940ULL, 2860446751369014181ULL,
2165106202149444ULL, 69275726195ULL, 2860543885641537797ULL, 2165106320445780ULL,
2280890014640004ULL, 11820349930268368933ULL, 8721082628082003989ULL, 127050770ULL,
503707084675ULL, 122834978ULL, 2538193642857604ULL, 10129ULL,
801441490467ULL, 2923200302876740ULL, 1443359556281892ULL, 2901063790822564949ULL,
2728339631923524ULL, 7103874718248233397ULL, 12775311047932294245ULL, 95520290ULL,
2623783208098404ULL, 1900908618382410757ULL, 137742672547ULL, 2323440239468964ULL,
362478212387ULL, 727199575803140ULL, 73425410ULL, 34337ULL,
163101314ULL, 668566030659ULL, 801204361987ULL, 73030562ULL,
591509145619ULL, 162574594ULL, 100608342969108ULL, 5553ULL,
724147968595ULL, 1436604830452292ULL, 176259090ULL, 42001ULL,
143955266ULL, 2385ULL, 18433ULL, 0ULL,
};
__host__ __device__ int offset_3d(const Vec3i &p, const Vec3i &size)
{
return (p.z * size.y + p.y) * size.x + p.x;
}
__host__ __device__ int offset_3d(const int x, const int y, const int z, const int sizex, const int sizey, const int sizez)
{
// return (p.z * size.y + p.y) * size.x + p.x;
return (z*sizey + y)*sizex + x;
}
__host__ __device__ bool valid(int x, int y, int z, int dimx, int dimy, int dimz)
{
if ( x >= 0 && x < dimx)
if ( y >= 0 && y < dimy)
if ( z >= 0 && z < dimz)
return true;
return false;
}
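// Accumulates the (unnormalized) face normal n = cross(cb, ab) of triangle (va, vb, vc)
// into each vertex's normal; the normals are normalized later, after all triangles are emitted.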
__host__ __device__ void triangle(Vertex &va, Vertex &vb, Vertex &vc)
{
const Vec3f ab = va.position - vb.position;
const Vec3f cb = vc.position - vb.position;
const Vec3f n = cross(cb, ab);
va.normal += n;
vb.normal += n;
vc.normal += n;
}
__host__ __device__ void do_edge (Vertex* edge_indices, int n_edge, float va, float vb, int axis, const Vec3f &base) {
if ((va < 0.0) == (vb < 0.0))
return;
Vec3f v = base;
v[axis] += va / (va - vb);
edge_indices[n_edge] = {v,Vec3f(0)};
//edge_indices[n_edge] = {v};
};
__device__ void do_edge (float* edge_indices, int n_edge, float va, float vb, int axis, int x, int y, int z) {
if ((va < 0.0) == (vb < 0.0))
return;
float v[3];
v[0] = x*1.0;
v[1] = y*1.0;
v[2] = z*1.0;
v[axis] += va / (va - vb);
edge_indices[6*n_edge+0] = v[0];
edge_indices[6*n_edge+1] = v[1];
edge_indices[6*n_edge+2] = v[2];
edge_indices[6*n_edge+3] = 0.0f;
edge_indices[6*n_edge+4] = 0.0f;
edge_indices[6*n_edge+5] = 0.0f;
};
//__device__ unsigned int num_vert[1] = {0};
//generate_geometry_kernel<<<grid,block>>>(d_voxels,d_vertices,marching_cube_tris_gpu,num_vert);
//__global__ void generate_geometry_kernel(float* voxels, Vertex* vertices)
extern "C"{
__global__ void mc_kernel(Vertex* vertices,float* voxels, int bidx, int boff, int dimx, int dimy, int dimz, unsigned int* num_vert,int* isolist, int start, int end, int N, float* color_table, int table_num)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
int z = threadIdx.z + blockDim.z * blockIdx.z;
//printf("Call kernel [%d,%d,%d] \n",x,y,z);
if ( x >= 0 && y >= 0 && z > 0 &&
x < dimx && y < dimy && z < dimz+1){
float vs[8];
for (int j = start ; j < end ; j++)
{
int isovalue = isolist[j];
if (isovalue == 0)
continue;
for (int i =0 ; i <8;i++)
vs[i] =0;
if (valid(x ,y ,z ,dimx,dimy,dimz+2))
vs[0] = voxels[offset_3d({x, y, z}, Vec3i((dimx)))];
if (valid(x+1,y ,z ,dimx,dimy,dimz+2))
vs[1] = voxels[offset_3d({x+1, y, z}, Vec3i((dimx)))];
if (valid(x ,y+1,z ,dimx,dimy,dimz+2))
vs[2] = voxels[offset_3d({x, y+1, z}, Vec3i((dimx)))];
if (valid(x+1,y+1,z ,dimx,dimy,dimz+2))
vs[3] = voxels[offset_3d({x+1, y+1, z}, Vec3i((dimx)))];
if (valid(x ,y ,z+1,dimx,dimy,dimz+2))
vs[4] = voxels[offset_3d({x, y, z+1}, Vec3i((dimx)))];
if (valid(x+1,y ,z+1,dimx,dimy,dimz+2))
vs[5] = voxels[offset_3d({x+1, y, z+1}, Vec3i((dimx)))];
if (valid(x ,y+1,z+1,dimx,dimy,dimz+2))
vs[6] = voxels[offset_3d({x, y+1, z+1}, Vec3i((dimx)))];
if (valid(x+1,y+1,z+1,dimx,dimy,dimz+2))
vs[7] = voxels[offset_3d({x+1, y+1, z+1}, Vec3i((dimx)))];
int check = 0;
for (int i = 0 ; i < 8 ; i++)
if ((int)vs[i] == isovalue)
check++;
if (isovalue == 0 || check ==0) continue;
for (int i = 0 ; i < 8 ; i++){
vs[i] = abs(vs[i] - isovalue) -0.5;
}
const int config_n =
((vs[0] < 0.0f) << 0) | // *1
((vs[1] < 0.0f) << 1) | // *2
((vs[2] < 0.0f) << 2) | // *4
((vs[3] < 0.0f) << 3) | // *8
((vs[4] < 0.0f) << 4) | // *16
((vs[5] < 0.0f) << 5) | // *32
((vs[6] < 0.0f) << 6) | // *64
((vs[7] < 0.0f) << 7); // *128
if (config_n == 0 || config_n == 255)
continue;
//int index_base1 = atomicAdd(num_vert,1);
// vector<Vertex> vert;
// int edge_indices[12];
//float edge_indices[12*6];
num_vert[1] = bidx;
z -= 1;
z += bidx*boff;
x -= N/2;
y -= N/2;
z -= N/2;
Vertex edge_indices[12];
do_edge(edge_indices, 0, vs[0], vs[1], 0, Vec3f(x, y, z));
do_edge(edge_indices, 1, vs[2], vs[3], 0, Vec3f(x, y+1, z));
do_edge(edge_indices, 2, vs[4], vs[5], 0, Vec3f(x, y, z+1));
do_edge(edge_indices, 3, vs[6], vs[7], 0, Vec3f(x, y+1, z+1));
do_edge(edge_indices, 4, vs[0], vs[2], 1, Vec3f(x, y, z));
do_edge(edge_indices, 5, vs[1], vs[3], 1, Vec3f(x+1, y, z));
do_edge(edge_indices, 6, vs[4], vs[6], 1, Vec3f(x, y, z+1));
do_edge(edge_indices, 7, vs[5], vs[7], 1, Vec3f(x+1, y, z+1));
do_edge(edge_indices, 8, vs[0], vs[4], 2, Vec3f(x, y, z));
do_edge(edge_indices, 9, vs[1], vs[5], 2, Vec3f(x+1, y, z));
do_edge(edge_indices, 10, vs[2], vs[6], 2, Vec3f(x, y+1, z));
do_edge(edge_indices, 11, vs[3], vs[7], 2, Vec3f(x+1, y+1, z));
/* */
/* do_edge(edge_indices, 0, vs[0], vs[1], 0, x, y, z); */
/* do_edge(edge_indices, 1, vs[2], vs[3], 0, x, y+1, z); */
/* do_edge(edge_indices, 2, vs[4], vs[5], 0, x, y, z+1); */
/* do_edge(edge_indices, 3, vs[6], vs[7], 0, x, y+1, z+1); */
/* */
/* do_edge(edge_indices, 4, vs[0], vs[2], 1, x, y, z); */
/* do_edge(edge_indices, 5, vs[1], vs[3], 1, x+1, y, z); */
/* do_edge(edge_indices, 6, vs[4], vs[6], 1, x, y, z+1); */
/* do_edge(edge_indices, 7, vs[5], vs[7], 1, x+1, y, z+1); */
/* */
/* do_edge(edge_indices, 8, vs[0], vs[4], 2, x, y, z); */
/* do_edge(edge_indices, 9, vs[1], vs[5], 2, x+1, y, z); */
/* do_edge(edge_indices, 10, vs[2], vs[6], 2, x, y+1, z); */
/* do_edge(edge_indices, 11, vs[3], vs[7], 2, x+1, y+1, z); */
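// Each marching_cube_tris entry packs the triangulation for one cube configuration:
// the low 4 bits hold the triangle count, and each following 4-bit field (decoded
// below by stepping 'offset' in increments of 4) holds an edge index in [0, 11].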
const uint64_t config = marching_cube_tris[config_n];
const int n_triangles = config & 0xF; // Maximum 15
const int n_indices = n_triangles * 3; // Maximum 45
int offset = 4;
int index_base = atomicAdd(num_vert,n_indices);
//num_vert+= n_indices;
/* */
/* for (int i = 0; i < n_indices; i++) { */
/* const int edge = (config >> offset) & 0xF; */
/* //vertices.push_back(edge_indices[edge]); */
/* // vertices[3*index_base+0] = bidx*boff; */
/* // vertices[3*index_base+1] = bidx*boff; */
/* // vertices[3*index_base+2] = bidx*boff; */
/* vertices[3*index_base+0] = edge_indices[3*edge+0]; */
/* vertices[3*index_base+1] = edge_indices[3*edge+1]; */
/* vertices[3*index_base+2] = edge_indices[3*edge+2] + bidx*boff; */
/* index_base ++; */
/* offset += 4; */
/* } */
/* */
//if (index_base + n_indices< MAXIMUM_NUM){
if (true){
int index = index_base;
for (int i = 0; i < n_indices; i++) {
const int edge = (config >> offset) & 0xF;
//vertices.push_back(edge_indices[edge]);
vertices[index++] = edge_indices[edge];
offset += 4;
}
for (int i = 0; i < n_triangles; i++) {
triangle(vertices[index_base+i*3+2],
vertices[index_base+i*3+1],
vertices[index_base+i*3+0]);
}
for (int i = 0; i < n_indices; i++) {
vertices[index_base+i].normal = normalize(vertices[index_base+i].normal);
vertices[index_base+i].position.data[0] /= N;
vertices[index_base+i].position.data[1] /= N;
vertices[index_base+i].position.data[2] /= N;
int color_num = j%table_num;
vertices[index_base+i].color.data[0] = color_table[3*color_num+0];
vertices[index_base+i].color.data[1] = color_table[3*color_num+1];
vertices[index_base+i].color.data[2] = color_table[3*color_num+2];
/*
if ( j % 4 == 0){
vertices[index_base+i].color.data[0] = 0.2;
vertices[index_base+i].color.data[1] = 1.0;
vertices[index_base+i].color.data[2] = 0.2;
}
else if ( j % 4 == 1){
vertices[index_base+i].color.data[0] = 1.0;
vertices[index_base+i].color.data[1] = 0.2;
vertices[index_base+i].color.data[2] = 0.2;
}
else if ( j % 4 == 2){
vertices[index_base+i].color.data[0] = 0.2;
vertices[index_base+i].color.data[1] = 0.2;
vertices[index_base+i].color.data[2] = 1.0;
}else {
vertices[index_base+i].color.data[0] = 0.2;
vertices[index_base+i].color.data[1] = 1.0;
vertices[index_base+i].color.data[2] = 1.0;
}*/
}
}
}
}
}
//__global__ void mc_kernel(Vertex* vertices,float* voxels, int bidx, int boff, int dimx, int dimy, int dimz, unsigned int* num_vert,int* isolist, int start, int end, int N)
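// Each input image occupies dim_x*dim_y*7 bytes: 3 bytes of RGB per pixel followed by
// a float depth buffer (4 bytes per pixel). For every pixel, the kernel keeps the RGB
// of the image with the smallest depth value.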
__global__ void composite(char* output, char* input, int num_img, int dim_x, int dim_y, int dummy1, int dummy2, unsigned int* num_vert) {
int idx_x = threadIdx.x + blockDim.x * blockIdx.x;
int idx_y = threadIdx.y + blockDim.y * blockIdx.y;
if(dim_x <= idx_x || dim_y <= idx_y) return;
int index = (idx_y * dim_x + idx_x);
char* img1 = input;
float *depth_table1 = (float*) (img1 + dim_x*dim_y*3);
float val1 = depth_table1[index];
char* img = img1;
//RGB Copy
output[3*index+0] = img[3*index+0];
output[3*index+1] = img[3*index+1];
output[3*index+2] = img[3*index+2];
for (int i = 1 ; i< num_img ;i++)
{
char* img2 = input+(dim_x*dim_y*7)*i;
float *depth_table2 = (float*) (img2 + dim_x*dim_y*3);
float val2 = depth_table2[index];
// unsigned char* image = (unsigned char*) input;
if (val1 > val2){
val1 = val2;
//RGB Copy
output[3*index+0] = img2[3*index+0];
output[3*index+1] = img2[3*index+1];
output[3*index+2] = img2[3*index+2];
}
}
return ;
}
}
|
71cffb6e3e7a7747946f559c974d0311c70c2ea0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_drhoEpudx_kernel;
int xdim0_drhoEpudx_kernel_h = -1;
int ydim0_drhoEpudx_kernel_h = -1;
__constant__ int xdim1_drhoEpudx_kernel;
int xdim1_drhoEpudx_kernel_h = -1;
int ydim1_drhoEpudx_kernel_h = -1;
__constant__ int xdim2_drhoEpudx_kernel;
int xdim2_drhoEpudx_kernel_h = -1;
int ydim2_drhoEpudx_kernel_h = -1;
__constant__ int xdim3_drhoEpudx_kernel;
int xdim3_drhoEpudx_kernel_h = -1;
int ydim3_drhoEpudx_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
#define OPS_ACC2(x) (x)
#define OPS_ACC3(x) (x)
// user function
__device__
void
drhoEpudx_kernel_gpu(const double *rhou_new, const double *rho_new,
const double *rhoE_new, double *rhoE_res) {
double fni =
rhou_new[OPS_ACC0(0)] * rhou_new[OPS_ACC0(0)] / rho_new[OPS_ACC1(0)];
double p = gam1 * (rhoE_new[OPS_ACC2(0)] - 0.5 * fni);
fni = (rhoE_new[OPS_ACC2(0)] + p) * rhou_new[OPS_ACC0(0)] /
rho_new[OPS_ACC1(0)];
double fnim1 =
rhou_new[OPS_ACC0(-1)] * rhou_new[OPS_ACC0(-1)] / rho_new[OPS_ACC1(-1)];
p = gam1 * (rhoE_new[OPS_ACC2(-1)] - 0.5 * fnim1);
fnim1 = (rhoE_new[OPS_ACC2(-1)] + p) * rhou_new[OPS_ACC0(-1)] /
rho_new[OPS_ACC1(-1)];
double fnim2 =
rhou_new[OPS_ACC0(-2)] * rhou_new[OPS_ACC0(-2)] / rho_new[OPS_ACC1(-2)];
p = gam1 * (rhoE_new[OPS_ACC2(-2)] - 0.5 * fnim2);
fnim2 = (rhoE_new[OPS_ACC2(-2)] + p) * rhou_new[OPS_ACC0(-2)] /
rho_new[OPS_ACC1(-2)];
double fnip1 =
rhou_new[OPS_ACC0(1)] * rhou_new[OPS_ACC0(1)] / rho_new[OPS_ACC1(1)];
p = gam1 * (rhoE_new[OPS_ACC2(1)] - 0.5 * fnip1);
fnip1 = (rhoE_new[OPS_ACC2(1)] + p) * rhou_new[OPS_ACC0(1)] /
rho_new[OPS_ACC1(1)];
double fnip2 =
rhou_new[OPS_ACC0(2)] * rhou_new[OPS_ACC0(2)] / rho_new[OPS_ACC1(2)];
p = gam1 * (rhoE_new[OPS_ACC2(2)] - 0.5 * fnip2);
fnip2 = (rhoE_new[OPS_ACC2(2)] + p) * rhou_new[OPS_ACC0(2)] /
rho_new[OPS_ACC1(2)];
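// Fourth-order central difference of the energy flux fn = (rhoE + p)*u:
// d(fn)/dx ~= (fn[i-2] - 8*fn[i-1] + 8*fn[i+1] - fn[i+2]) / (12*dx)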
double deriv = (fnim2 - fnip2 + 8.0 * (fnip1 - fnim1)) / (12.00 * dx);
rhoE_res[OPS_ACC3(0)] = deriv;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_drhoEpudx_kernel(const double *__restrict arg0,
const double *__restrict arg1,
const double *__restrict arg2,
double *__restrict arg3, int size0) {
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1;
arg1 += idx_x * 1 * 1;
arg2 += idx_x * 1 * 1;
arg3 += idx_x * 1 * 1;
if (idx_x < size0) {
drhoEpudx_kernel_gpu(arg0, arg1, arg2, arg3);
}
}
// host stub function
void ops_par_loop_drhoEpudx_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3) {
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 4, range, 5))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(5, "drhoEpudx_kernel");
OPS_kernels[5].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[1];
int end[1];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 1; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 1; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
if (xdim0 != xdim0_drhoEpudx_kernel_h || xdim1 != xdim1_drhoEpudx_kernel_h ||
xdim2 != xdim2_drhoEpudx_kernel_h || xdim3 != xdim3_drhoEpudx_kernel_h) {
hipMemcpyToSymbol(xdim0_drhoEpudx_kernel, &xdim0, sizeof(int));
xdim0_drhoEpudx_kernel_h = xdim0;
hipMemcpyToSymbol(xdim1_drhoEpudx_kernel, &xdim1, sizeof(int));
xdim1_drhoEpudx_kernel_h = xdim1;
hipMemcpyToSymbol(xdim2_drhoEpudx_kernel, &xdim2, sizeof(int));
xdim2_drhoEpudx_kernel_h = xdim2;
hipMemcpyToSymbol(xdim3_drhoEpudx_kernel, &xdim3, sizeof(int));
xdim3_drhoEpudx_kernel_h = xdim3;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
dim3 tblock(OPS_block_size_x, 1, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
char *p_a[4];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
p_a[3] = (char *)args[3].data_d + base3;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[5].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_drhoEpudx_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[5].time += t1 - t2;
}
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[3], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[5].mpi_time += t2 - t1;
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
| 71cffb6e3e7a7747946f559c974d0311c70c2ea0.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_drhoEpudx_kernel;
int xdim0_drhoEpudx_kernel_h = -1;
int ydim0_drhoEpudx_kernel_h = -1;
__constant__ int xdim1_drhoEpudx_kernel;
int xdim1_drhoEpudx_kernel_h = -1;
int ydim1_drhoEpudx_kernel_h = -1;
__constant__ int xdim2_drhoEpudx_kernel;
int xdim2_drhoEpudx_kernel_h = -1;
int ydim2_drhoEpudx_kernel_h = -1;
__constant__ int xdim3_drhoEpudx_kernel;
int xdim3_drhoEpudx_kernel_h = -1;
int ydim3_drhoEpudx_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
#define OPS_ACC2(x) (x)
#define OPS_ACC3(x) (x)
// user function
__device__
void
drhoEpudx_kernel_gpu(const double *rhou_new, const double *rho_new,
const double *rhoE_new, double *rhoE_res) {
double fni =
rhou_new[OPS_ACC0(0)] * rhou_new[OPS_ACC0(0)] / rho_new[OPS_ACC1(0)];
double p = gam1 * (rhoE_new[OPS_ACC2(0)] - 0.5 * fni);
fni = (rhoE_new[OPS_ACC2(0)] + p) * rhou_new[OPS_ACC0(0)] /
rho_new[OPS_ACC1(0)];
double fnim1 =
rhou_new[OPS_ACC0(-1)] * rhou_new[OPS_ACC0(-1)] / rho_new[OPS_ACC1(-1)];
p = gam1 * (rhoE_new[OPS_ACC2(-1)] - 0.5 * fnim1);
fnim1 = (rhoE_new[OPS_ACC2(-1)] + p) * rhou_new[OPS_ACC0(-1)] /
rho_new[OPS_ACC1(-1)];
double fnim2 =
rhou_new[OPS_ACC0(-2)] * rhou_new[OPS_ACC0(-2)] / rho_new[OPS_ACC1(-2)];
p = gam1 * (rhoE_new[OPS_ACC2(-2)] - 0.5 * fnim2);
fnim2 = (rhoE_new[OPS_ACC2(-2)] + p) * rhou_new[OPS_ACC0(-2)] /
rho_new[OPS_ACC1(-2)];
double fnip1 =
rhou_new[OPS_ACC0(1)] * rhou_new[OPS_ACC0(1)] / rho_new[OPS_ACC1(1)];
p = gam1 * (rhoE_new[OPS_ACC2(1)] - 0.5 * fnip1);
fnip1 = (rhoE_new[OPS_ACC2(1)] + p) * rhou_new[OPS_ACC0(1)] /
rho_new[OPS_ACC1(1)];
double fnip2 =
rhou_new[OPS_ACC0(2)] * rhou_new[OPS_ACC0(2)] / rho_new[OPS_ACC1(2)];
p = gam1 * (rhoE_new[OPS_ACC2(2)] - 0.5 * fnip2);
fnip2 = (rhoE_new[OPS_ACC2(2)] + p) * rhou_new[OPS_ACC0(2)] /
rho_new[OPS_ACC1(2)];
double deriv = (fnim2 - fnip2 + 8.0 * (fnip1 - fnim1)) / (12.00 * dx);
rhoE_res[OPS_ACC3(0)] = deriv;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_drhoEpudx_kernel(const double *__restrict arg0,
const double *__restrict arg1,
const double *__restrict arg2,
double *__restrict arg3, int size0) {
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1;
arg1 += idx_x * 1 * 1;
arg2 += idx_x * 1 * 1;
arg3 += idx_x * 1 * 1;
if (idx_x < size0) {
drhoEpudx_kernel_gpu(arg0, arg1, arg2, arg3);
}
}
// host stub function
void ops_par_loop_drhoEpudx_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3) {
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 4, range, 5))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(5, "drhoEpudx_kernel");
OPS_kernels[5].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[1];
int end[1];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 1; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 1; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
if (xdim0 != xdim0_drhoEpudx_kernel_h || xdim1 != xdim1_drhoEpudx_kernel_h ||
xdim2 != xdim2_drhoEpudx_kernel_h || xdim3 != xdim3_drhoEpudx_kernel_h) {
cudaMemcpyToSymbol(xdim0_drhoEpudx_kernel, &xdim0, sizeof(int));
xdim0_drhoEpudx_kernel_h = xdim0;
cudaMemcpyToSymbol(xdim1_drhoEpudx_kernel, &xdim1, sizeof(int));
xdim1_drhoEpudx_kernel_h = xdim1;
cudaMemcpyToSymbol(xdim2_drhoEpudx_kernel, &xdim2, sizeof(int));
xdim2_drhoEpudx_kernel_h = xdim2;
cudaMemcpyToSymbol(xdim3_drhoEpudx_kernel, &xdim3, sizeof(int));
xdim3_drhoEpudx_kernel_h = xdim3;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
dim3 tblock(OPS_block_size_x, 1, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
char *p_a[4];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
p_a[3] = (char *)args[3].data_d + base3;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[5].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_drhoEpudx_kernel<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[5].time += t1 - t2;
}
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[3], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[5].mpi_time += t2 - t1;
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
|
848067a8634bdb9f50e56c426098ed70ff23160d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// Tells the CUDA C++ compiler that this is a function (kernel) that runs on the GPU and can be called from CPU code
// These __global__ functions are known as kernels, and code that runs on the GPU is often called device code, while code that runs on the CPU is host code.
__global__ void add(int n, float *x, float *y) {
for (int i = 0; i < n; ++i) {
y[i] = x[i] + y[i];
}
}
int main() {
int N = 1 << 20; // 1M
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N * sizeof(float));
hipMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; ++i) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, N, x, y);
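// Note: a single block with a single thread is launched here, so the loop in add()
// walks over all N elements serially on one GPU thread.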
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
float max_error = 0.0f;
for (int i = 0; i < N; ++i) {
max_error = fmax(max_error, fabs(y[i] - 3.0f));
}
std::cout << "max error: " << max_error << std::endl;
// free memory
hipFree(x);
hipFree(y);
return 0;
} | 848067a8634bdb9f50e56c426098ed70ff23160d.cu | #include <iostream>
#include <math.h>
// Tells the CUDA C++ compiler that this is a function (kernel) that runs on the GPU and can be called from CPU code
// These __global__ functions are known as kernels, and code that runs on the GPU is often called device code, while code that runs on the CPU is host code.
__global__ void add(int n, float *x, float *y) {
for (int i = 0; i < n; ++i) {
y[i] = x[i] + y[i];
}
}
int main() {
int N = 1 << 20; // 1M
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N * sizeof(float));
cudaMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; ++i) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
add<<<1, 1>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
float max_error = 0.0f;
for (int i = 0; i < N; ++i) {
max_error = fmax(max_error, fabs(y[i] - 3.0f));
}
std::cout << "max error: " << max_error << std::endl;
// free memory
cudaFree(x);
cudaFree(y);
return 0;
} |
776ed30abcf11c4137cd6d835885d59316184594.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "reference_calc.cpp"
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
// MODIFIED
int posX = blockIdx.x * blockDim.x + threadIdx.x;
int posY = blockIdx.y * blockDim.y + threadIdx.y;
if ( posX >= numCols || posY >= numRows ) {
return;
}
int idx = posY * numCols + posX;
// copy filter to shared memory
extern __shared__ float sh_filter[];
if (idx < filterWidth * filterWidth) {
sh_filter[idx] = filter[idx];
}
__syncthreads();
// TODO: copy channel to shared memory if possible
// blur happens here
float result = 0.0;
// For every value in the filter around the pixel, r: Row, c: Col
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
// Find the global image position for this filter position
// clamp to boundary of the image
int image_r = min(max(posY + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(posX + filter_c, 0), static_cast<int>(numCols - 1));
// TODO: check if r * ncols + c is faster than r + c * nrows
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[idx] = result;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// MODIFIED
int posX = blockIdx.x * blockDim.x + threadIdx.x;
int posY = blockIdx.y * blockDim.y + threadIdx.y;
if ( posX >= numCols || posY >= numRows ) {
return;
}
int idx = posY * numCols + posX;
redChannel[idx] = inputImageRGBA[idx].x;
greenChannel[idx] = inputImageRGBA[idx].y;
blueChannel[idx] = inputImageRGBA[idx].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
// MODIFIED
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
// MODIFIED
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter,
sizeof(float) * filterWidth * filterWidth,
hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
// MODIFIED
int h_nblock = 16;
int h_ngridr = 0;
int h_ngridc = 0;
if (numRows % h_nblock) {
h_ngridr = numRows / h_nblock + 1;
} else {
h_ngridr = numRows / h_nblock;
}
if (numCols % h_nblock) {
h_ngridc = numCols / h_nblock + 1;
} else {
h_ngridc = numCols / h_nblock;
}
// Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(h_nblock, h_nblock, 1);
// Compute correct grid size (i.e., number of blocks per kernel launch)
// from the image size and block size.
const dim3 gridSize(h_ngridc, h_ngridr);
// MODIFIED
// Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols,
d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// MODIFIED
// Call your convolution kernel here 3 times, once for each color channel.
size_t bytesShared = sizeof(float) * filterWidth * filterWidth;
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), bytesShared, 0, d_red, d_redBlurred,
numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), bytesShared, 0, d_green, d_greenBlurred,
numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), bytesShared, 0, d_blue, d_blueBlurred,
numRows, numCols,
d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
| 776ed30abcf11c4137cd6d835885d59316184594.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "reference_calc.cpp"
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
// MODIFIED
int posX = blockIdx.x * blockDim.x + threadIdx.x;
int posY = blockIdx.y * blockDim.y + threadIdx.y;
if ( posX >= numCols || posY >= numRows ) {
return;
}
int idx = posY * numCols + posX;
// copy filter to shared memory
extern __shared__ float sh_filter[];
if (idx < filterWidth * filterWidth) {
sh_filter[idx] = filter[idx];
}
__syncthreads();
// TODO: copy channel to shared memory if possible
// blur happens here
float result = 0.0;
// For every value in the filter around the pixel, r: Row, c: Col
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
// Find the global image position for this filter position
// clamp to boundary of the image
int image_r = min(max(posY + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(posX + filter_c, 0), static_cast<int>(numCols - 1));
// TODO: check if r * ncols + c is faster than r + c * nrows
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[idx] = result;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// MODIFIED
int posX = blockIdx.x * blockDim.x + threadIdx.x;
int posY = blockIdx.y * blockDim.y + threadIdx.y;
if ( posX >= numCols || posY >= numRows ) {
return;
}
int idx = posY * numCols + posX;
redChannel[idx] = inputImageRGBA[idx].x;
greenChannel[idx] = inputImageRGBA[idx].y;
blueChannel[idx] = inputImageRGBA[idx].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
// MODIFIED
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
// MODIFIED
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter,
sizeof(float) * filterWidth * filterWidth,
cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
// MODIFIED
int h_nblock = 16;
int h_ngridr = 0;
int h_ngridc = 0;
if (numRows % h_nblock) {
h_ngridr = numRows / h_nblock + 1;
} else {
h_ngridr = numRows / h_nblock;
}
if (numCols % h_nblock) {
h_ngridc = numCols / h_nblock + 1;
} else {
h_ngridc = numCols / h_nblock;
}
// Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(h_nblock, h_nblock, 1);
// Compute correct grid size (i.e., number of blocks per kernel launch)
// from the image size and block size.
const dim3 gridSize(h_ngridc, h_ngridr);
// MODIFIED
// Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols,
d_red, d_green, d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// MODIFIED
// Call your convolution kernel here 3 times, once for each color channel.
size_t bytesShared = sizeof(float) * filterWidth * filterWidth;
gaussian_blur<<<gridSize, blockSize, bytesShared>>>(d_red, d_redBlurred,
numRows, numCols,
d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize, bytesShared>>>(d_green, d_greenBlurred,
numRows, numCols,
d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize, bytesShared>>>(d_blue, d_blueBlurred,
numRows, numCols,
d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
b75419751f3e27b24fc109a74390029f0198eee0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<ctime>
#include<assert.h>
#include<algorithm>
#define EACH_THREAD 256
#define SIZE 1024
using namespace std;
template<typename T>
__device__ unsigned int convert(T t) {
assert(false);
}
template<>
__device__ __host__ unsigned int convert(float v) {
// if v >= 0, set the sign bit (v |= 2^31); if v < 0, flip all bits (v = ~v), so that
// unsigned comparison of the converted values preserves the ordering of the original floats
unsigned int cmp = *reinterpret_cast<unsigned int*>(&v);
unsigned int ret = (cmp & (1<<31)) ? ~(cmp): (cmp | 0x80000000);
return ret;
}
__device__ __host__ float deconvert(unsigned int v) {
unsigned int tmp = (v & (1 << 31)) ? (v ^ 0x80000000) : ~(v);
return *reinterpret_cast<float*>(&tmp);
}
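// Example of the ordering this mapping produces (unsigned comparison):
// convert(-2.0f) = 0x3FFFFFFF < convert(-1.0f) = 0x407FFFFF < convert(0.0f) = 0x80000000
// < convert(1.0f) = 0xBF800000 < convert(2.0f) = 0xC0000000,
// i.e. the converted keys sort in the same order as the original floats.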
template<unsigned int each_thread>
void __global__ RadixSort(float* data, unsigned int* sort_tmp0,unsigned int* sort_tmp1, unsigned int n) {
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x * each_thread;
unsigned int tid = threadIdx.x;
if(idx >= n) {
return ;
}
//bitwise push
for (unsigned int bit = 0;bit < 32;bit++) {
unsigned int mask = 1<<bit;
unsigned int cnt0 = 0,cnt1 = 0;
for(unsigned int i = tid;i < n;i += blockDim.x) {
unsigned int elem = (bit == 0 ? convert(data[i]):sort_tmp0[i]);
if((elem&mask) != 0) {
sort_tmp1[cnt1 + tid] = elem;
cnt1 += blockDim.x;
}
else {
sort_tmp0[cnt0 + tid] = elem;
cnt0 += blockDim.x;
}
}
for (unsigned int i = 0;i < cnt1;i += blockDim.x) {
sort_tmp0[cnt0 + i + tid] = sort_tmp1[i + tid];
}
}
//merge
__shared__ unsigned int min_value, min_tid;
__shared__ unsigned int list_idx[1024];
unsigned int elem = 0xffffffff;
list_idx[tid] = 0;
__syncthreads();
for(unsigned int i = 0;i < n;i++) {
unsigned int x = (list_idx[tid] * blockDim.x) + tid;
if(x < n) {
elem = sort_tmp0[x];
}
else {
elem = 0xffffffff;
}
__syncthreads();
min_value = min_tid = 0xffffffff;
atomicMin(&min_value,elem);
__syncthreads();
if(min_value == elem) {
atomicMin(&min_tid, tid);
}
__syncthreads();
if(min_tid == tid) {
list_idx[tid]++;
data[i] = deconvert(min_value);
}
}
}
void RadixSortHost(float *v,unsigned int size) {
unsigned int sort_tmp0[size],sort_tmp1[size];
for(int bit = 0;bit < 32;bit++) {
unsigned int mask = 1 << bit;
unsigned int cnt0 = 0,cnt1 = 0;
for (int i = 0;i < size;i++) {
unsigned int elem = ((bit == 0) ?convert(v[i]):sort_tmp0[i]);
if((elem & mask) != 0) {
sort_tmp1[cnt1++] = elem;
}
else {
sort_tmp0[cnt0++] = elem;
}
}
for(int i = 0;i < cnt1;i++) {
sort_tmp0[cnt0 + i] = sort_tmp1[i];
}
}
for(int i = 0;i < size;i++) {
v[i] = deconvert(sort_tmp0[i]);
}
}
__shared__ unsigned int sort_tmp0[SIZE * EACH_THREAD];
__shared__ unsigned int sort_tmp1[SIZE * EACH_THREAD];
int main() {
int N = SIZE * EACH_THREAD;
float a[N],b[N];
auto init = [](auto*a ,unsigned int size)->void {
for(int i = 0;i < size;i++) {
a[i] = pow(-1,i) * (random()%1000);
}
};
init(a, N);
float *a_dev;
clock_t start ,end;
unsigned int *sort_tmp0, *sort_tmp1;
dim3 block(SIZE,1);
dim3 grid( 1, 1);
hipMalloc((void**)&a_dev, sizeof(float)*N);
hipMalloc((void**)&sort_tmp0, sizeof(unsigned int)*N);
hipMalloc((void**)&sort_tmp1, sizeof(unsigned int)*N);
hipMemcpy(a_dev, a, sizeof(float)*N,hipMemcpyHostToDevice);
hipDeviceSynchronize();
start = clock();
hipLaunchKernelGGL(( RadixSort<EACH_THREAD>), dim3(grid),dim3(block), 0, 0, a_dev, sort_tmp0, sort_tmp1, N);
hipDeviceSynchronize();
end = clock();
cout << "gpu time:" << end - start << endl;
hipMemcpy(b, a_dev, sizeof(float)*N, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
for (int i = 0;i < N;i++) {
// cout << b[i] <<" ";
}
cout << endl;
RadixSortHost(a,N);
for (int i = 0;i < N;i++) {
if(a[i] != b[i]) {
printf("error: index%u gpu:%f cpu:%f\n",i,b[i], a[i]);
}
}
return 0;
}
| b75419751f3e27b24fc109a74390029f0198eee0.cu | #include<iostream>
#include<ctime>
#include<assert.h>
#include<algorithm>
#define EACH_THREAD 256
#define SIZE 1024
using namespace std;
template<typename T>
__device__ unsigned int convert(T t) {
assert(false);
}
template<>
__device__ __host__ unsigned int convert(float v) {
// if v >= 0, set the sign bit (v |= 2^31); if v < 0, flip all bits (v = ~v), so that
// unsigned comparison of the converted values preserves the ordering of the original floats
unsigned int cmp = *reinterpret_cast<unsigned int*>(&v);
unsigned int ret = (cmp & (1<<31)) ? ~(cmp): (cmp | 0x80000000);
return ret;
}
__device__ __host__ float deconvert(unsigned int v) {
unsigned int tmp = (v & (1 << 31)) ? (v ^ 0x80000000) : ~(v);
return *reinterpret_cast<float*>(&tmp);
}
template<unsigned int each_thread>
void __global__ RadixSort(float* data, unsigned int* sort_tmp0,unsigned int* sort_tmp1, unsigned int n) {
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x * each_thread;
unsigned int tid = threadIdx.x;
if(idx >= n) {
return ;
}
//bitwise push
for (unsigned int bit = 0;bit < 32;bit++) {
unsigned int mask = 1<<bit;
unsigned int cnt0 = 0,cnt1 = 0;
for(unsigned int i = tid;i < n;i += blockDim.x) {
unsigned int elem = (bit == 0 ? convert(data[i]):sort_tmp0[i]);
if((elem&mask) != 0) {
sort_tmp1[cnt1 + tid] = elem;
cnt1 += blockDim.x;
}
else {
sort_tmp0[cnt0 + tid] = elem;
cnt0 += blockDim.x;
}
}
for (unsigned int i = 0;i < cnt1;i += blockDim.x) {
sort_tmp0[cnt0 + i + tid] = sort_tmp1[i + tid];
}
}
//merge
__shared__ unsigned int min_value, min_tid;
__shared__ unsigned int list_idx[1024];
unsigned int elem = 0xffffffff;
list_idx[tid] = 0;
__syncthreads();
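// Merge phase: on every pass each thread proposes the head of its own sorted
// sub-list, atomicMin selects the global minimum, and only the winning thread
// advances its list index and writes the value back out.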
for(unsigned int i = 0;i < n;i++) {
unsigned int x = (list_idx[tid] * blockDim.x) + tid;
if(x < n) {
elem = sort_tmp0[x];
}
else {
elem = 0xffffffff;
}
__syncthreads();
// Reset the shared minimum from a single thread so the plain store cannot
// race with the atomicMin calls below.
if (tid == 0) {
min_value = min_tid = 0xffffffff;
}
__syncthreads();
atomicMin(&min_value,elem);
__syncthreads();
if(min_value == elem) {
atomicMin(&min_tid, tid);
}
__syncthreads();
if(min_tid == tid) {
list_idx[tid]++;
data[i] = deconvert(min_value);
}
}
}
void RadixSortHost(float *v,unsigned int size) {
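// CPU reference: the same LSB-first binary radix sort, used by main() to
// validate the GPU result element by element.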
unsigned int sort_tmp0[size],sort_tmp1[size];
for(int bit = 0;bit < 32;bit++) {
unsigned int mask = 1 << bit;
unsigned int cnt0 = 0,cnt1 = 0;
for (int i = 0;i < size;i++) {
unsigned int elem = ((bit == 0) ? convert(v[i]) : sort_tmp0[i]);
if((elem & mask) != 0) {
sort_tmp1[cnt1++] = elem;
}
else {
sort_tmp0[cnt0++] = elem;
}
}
for(int i = 0;i < cnt1;i++) {
sort_tmp0[cnt0 + i] = sort_tmp1[i];
}
}
for(int i = 0;i < size;i++) {
v[i] = deconvert(sort_tmp0[i]);
}
}
__shared__ unsigned int sort_tmp0[SIZE * EACH_THREAD];
__shared__ unsigned int sort_tmp1[SIZE * EACH_THREAD];
int main() {
int N = SIZE * EACH_THREAD;
float a[N],b[N];
auto init = [](auto*a ,unsigned int size)->void {
for(int i = 0;i < size;i++) {
a[i] = pow(-1,i) * (random()%1000);
}
};
init(a, N);
float *a_dev;
clock_t start ,end;
unsigned int *sort_tmp0, *sort_tmp1;
dim3 block(SIZE,1);
dim3 grid( 1, 1);
cudaMalloc((void**)&a_dev, sizeof(float)*N);
cudaMalloc((void**)&sort_tmp0, sizeof(unsigned int)*N);
cudaMalloc((void**)&sort_tmp1, sizeof(unsigned int)*N);
cudaMemcpy(a_dev, a, sizeof(float)*N,cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
start = clock();
RadixSort<EACH_THREAD><<<grid,block>>>(a_dev, sort_tmp0, sort_tmp1, N);
cudaDeviceSynchronize();
end = clock();
cout << "gpu time:" << end - start << endl;
cudaMemcpy(b, a_dev, sizeof(float)*N, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for (int i = 0;i < N;i++) {
// cout << b[i] <<" ";
}
cout << endl;
RadixSortHost(a,N);
for (int i = 0;i < N;i++) {
if(a[i] != b[i]) {
printf("error: index%u gpu:%f cpu:%f\n",i,b[i], a[i]);
}
}
return 0;
}
|
94672a300285063dc1242d013382bdc0f67bd3f6.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#include "stream_compaction/efficient.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
static BVHNodeDev * dev_bvhNodes = NULL;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
const int samplesPerPixel = cam.samplesPerPixel;
hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
hipMalloc(&dev_paths, pixelcount * samplesPerPixel * sizeof(PathSegment));
hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);
hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
int bvhNodesCount = hst_scene->bvhNodes.size();
hipMalloc(&dev_bvhNodes, bvhNodesCount * sizeof(BVHNodeDev));
hipMemcpy(dev_bvhNodes, hst_scene->bvhNodes.data(), bvhNodesCount * sizeof(BVHNodeDev), hipMemcpyHostToDevice);
checkCUDAError("pathtraceInit");
}
void updateGeom(Scene *scene) {
hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
}
void pathtraceFree() {
hipFree(dev_image); // no-op if dev_image is null
hipFree(dev_paths);
hipFree(dev_geoms);
hipFree(dev_materials);
hipFree(dev_intersections);
// TODO: clean up any extra device memory you created
hipFree(dev_bvhNodes);
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(
Camera cam
, int iter
, int traceDepth
, PathSegment* pathSegments
)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
for (int i = 0; i < cam.samplesPerPixel; ++i) {
int index = i + (x * cam.samplesPerPixel) + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
// TODO: implement antialiasing by jittering the ray
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
segment.ray.direction = glm::normalize(
cam.forward
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f )
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
segment.pixelIndex = x + (y * cam.resolution.x);
segment.remainingBounces = traceDepth;
}
}
}
__device__ float computeIntersection(
const PathSegment& pathSegment
, const Geom& geom
, glm::vec3& intersect_point
, glm::vec3& normal
) {
float t = -1;
bool outside = true;
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, intersect_point, normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, intersect_point, normal, outside);
}
else if (geom.type == TRIANGLE)
{
t = triangleIntersectionTest(geom, pathSegment.ray, intersect_point, normal, outside);
}
return t;
}
/*
* Iterative stack-less BVH traversal using state logic and pointers to nodes.
* \ref https://graphics.cg.uni-saarland.de/fileadmin/cguds/papers/2011/hapala_sccg2011/hapala_sccg2011.pdf
*/
__global__ void traverseBVH(
int depth
, int num_paths
, PathSegment * pathSegments
, int num_bvhNodes
, BVHNodeDev* bvhNodes
, int rootIdx
, int geoms_size
, Geom * geoms
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
Geom* hit_geom = nullptr;
BVHNodeDev current = bvhNodes[rootIdx];
EBVHTransition transition = EBVHTransition::FromParent;
bool isIterating = true;
while (isIterating) {
// States (reproduced here from the Stack-less BVH Traversal paper [Hapala et al. 2011])
// Link: https://graphics.cg.uni-saarland.de/fileadmin/cguds/papers/2011/hapala_sccg2011/hapala_sccg2011.pdf
switch (transition) {
// 1. From child
// In the fromChild case the current node was already tested when going
// down, and does not have to be re-tested. The next node to traverse
// is either current's sibling farChild (if current is nearChild),
// or its parent (if current was farChild).
//
case EBVHTransition::FromChild:
if (current.idx == rootIdx) {
// Current has reached root
isIterating = false;
}
else if (current.idx == bvhNodes[current.parentIdx].nearChildIdx) {
// Current is near child, so transition to far child
current = bvhNodes[bvhNodes[current.parentIdx].farChildIdx];
transition = EBVHTransition::FromSibling;
}
else {
// Current is far child, go back to parent
current = bvhNodes[current.parentIdx];
transition = EBVHTransition::FromChild;
}
break;
// 2. From sibling
// In the fromSibling case, we know that we are entering farChild (it
// cannot be reached in any other way), and that we are traversing this
// node for the first time (i.e. a box test has to be done). If the node
// is missed, we back-track to its parent; otherwise, the current node
// has to be processed: if it is a leaf node, we intersect its primitives
// against the ray, and proceed to parent. Otherwise (i.e. if the node
// was hit but is not a leaf), we enter current's subtree by performing
// a fromParent step to current's first child.
case EBVHTransition::FromSibling:
if (current.geomIdx != -1) {
// Leaf node
t = computeIntersection(pathSegment, geoms[current.geomIdx], tmp_intersect, tmp_normal);
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
intersect_point = tmp_intersect;
normal = tmp_normal;
hit_geom = &(geoms[current.geomIdx]);
}
current = bvhNodes[current.parentIdx];
transition = EBVHTransition::FromChild;
}
else {
// When this isn't a leaf node, check bbox intersection
bool hit = bboxIntersectionTest(current.bboxGeom, pathSegments[path_index].ray);
if (!hit) {
// Missed, go back up to parent
if (current.idx == rootIdx) {
// Current has reached root
isIterating = false;
} else {
current = bvhNodes[current.parentIdx];
transition = EBVHTransition::FromChild;
}
}
else {
// Hit, enter its subtree (near child)
current = bvhNodes[current.nearChildIdx];
transition = EBVHTransition::FromParent;
}
}
break;
// 3. From parent
// Finally, in the fromParent case, we know that we are entering
// nearChild and we do exactly the same as in the previous case,
// except that every time we would have gone to parent we go to
// farChild instead.
case EBVHTransition::FromParent:
if (current.geomIdx != -1) {
// Leaf node
t = computeIntersection(pathSegment, geoms[current.geomIdx], tmp_intersect, tmp_normal);
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
intersect_point = tmp_intersect;
normal = tmp_normal;
hit_geom = &(geoms[current.geomIdx]);
}
if (current.idx == rootIdx) {
// Current has reached root
isIterating = false;
} else {
// Go to far sibling
current = bvhNodes[bvhNodes[current.parentIdx].farChildIdx];
transition = EBVHTransition::FromSibling;
}
}
else {
// When this isn't a leaf node, check bbox intersection
bool hit = bboxIntersectionTest(current.bboxGeom, pathSegments[path_index].ray);
if (!hit) {
// Missed, go to far sibling
if (current.idx == rootIdx) {
// Current has reached root
isIterating = false;
} else {
current = bvhNodes[bvhNodes[current.parentIdx].farChildIdx];
transition = EBVHTransition::FromSibling;
}
}
else {
// Hit, enter its subtree
current = bvhNodes[current.nearChildIdx];
transition = EBVHTransition::FromParent;
}
}
break;
default:
// *N.B*: Should never reach here
assert(false);
break;
}
}
if (hit_geom == nullptr)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = hit_geom->materialid;
intersections[path_index].surfaceNormal = normal;
intersections[path_index].intersect_point = intersect_point;
}
}
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
t = computeIntersection(pathSegments[path_index], geom, tmp_intersect, tmp_normal);
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
intersections[path_index].intersect_point = intersect_point;
}
}
}
__global__ void shadeMaterial(
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths && pathSegments[idx].remainingBounces > 0)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
else {
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces);
scatterRay(pathSegments[idx], intersection.intersect_point, intersection.surfaceNormal, materials[intersection.materialId], rng);
pathSegments[idx].remainingBounces -= 1;
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
}
else {
pathSegments[idx].color = glm::vec3(0);
pathSegments[idx].remainingBounces = 0;
}
}
}
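// Accumulate only the paths that terminated on this bounce; used together with
// stream compaction, which removes those paths right afterwards.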
__global__ void partialGather(
const Camera cam
, int nPaths
, glm::vec3* image
, PathSegment* iterationPaths
)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
if (iterationPath.remainingBounces == 0) {
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(
const Camera cam
, int nPaths
, glm::vec3* image
, PathSegment* iterationPaths
)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
if (iterationPath.remainingBounces >= 0) {
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
}
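// Predicate for thrust::remove_if / stream compaction: true once a path has no
// bounces left.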
struct shouldTerminatePath
{
__host__ __device__
bool operator()(const PathSegment& p)
{
return p.remainingBounces == 0;
};
};
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 8;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// Perform one iteration of path tracing
hipLaunchKernelGGL(( generateRayFromCamera) , dim3(blocksPerGrid2d), dim3(blockSize2d) , 0, 0, cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
//hipEventRecord(start);
if (hst_scene->BVH_ENABLED) {
traverseBVH << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, hst_scene->bvhNodes.size()
, dev_bvhNodes
, hst_scene->root->nodeIdx
, hst_scene->geoms.size()
, dev_geoms
, dev_intersections
);
} else {
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
}
checkCUDAError("trace one bounce");
hipDeviceSynchronize();
//hipEventRecord(stop);
depth++;
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
hipLaunchKernelGGL(( shadeMaterial), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials
);
if (hst_scene->STREAM_COMPACTION_ENABLED) {
// If using stream compaction, we have to use partial gather
dim3 numBlocksPixels = (num_paths + blockSize1d - 1) / blockSize1d;
partialGather << <numBlocksPixels, blockSize1d >> >(cam, num_paths, dev_image, dev_paths);
#define USE_THRUST
#ifdef USE_THRUST
PathSegment* new_dev_paths_end = thrust::remove_if(thrust::device, dev_paths, dev_paths + num_paths, shouldTerminatePath());
num_paths = new_dev_paths_end - dev_paths;
#else
num_paths = StreamCompaction::OptimizedEfficient::compact(num_paths, dev_paths, dev_paths);
#endif
}
//hipEventSynchronize(stop);
//float ms;
//hipEventElapsedTime(&ms, start, stop);
//cout << ms << endl;
iterationComplete = num_paths == 0 || depth > traceDepth;
}
if (!hst_scene->STREAM_COMPACTION_ENABLED) {
// If not using stream compaction, apply final gather
// Assemble this iteration and apply it to the image
num_paths = dev_path_end - dev_paths;
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather << <numBlocksPixels, blockSize1d >> >(cam, num_paths, dev_image, dev_paths);
}
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
| 94672a300285063dc1242d013382bdc0f67bd3f6.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#include "stream_compaction/efficient.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
static BVHNodeDev * dev_bvhNodes = NULL;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
const int samplesPerPixel = cam.samplesPerPixel;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
cudaMalloc(&dev_paths, pixelcount * samplesPerPixel * sizeof(PathSegment));
cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
int bvhNodesCount = hst_scene->bvhNodes.size();
cudaMalloc(&dev_bvhNodes, bvhNodesCount * sizeof(BVHNodeDev));
cudaMemcpy(dev_bvhNodes, hst_scene->bvhNodes.data(), bvhNodesCount * sizeof(BVHNodeDev), cudaMemcpyHostToDevice);
checkCUDAError("pathtraceInit");
}
void updateGeom(Scene *scene) {
cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
}
void pathtraceFree() {
cudaFree(dev_image); // no-op if dev_image is null
cudaFree(dev_paths);
cudaFree(dev_geoms);
cudaFree(dev_materials);
cudaFree(dev_intersections);
// TODO: clean up any extra device memory you created
cudaFree(dev_bvhNodes);
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(
Camera cam
, int iter
, int traceDepth
, PathSegment* pathSegments
)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
for (int i = 0; i < cam.samplesPerPixel; ++i) {
int index = i + (x * cam.samplesPerPixel) + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
// TODO: implement antialiasing by jittering the ray
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
segment.ray.direction = glm::normalize(
cam.forward
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f )
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
segment.pixelIndex = x + (y * cam.resolution.x);
segment.remainingBounces = traceDepth;
}
}
}
__device__ float computeIntersection(
const PathSegment& pathSegment
, const Geom& geom
, glm::vec3& intersect_point
, glm::vec3& normal
) {
float t = -1;
bool outside = true;
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, intersect_point, normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, intersect_point, normal, outside);
}
else if (geom.type == TRIANGLE)
{
t = triangleIntersectionTest(geom, pathSegment.ray, intersect_point, normal, outside);
}
return t;
}
/*
* Iterative stack-less BVH traversal using state logic and pointers to nodes.
* \ref https://graphics.cg.uni-saarland.de/fileadmin/cguds/papers/2011/hapala_sccg2011/hapala_sccg2011.pdf
*/
__global__ void traverseBVH(
int depth
, int num_paths
, PathSegment * pathSegments
, int num_bvhNodes
, BVHNodeDev* bvhNodes
, int rootIdx
, int geoms_size
, Geom * geoms
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
Geom* hit_geom = nullptr;
BVHNodeDev current = bvhNodes[rootIdx];
EBVHTransition transition = EBVHTransition::FromParent;
bool isIterating = true;
while (isIterating) {
// States (reproduced here from the Stack-less BVH Traversal paper [Hapala et al. 2011])
// Link: https://graphics.cg.uni-saarland.de/fileadmin/cguds/papers/2011/hapala_sccg2011/hapala_sccg2011.pdf
switch (transition) {
// 1. From child
// In the fromChild case the current node was already tested when going
// down, and does not have to be re-tested. The next node to traverse
// is either current's sibling farChild (if current is nearChild),
// or its parent (if current was farChild).
//
case EBVHTransition::FromChild:
if (current.idx == rootIdx) {
// Current has reached root
isIterating = false;
}
else if (current.idx == bvhNodes[current.parentIdx].nearChildIdx) {
// Current is near child, so transition to far child
current = bvhNodes[bvhNodes[current.parentIdx].farChildIdx];
transition = EBVHTransition::FromSibling;
}
else {
// Current is far child, go back to parent
current = bvhNodes[current.parentIdx];
transition = EBVHTransition::FromChild;
}
break;
// 2. From sibling
// In the fromSibling case, we know that we are entering farChild (it
// cannot be reached in any other way), and that we are traversing this
// node for the first time (i.e. a box test has to be done). If the node
// is missed, we back-track to its parent; otherwise, the current node
// has to be processed: if it is a leaf node, we intersect its primitives
// against the ray, and proceed to parent. Otherwise (i.e. if the node
// was hit but is not a leaf), we enter current's subtree by performing
// a fromParent step to current's first child.
case EBVHTransition::FromSibling:
if (current.geomIdx != -1) {
// Leaf node
t = computeIntersection(pathSegment, geoms[current.geomIdx], tmp_intersect, tmp_normal);
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
intersect_point = tmp_intersect;
normal = tmp_normal;
hit_geom = &(geoms[current.geomIdx]);
}
current = bvhNodes[current.parentIdx];
transition = EBVHTransition::FromChild;
}
else {
// When this isn't a leaf node, check bbox intersection
bool hit = bboxIntersectionTest(current.bboxGeom, pathSegments[path_index].ray);
if (!hit) {
// Missed, go back up to parent
if (current.idx == rootIdx) {
// Current has reached root
isIterating = false;
} else {
current = bvhNodes[current.parentIdx];
transition = EBVHTransition::FromChild;
}
}
else {
// Hit, enter its subtree (near child)
current = bvhNodes[current.nearChildIdx];
transition = EBVHTransition::FromParent;
}
}
break;
// 3. From parent
// Finally, in the fromParent case, we know that we are entering
// nearChild and we do exactly the same as in the previous case,
// except that every time we would have gone to parent we go to
// farChild instead.
case EBVHTransition::FromParent:
if (current.geomIdx != -1) {
// Leaf node
t = computeIntersection(pathSegment, geoms[current.geomIdx], tmp_intersect, tmp_normal);
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
intersect_point = tmp_intersect;
normal = tmp_normal;
hit_geom = &(geoms[current.geomIdx]);
}
if (current.idx == rootIdx) {
// Current has reached root
isIterating = false;
} else {
// Go to far sibling
current = bvhNodes[bvhNodes[current.parentIdx].farChildIdx];
transition = EBVHTransition::FromSibling;
}
}
else {
// When this isn't a leaf node, check bbox intersection
bool hit = bboxIntersectionTest(current.bboxGeom, pathSegments[path_index].ray);
if (!hit) {
// Missed, go to far sibling
if (current.idx == rootIdx) {
// Current has reached root
isIterating = false;
} else {
current = bvhNodes[bvhNodes[current.parentIdx].farChildIdx];
transition = EBVHTransition::FromSibling;
}
}
else {
// Hit, enter its subtree
current = bvhNodes[current.nearChildIdx];
transition = EBVHTransition::FromParent;
}
}
break;
default:
// *N.B*: Should never reach here
assert(false);
break;
}
}
if (hit_geom == nullptr)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = hit_geom->materialid;
intersections[path_index].surfaceNormal = normal;
intersections[path_index].intersect_point = intersect_point;
}
}
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
t = computeIntersection(pathSegments[path_index], geom, tmp_intersect, tmp_normal);
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
intersections[path_index].intersect_point = intersect_point;
}
}
}
__global__ void shadeMaterial(
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths && pathSegments[idx].remainingBounces > 0)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
else {
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces);
scatterRay(pathSegments[idx], intersection.intersect_point, intersection.surfaceNormal, materials[intersection.materialId], rng);
pathSegments[idx].remainingBounces -= 1;
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
}
else {
pathSegments[idx].color = glm::vec3(0);
pathSegments[idx].remainingBounces = 0;
}
}
}
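// Accumulate only the paths that terminated on this bounce; used together with
// stream compaction, which removes those paths right afterwards.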
__global__ void partialGather(
const Camera cam
, int nPaths
, glm::vec3* image
, PathSegment* iterationPaths
)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
if (iterationPath.remainingBounces == 0) {
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(
const Camera cam
, int nPaths
, glm::vec3* image
, PathSegment* iterationPaths
)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
if (iterationPath.remainingBounces >= 0) {
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
}
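// Predicate for thrust::remove_if / stream compaction: true once a path has no
// bounces left.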
struct shouldTerminatePath
{
__host__ __device__
bool operator()(const PathSegment& p)
{
return p.remainingBounces == 0;
};
};
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 8;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// Perform one iteration of path tracing
generateRayFromCamera <<<blocksPerGrid2d, blockSize2d >>>(cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
//cudaEventRecord(start);
if (hst_scene->BVH_ENABLED) {
traverseBVH << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, hst_scene->bvhNodes.size()
, dev_bvhNodes
, hst_scene->root->nodeIdx
, hst_scene->geoms.size()
, dev_geoms
, dev_intersections
);
} else {
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
}
checkCUDAError("trace one bounce");
cudaDeviceSynchronize();
//cudaEventRecord(stop);
depth++;
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
shadeMaterial<<<numblocksPathSegmentTracing, blockSize1d>>> (
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials
);
if (hst_scene->STREAM_COMPACTION_ENABLED) {
// If using stream compaction, we have to use partial gather
dim3 numBlocksPixels = (num_paths + blockSize1d - 1) / blockSize1d;
partialGather << <numBlocksPixels, blockSize1d >> >(cam, num_paths, dev_image, dev_paths);
#define USE_THRUST
#ifdef USE_THRUST
PathSegment* new_dev_paths_end = thrust::remove_if(thrust::device, dev_paths, dev_paths + num_paths, shouldTerminatePath());
num_paths = new_dev_paths_end - dev_paths;
#else
num_paths = StreamCompaction::OptimizedEfficient::compact(num_paths, dev_paths, dev_paths);
#endif
}
//cudaEventSynchronize(stop);
//float ms;
//cudaEventElapsedTime(&ms, start, stop);
//cout << ms << endl;
iterationComplete = num_paths == 0 || depth > traceDepth;
}
if (!hst_scene->STREAM_COMPACTION_ENABLED) {
// If not using stream compaction, apply final gather
// Assemble this iteration and apply it to the image
num_paths = dev_path_end - dev_paths;
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather << <numBlocksPixels, blockSize1d >> >(cam, num_paths, dev_image, dev_paths);
}
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
|
50260ab2008dd299c26a0d8a333d1e82b3d3f742.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/types.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#define TILE_SIZE 8
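// Row-major indexing helpers: X is (batches x dim_input), W is (dim_input x
// dim_output), Y is (batches x dim_output), and G stores one gate value per
// batch for every (dimx x dimy) block of W.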
#define X0(a,b) X[(a)*x_cols+(b)]
#define Y0(a,b) Y[(a)*y_cols+(b)]
#define W0(a,b) W[(a)*w_cols+(b)]
#define G0(a,b,c) G[(a)*g_rows*g_cols+(b)*g_cols+(c)]
#define DY(a,b) dY[(a)*y_cols+(b)]
#define DG(a,b,c) dG[(a)*g_rows*g_cols+(b)*g_cols+(c)]
#define DW(a,b) dW[(a)*w_cols+(b)]
/*
* Forward functions
*/
__global__ void gmv_double1(double *Y, double *X, double *W, double *G, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
for (auto b = 0; b < batches; b++) {
for (auto o = 0; o < dim_output; o++) {
double tmp = 0.0;
for (auto i = 0; i < dim_input; i++) {
tmp += X0(b, i) * W0(i, o) * G0(b,i/dimx,o/dimy);
}
Y0(b,o) = tmp;
}
}
}
void cuda_gmv_forward1(torch::Tensor Y, torch::Tensor X, torch::Tensor W, torch::Tensor G) {
const auto batches = Y.size(0);
const auto dim_output = Y.size(1);
const auto dim_input = X.size(1);
const auto dimx = W.size(0) / G.size(1);
const auto dimy = W.size(1) / G.size(2);
printf("cuda_gmv_forward1 -> batches=%ld dim_output=%ld dim_input=%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, dimx, dimy);
dim3 grid(1, 1);
dim3 block(1, 1);
hipLaunchKernelGGL(( gmv_double1), dim3(grid), dim3(block), 0, 0, Y.data<double>(), X.data<double>(), W.data<double>(), G.data<double>(), batches, dim_output, dim_input, dimx, dimy);
}
__global__ void gmv_double2(double *Y, double *X, double *W, double *G, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto b = blockIdx.x;
const auto o = blockIdx.y;
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
double tmp = 0.0;
for (auto i = 0; i < dim_input; i++) {
tmp += X0(b, i) * W0(i, o) * G0(b,i/dimx,o/dimy);
}
Y0(b,o) = tmp;
}
void cuda_gmv_forward2(torch::Tensor Y, torch::Tensor X, torch::Tensor W, torch::Tensor G) {
const auto batches = Y.size(0);
const auto dim_output = Y.size(1);
const auto dim_input = X.size(1);
const auto dimx = W.size(0) / G.size(1);
const auto dimy = W.size(1) / G.size(2);
printf("cuda_gmv_forward2 -> batches=%ld dim_output=%ld dim_input=%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, dimx, dimy);
dim3 grid(batches, dim_output);
dim3 block(1, 1);
hipLaunchKernelGGL(( gmv_double2), dim3(grid), dim3(block), 0, 0, Y.data<double>(), X.data<double>(), W.data<double>(), G.data<double>(), batches, dim_output, dim_input, dimx, dimy);
}
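// Tiled variant: stages TILE_SIZE x TILE_SIZE blocks of X and W in shared
// memory; the gate is applied once per tile, which assumes dimx and dimy are
// multiples of TILE_SIZE so that a whole tile maps to a single gate entry.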
template<typename T> __global__ void gmv_3(T *Y, T *X, T *W, T *G, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto b0 = blockIdx.x * blockDim.x;
const auto b = threadIdx.x;
const auto o0 = blockIdx.y * blockDim.y;
const auto o = threadIdx.y;
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
T tmp = 0.0;
int i0 = 0;
for (auto m = 0; m < dim_input / TILE_SIZE; m++) {
__shared__ T Xs[TILE_SIZE][TILE_SIZE];
__shared__ T Ws[TILE_SIZE][TILE_SIZE];
Xs[threadIdx.y][threadIdx.x] = X0(b0+threadIdx.y, i0+threadIdx.x);
Ws[threadIdx.y][threadIdx.x] = W0(i0+threadIdx.y, o0+threadIdx.x);
__syncthreads();
T tmp1 = 0.0;
for (auto i = 0; i < TILE_SIZE; i++) {
// tmp += X0(b0+b, i0 + i) * W0(i0 + i, o0+o) * G0(b0+b,(i0+i)/dimx,(o0+o)/dimy);
tmp1 += Xs[b][i] * Ws[i][o];
}
tmp += tmp1 * G0(b0+b,i0/dimx,o0/dimy);
i0 += TILE_SIZE;
__syncthreads();
}
Y0(b0+b,o0+o) = tmp;
}
void cuda_gmv_forward3(torch::Tensor Y, torch::Tensor X, torch::Tensor W, torch::Tensor G) {
const auto batches = Y.size(0);
const auto dim_output = Y.size(1);
const auto dim_input = X.size(1);
const auto dimx = W.size(0) / G.size(1);
const auto dimy = W.size(1) / G.size(2);
if (batches % TILE_SIZE != 0) AT_ERROR("respect tile size for batches");
if (dim_output % TILE_SIZE != 0) AT_ERROR("respect tile size for dim_output");
// printf("cuda_gmv_forward3 -> batches=%ld dim_output=%ld dim_input=%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, dimx, dimy);
dim3 grid(batches / TILE_SIZE, dim_output / TILE_SIZE);
dim3 block(TILE_SIZE, TILE_SIZE);
if (Y.type().scalarType() == torch::ScalarType::Double) {
hipLaunchKernelGGL(( gmv_3<double>), dim3(grid), dim3(block), 0, 0, Y.data<double>(), X.data<double>(), W.data<double>(), G.data<double>(), batches, dim_output, dim_input, dimx, dimy);
} else {
printf("float\n");
hipLaunchKernelGGL(( gmv_3<float>), dim3(grid), dim3(block), 0, 0, Y.data<float>(), X.data<float>(), W.data<float>(), G.data<float>(), batches, dim_output, dim_input, dimx, dimy);
}
}
/*
* Backward_g functions
*/
__global__ void gmv_backward_g_1_double(double *dY, double *X, double *W, double *dG, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
for (auto b = 0; b < batches; b++) {
for (auto o = 0; o < dim_output; o++) {
for (auto i = 0; i < dim_input; i++) {
DG(b,i/dimx,o/dimy) += X0(b, i) * W0(i, o) * DY(b,o);
}
}
}
}
void cuda_gmv_backward_g_1(torch::Tensor dY, torch::Tensor X, torch::Tensor W, torch::Tensor dG) {
const auto batches = dY.size(0);
const auto dim_output = dY.size(1);
const auto dim_input = X.size(1);
const auto dimx = W.size(0) / dG.size(1);
const auto dimy = W.size(1) / dG.size(2);
printf("cuda_gmv_backward_g_1 -> batches=%ld dim_output=%ld dim_input=%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, dimx, dimy);
dim3 grid(1, 1);
dim3 block(1, 1);
hipLaunchKernelGGL(( gmv_backward_g_1_double), dim3(grid), dim3(block), 0, 0, dY.data<double>(), X.data<double>(), W.data<double>(), dG.data<double>(), batches, dim_output, dim_input, dimx, dimy);
}
__global__ void gmv_backward_g_2_double(double *dY, double *X, double *W, double *dG, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
const auto b = blockIdx.x / g_rows;
const auto gr = blockIdx.x % g_rows;
const auto gc = blockIdx.y;
double tmp = 0;
for (int u = 0; u < dimx; u++) {
for (int v = 0; v < dimy; v++) {
int i = gr * dimx + u;
int o = gc * dimy + v;
tmp += X0(b, i) * W0(i, o) * DY(b,o);
}
}
DG(b,gr,gc) = tmp;
}
void cuda_gmv_backward_g_2(torch::Tensor dY, torch::Tensor X, torch::Tensor W, torch::Tensor dG) {
const auto batches = dY.size(0);
const auto dim_output = dY.size(1);
const auto dim_input = X.size(1);
const auto dimx = W.size(0) / dG.size(1);
const auto dimy = W.size(1) / dG.size(2);
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
printf("cuda_gmv_backward_g_2 -> batches=%ld dim_output=%ld dim_input=%ld blocks=%ld,%ld,%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, batches, g_rows, g_cols, dimx, dimy);
dim3 grid(batches*g_rows, g_cols);
dim3 block(1, 1);
hipLaunchKernelGGL(( gmv_backward_g_2_double), dim3(grid), dim3(block), 0, 0, dY.data<double>(), X.data<double>(), W.data<double>(), dG.data<double>(), batches, dim_output, dim_input, dimx, dimy);
}
template<typename T> __global__ void gmv_backward_g_3(T *dY, T *X, T *W, T *dG, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
const auto b = blockIdx.x / g_rows;
const auto gr = blockIdx.x % g_rows;
const auto gc = blockIdx.y;
T tmp = 0;
for (int u = 0; u < dimx; u++) {
int i = gr * dimx + u;
int o = gc * dimy;
T tmp1 = 0;
for (int v = 0; v < dimy; v++) {
tmp1 += W0(i, o) * DY(b,o);
o++;
}
tmp += X0(b, i) * tmp1;
}
DG(b,gr,gc) = tmp;
}
void cuda_gmv_backward_g_3(torch::Tensor dY, torch::Tensor X, torch::Tensor W, torch::Tensor dG) {
const auto batches = dY.size(0);
const auto dim_output = dY.size(1);
const auto dim_input = X.size(1);
const auto dimx = W.size(0) / dG.size(1);
const auto dimy = W.size(1) / dG.size(2);
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
printf("cuda_gmv_backward_g_3 -> batches=%ld dim_output=%ld dim_input=%ld blocks=%ld,%ld,%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, batches, g_rows, g_cols, dimx, dimy);
dim3 grid(batches*g_rows, g_cols);
dim3 block(1, 1);
if (dY.type().scalarType() == torch::ScalarType::Double) {
hipLaunchKernelGGL(( gmv_backward_g_3<double>), dim3(grid), dim3(block), 0, 0, dY.data<double>(), X.data<double>(), W.data<double>(), dG.data<double>(), batches, dim_output, dim_input, dimx, dimy);
} else {
hipLaunchKernelGGL(( gmv_backward_g_3<float>), dim3(grid), dim3(block), 0, 0, dY.data<float>(), X.data<float>(), W.data<float>(), dG.data<float>(), batches, dim_output, dim_input, dimx, dimy);
}
}
/*
* Backward_w functions
*/
__global__ void gmv_backward_w_1_double(double *dY, double *X, double *dW, double *G, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
for (auto b = 0; b < batches; b++) {
for (auto o = 0; o < dim_output; o++) {
for (auto i = 0; i < dim_input; i++) {
DW(i, o) += X0(b, i) * G0(b,i/dimx,o/dimy) * DY(b,o);
}
}
}
}
void cuda_gmv_backward_w_1(torch::Tensor dY, torch::Tensor X, torch::Tensor dW, torch::Tensor G) {
const auto batches = dY.size(0);
const auto dim_output = dY.size(1);
const auto dim_input = X.size(1);
const auto dimx = dW.size(0) / G.size(1);
const auto dimy = dW.size(1) / G.size(2);
printf("cuda_gmv_backward_g_1 -> batches=%ld dim_output=%ld dim_input=%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, dimx, dimy);
dim3 grid(1, 1);
dim3 block(1, 1);
hipLaunchKernelGGL(( gmv_backward_w_1_double), dim3(grid), dim3(block), 0, 0, dY.data<double>(), X.data<double>(), dW.data<double>(), G.data<double>(), batches, dim_output, dim_input, dimx, dimy);
}
__global__ void gmv_backward_w_2_double(double *dY, double *X, double *dW, double *G, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto o = blockIdx.x;
const auto i = blockIdx.y;
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
double tmp = 0;
for (auto b = 0; b < batches; b++) {
tmp += X0(b, i) * G0(b,i/dimx,o/dimy) * DY(b,o);
}
DW(i, o) = tmp;
}
void cuda_gmv_backward_w_2(torch::Tensor dY, torch::Tensor X, torch::Tensor dW, torch::Tensor G) {
const auto batches = dY.size(0);
const auto dim_output = dY.size(1);
const auto dim_input = X.size(1);
const auto dimx = dW.size(0) / G.size(1);
const auto dimy = dW.size(1) / G.size(2);
printf("gmv_backward_w_2_double -> batches=%ld dim_output=%ld dim_input=%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, dimx, dimy);
dim3 grid(dim_output, dim_input);
dim3 block(1, 1);
hipLaunchKernelGGL(( gmv_backward_w_2_double), dim3(grid), dim3(block), 0, 0, dY.data<double>(), X.data<double>(), dW.data<double>(), G.data<double>(), batches, dim_output, dim_input, dimx, dimy);
}
template<typename T> __global__ void gmv_backward_w_3(T *dY, T *X, T *dW, T *G, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto o0 = blockIdx.x * blockDim.x, o = threadIdx.x;
const auto i0 = blockIdx.y * blockDim.y, i = threadIdx.y;
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
T tmp = 0;
int b0 = 0;
for (auto m = 0; m < batches / TILE_SIZE; m++) {
__shared__ T Xs[TILE_SIZE][TILE_SIZE];
__shared__ T Ys[TILE_SIZE][TILE_SIZE];
Xs[threadIdx.x][threadIdx.y] = X0(b0+threadIdx.x, i0+threadIdx.y);
Ys[threadIdx.x][threadIdx.y] = DY(b0+threadIdx.x, o0+threadIdx.y);
// todo: bring G0 into shared mem?
__syncthreads();
for (auto b = 0; b < TILE_SIZE; b++) {
// tmp += X0(b0+b, i0+i) * G0(b0+b,(i0+i)/dimx,(o0+o)/dimy) * DY(b0+b,o0+o);
tmp += Xs[b][i] * Ys[b][o] * G0(b0+b,(i0+i)/dimx,(o0+o)/dimy);
}
b0 += TILE_SIZE;
__syncthreads();
}
DW(i0+i, o0+o) = tmp;
}
void cuda_gmv_backward_w_3(torch::Tensor dY, torch::Tensor X, torch::Tensor dW, torch::Tensor G) {
const auto batches = dY.size(0);
const auto dim_output = dY.size(1);
const auto dim_input = X.size(1);
const auto dimx = dW.size(0) / G.size(1);
const auto dimy = dW.size(1) / G.size(2);
// printf("gmv_backward_w_3_double -> batches=%ld dim_output=%ld dim_input=%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, dimx, dimy);
if (dim_output % TILE_SIZE != 0) AT_ERROR("respect tile size for dim_output");
if (dim_input % TILE_SIZE != 0) AT_ERROR("respect tile size for dim_input");
dim3 grid(dim_output / TILE_SIZE, dim_input / TILE_SIZE);
dim3 block(TILE_SIZE, TILE_SIZE);
if (dY.type().scalarType() == torch::ScalarType::Double) {
hipLaunchKernelGGL(( gmv_backward_w_3<double>), dim3(grid), dim3(block), 0, 0, dY.data<double>(), X.data<double>(), dW.data<double>(), G.data<double>(), batches, dim_output, dim_input, dimx, dimy);
} else {
hipLaunchKernelGGL(( gmv_backward_w_3<float>), dim3(grid), dim3(block), 0, 0, dY.data<float>(), X.data<float>(), dW.data<float>(), G.data<float>(), batches, dim_output, dim_input, dimx, dimy);
}
}
| 50260ab2008dd299c26a0d8a333d1e82b3d3f742.cu | #include <torch/types.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#define TILE_SIZE 8
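// Row-major indexing helpers: X is (batches x dim_input), W is (dim_input x
// dim_output), Y is (batches x dim_output), and G stores one gate value per
// batch for every (dimx x dimy) block of W.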
#define X0(a,b) X[(a)*x_cols+(b)]
#define Y0(a,b) Y[(a)*y_cols+(b)]
#define W0(a,b) W[(a)*w_cols+(b)]
#define G0(a,b,c) G[(a)*g_rows*g_cols+(b)*g_cols+(c)]
#define DY(a,b) dY[(a)*y_cols+(b)]
#define DG(a,b,c) dG[(a)*g_rows*g_cols+(b)*g_cols+(c)]
#define DW(a,b) dW[(a)*w_cols+(b)]
/*
* Forward functions
*/
__global__ void gmv_double1(double *Y, double *X, double *W, double *G, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
for (auto b = 0; b < batches; b++) {
for (auto o = 0; o < dim_output; o++) {
double tmp = 0.0;
for (auto i = 0; i < dim_input; i++) {
tmp += X0(b, i) * W0(i, o) * G0(b,i/dimx,o/dimy);
}
Y0(b,o) = tmp;
}
}
}
void cuda_gmv_forward1(torch::Tensor Y, torch::Tensor X, torch::Tensor W, torch::Tensor G) {
const auto batches = Y.size(0);
const auto dim_output = Y.size(1);
const auto dim_input = X.size(1);
const auto dimx = W.size(0) / G.size(1);
const auto dimy = W.size(1) / G.size(2);
printf("cuda_gmv_forward1 -> batches=%ld dim_output=%ld dim_input=%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, dimx, dimy);
dim3 grid(1, 1);
dim3 block(1, 1);
gmv_double1<<<grid, block>>>(Y.data<double>(), X.data<double>(), W.data<double>(), G.data<double>(), batches, dim_output, dim_input, dimx, dimy);
}
__global__ void gmv_double2(double *Y, double *X, double *W, double *G, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto b = blockIdx.x;
const auto o = blockIdx.y;
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
double tmp = 0.0;
for (auto i = 0; i < dim_input; i++) {
tmp += X0(b, i) * W0(i, o) * G0(b,i/dimx,o/dimy);
}
Y0(b,o) = tmp;
}
void cuda_gmv_forward2(torch::Tensor Y, torch::Tensor X, torch::Tensor W, torch::Tensor G) {
const auto batches = Y.size(0);
const auto dim_output = Y.size(1);
const auto dim_input = X.size(1);
const auto dimx = W.size(0) / G.size(1);
const auto dimy = W.size(1) / G.size(2);
printf("cuda_gmv_forward2 -> batches=%ld dim_output=%ld dim_input=%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, dimx, dimy);
dim3 grid(batches, dim_output);
dim3 block(1, 1);
gmv_double2<<<grid, block>>>(Y.data<double>(), X.data<double>(), W.data<double>(), G.data<double>(), batches, dim_output, dim_input, dimx, dimy);
}
template<typename T> __global__ void gmv_3(T *Y, T *X, T *W, T *G, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto b0 = blockIdx.x * blockDim.x;
const auto b = threadIdx.x;
const auto o0 = blockIdx.y * blockDim.y;
const auto o = threadIdx.y;
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
T tmp = 0.0;
int i0 = 0;
for (auto m = 0; m < dim_input / TILE_SIZE; m++) {
__shared__ T Xs[TILE_SIZE][TILE_SIZE];
__shared__ T Ws[TILE_SIZE][TILE_SIZE];
Xs[threadIdx.y][threadIdx.x] = X0(b0+threadIdx.y, i0+threadIdx.x);
Ws[threadIdx.y][threadIdx.x] = W0(i0+threadIdx.y, o0+threadIdx.x);
__syncthreads();
T tmp1 = 0.0;
for (auto i = 0; i < TILE_SIZE; i++) {
// tmp += X0(b0+b, i0 + i) * W0(i0 + i, o0+o) * G0(b0+b,(i0+i)/dimx,(o0+o)/dimy);
tmp1 += Xs[b][i] * Ws[i][o];
}
tmp += tmp1 * G0(b0+b,i0/dimx,o0/dimy);
i0 += TILE_SIZE;
__syncthreads();
}
Y0(b0+b,o0+o) = tmp;
}
void cuda_gmv_forward3(torch::Tensor Y, torch::Tensor X, torch::Tensor W, torch::Tensor G) {
const auto batches = Y.size(0);
const auto dim_output = Y.size(1);
const auto dim_input = X.size(1);
const auto dimx = W.size(0) / G.size(1);
const auto dimy = W.size(1) / G.size(2);
if (batches % TILE_SIZE != 0) AT_ERROR("respect tile size for batches");
if (dim_output % TILE_SIZE != 0) AT_ERROR("respect tile size for dim_output");
// printf("cuda_gmv_forward3 -> batches=%ld dim_output=%ld dim_input=%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, dimx, dimy);
dim3 grid(batches / TILE_SIZE, dim_output / TILE_SIZE);
dim3 block(TILE_SIZE, TILE_SIZE);
if (Y.type().scalarType() == torch::ScalarType::Double) {
gmv_3<double><<<grid, block>>>(Y.data<double>(), X.data<double>(), W.data<double>(), G.data<double>(), batches, dim_output, dim_input, dimx, dimy);
} else {
printf("float\n");
gmv_3<float><<<grid, block>>>(Y.data<float>(), X.data<float>(), W.data<float>(), G.data<float>(), batches, dim_output, dim_input, dimx, dimy);
}
}
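/*
 * Shapes implied by the size arithmetic in the launchers above (illustrative,
 * using the names already in this file):
 *   X : [batches, dim_input]     W : [dim_input, dim_output]
 *   Y : [batches, dim_output]    G : [batches, dim_input/dimx, dim_output/dimy]
 * cuda_gmv_forward3 additionally requires batches and dim_output to be
 * multiples of TILE_SIZE, and its tiled gate lookup G0(b0+b, i0/dimx, o0/dimy)
 * assumes every TILE_SIZE-wide slice of dim_input and dim_output stays inside
 * a single gate block; the exact per-element form is kept as a comment inside
 * gmv_3.
 */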
/*
* Backward_g functions
*/
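/*
 * The gate gradient accumulates, for every gate block (gr, gc) of batch b,
 *   dG(b,gr,gc) = sum_{u<dimx} sum_{v<dimy}
 *                 X(b, gr*dimx+u) * W(gr*dimx+u, gc*dimy+v) * dY(b, gc*dimy+v).
 * gmv_backward_g_1_double is the serial reference; gmv_backward_g_2_double and
 * gmv_backward_g_3 assign one block per (b, gr, gc) entry, with _3 hoisting
 * the X(b,i) factor out of the inner loop over v.
 */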
__global__ void gmv_backward_g_1_double(double *dY, double *X, double *W, double *dG, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
for (auto b = 0; b < batches; b++) {
for (auto o = 0; o < dim_output; o++) {
for (auto i = 0; i < dim_input; i++) {
DG(b,i/dimx,o/dimy) += X0(b, i) * W0(i, o) * DY(b,o);
}
}
}
}
void cuda_gmv_backward_g_1(torch::Tensor dY, torch::Tensor X, torch::Tensor W, torch::Tensor dG) {
const auto batches = dY.size(0);
const auto dim_output = dY.size(1);
const auto dim_input = X.size(1);
const auto dimx = W.size(0) / dG.size(1);
const auto dimy = W.size(1) / dG.size(2);
printf("cuda_gmv_backward_g_1 -> batches=%ld dim_output=%ld dim_input=%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, dimx, dimy);
dim3 grid(1, 1);
dim3 block(1, 1);
gmv_backward_g_1_double<<<grid, block>>>(dY.data<double>(), X.data<double>(), W.data<double>(), dG.data<double>(), batches, dim_output, dim_input, dimx, dimy);
}
__global__ void gmv_backward_g_2_double(double *dY, double *X, double *W, double *dG, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
const auto b = blockIdx.x / g_rows;
const auto gr = blockIdx.x % g_rows;
const auto gc = blockIdx.y;
double tmp = 0;
for (int u = 0; u < dimx; u++) {
for (int v = 0; v < dimy; v++) {
int i = gr * dimx + u;
int o = gc * dimy + v;
tmp += X0(b, i) * W0(i, o) * DY(b,o);
}
}
DG(b,gr,gc) = tmp;
}
void cuda_gmv_backward_g_2(torch::Tensor dY, torch::Tensor X, torch::Tensor W, torch::Tensor dG) {
const auto batches = dY.size(0);
const auto dim_output = dY.size(1);
const auto dim_input = X.size(1);
const auto dimx = W.size(0) / dG.size(1);
const auto dimy = W.size(1) / dG.size(2);
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
printf("cuda_gmv_backward_g_2 -> batches=%ld dim_output=%ld dim_input=%ld blocks=%ld,%ld,%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, batches, g_rows, g_cols, dimx, dimy);
dim3 grid(batches*g_rows, g_cols);
dim3 block(1, 1);
gmv_backward_g_2_double<<<grid, block>>>(dY.data<double>(), X.data<double>(), W.data<double>(), dG.data<double>(), batches, dim_output, dim_input, dimx, dimy);
}
template<typename T> __global__ void gmv_backward_g_3(T *dY, T *X, T *W, T *dG, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
const auto b = blockIdx.x / g_rows;
const auto gr = blockIdx.x % g_rows;
const auto gc = blockIdx.y;
T tmp = 0;
for (int u = 0; u < dimx; u++) {
int i = gr * dimx + u;
int o = gc * dimy;
T tmp1 = 0;
for (int v = 0; v < dimy; v++) {
tmp1 += W0(i, o) * DY(b,o);
o++;
}
tmp += X0(b, i) * tmp1;
}
DG(b,gr,gc) = tmp;
}
void cuda_gmv_backward_g_3(torch::Tensor dY, torch::Tensor X, torch::Tensor W, torch::Tensor dG) {
const auto batches = dY.size(0);
const auto dim_output = dY.size(1);
const auto dim_input = X.size(1);
const auto dimx = W.size(0) / dG.size(1);
const auto dimy = W.size(1) / dG.size(2);
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
printf("cuda_gmv_backward_g_3 -> batches=%ld dim_output=%ld dim_input=%ld blocks=%ld,%ld,%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, batches, g_rows, g_cols, dimx, dimy);
dim3 grid(batches*g_rows, g_cols);
dim3 block(1, 1);
if (dY.type().scalarType() == torch::ScalarType::Double) {
gmv_backward_g_3<double><<<grid, block>>>(dY.data<double>(), X.data<double>(), W.data<double>(), dG.data<double>(), batches, dim_output, dim_input, dimx, dimy);
} else {
gmv_backward_g_3<float><<<grid, block>>>(dY.data<float>(), X.data<float>(), W.data<float>(), dG.data<float>(), batches, dim_output, dim_input, dimx, dimy);
}
}
/*
* Backward_w functions
*/
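/*
 * The weight gradient is
 *   dW(i,o) = sum_b X(b,i) * G(b, i/dimx, o/dimy) * dY(b,o).
 * gmv_backward_w_1_double is the serial reference, gmv_backward_w_2_double
 * uses one block per (o, i) pair, and gmv_backward_w_3 tiles the batch
 * reduction through shared memory.
 */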
__global__ void gmv_backward_w_1_double(double *dY, double *X, double *dW, double *G, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
for (auto b = 0; b < batches; b++) {
for (auto o = 0; o < dim_output; o++) {
for (auto i = 0; i < dim_input; i++) {
DW(i, o) += X0(b, i) * G0(b,i/dimx,o/dimy) * DY(b,o);
}
}
}
}
void cuda_gmv_backward_w_1(torch::Tensor dY, torch::Tensor X, torch::Tensor dW, torch::Tensor G) {
const auto batches = dY.size(0);
const auto dim_output = dY.size(1);
const auto dim_input = X.size(1);
const auto dimx = dW.size(0) / G.size(1);
const auto dimy = dW.size(1) / G.size(2);
printf("cuda_gmv_backward_g_1 -> batches=%ld dim_output=%ld dim_input=%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, dimx, dimy);
dim3 grid(1, 1);
dim3 block(1, 1);
gmv_backward_w_1_double<<<grid, block>>>(dY.data<double>(), X.data<double>(), dW.data<double>(), G.data<double>(), batches, dim_output, dim_input, dimx, dimy);
}
__global__ void gmv_backward_w_2_double(double *dY, double *X, double *dW, double *G, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto o = blockIdx.x;
const auto i = blockIdx.y;
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
double tmp = 0;
for (auto b = 0; b < batches; b++) {
tmp += X0(b, i) * G0(b,i/dimx,o/dimy) * DY(b,o);
}
DW(i, o) = tmp;
}
void cuda_gmv_backward_w_2(torch::Tensor dY, torch::Tensor X, torch::Tensor dW, torch::Tensor G) {
const auto batches = dY.size(0);
const auto dim_output = dY.size(1);
const auto dim_input = X.size(1);
const auto dimx = dW.size(0) / G.size(1);
const auto dimy = dW.size(1) / G.size(2);
printf("gmv_backward_w_2_double -> batches=%ld dim_output=%ld dim_input=%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, dimx, dimy);
dim3 grid(dim_output, dim_input);
dim3 block(1, 1);
gmv_backward_w_2_double<<<grid, block>>>(dY.data<double>(), X.data<double>(), dW.data<double>(), G.data<double>(), batches, dim_output, dim_input, dimx, dimy);
}
template<typename T> __global__ void gmv_backward_w_3(T *dY, T *X, T *dW, T *G, int batches, int dim_output, int dim_input, int dimx, int dimy) {
const auto o0 = blockIdx.x * blockDim.x, o = threadIdx.x;
const auto i0 = blockIdx.y * blockDim.y, i = threadIdx.y;
const auto y_rows = batches, y_cols = dim_output;
const auto x_rows = batches, x_cols = dim_input;
const auto w_rows = dim_input, w_cols = dim_output;
const auto g_rows = dim_input/dimx, g_cols = dim_output/dimy;
T tmp = 0;
int b0 = 0;
for (auto m = 0; m < batches / TILE_SIZE; m++) {
__shared__ T Xs[TILE_SIZE][TILE_SIZE];
__shared__ T Ys[TILE_SIZE][TILE_SIZE];
Xs[threadIdx.x][threadIdx.y] = X0(b0+threadIdx.x, i0+threadIdx.y);
Ys[threadIdx.x][threadIdx.y] = DY(b0+threadIdx.x, o0+threadIdx.y);
// todo: bring G0 into shared mem?
__syncthreads();
for (auto b = 0; b < TILE_SIZE; b++) {
// tmp += X0(b0+b, i0+i) * G0(b0+b,(i0+i)/dimx,(o0+o)/dimy) * DY(b0+b,o0+o);
tmp += Xs[b][i] * Ys[b][o] * G0(b0+b,(i0+i)/dimx,(o0+o)/dimy);
}
b0 += TILE_SIZE;
__syncthreads();
}
DW(i0+i, o0+o) = tmp;
}
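/*
 * In gmv_backward_w_3 each TILE_SIZE x TILE_SIZE thread block owns one tile
 * of dW and walks the batch dimension in TILE_SIZE chunks, staging matching
 * tiles of X and dY in shared memory while the gate G is still read per
 * element from global memory (see the todo above).  The reduction assumes
 * batches is a multiple of TILE_SIZE, which the launcher below checks;
 * trailing batch elements would otherwise be silently skipped.
 */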
void cuda_gmv_backward_w_3(torch::Tensor dY, torch::Tensor X, torch::Tensor dW, torch::Tensor G) {
const auto batches = dY.size(0);
const auto dim_output = dY.size(1);
const auto dim_input = X.size(1);
const auto dimx = dW.size(0) / G.size(1);
const auto dimy = dW.size(1) / G.size(2);
// printf("gmv_backward_w_3_double -> batches=%ld dim_output=%ld dim_input=%ld dimx=%ld dimy=%ld\n", batches, dim_output, dim_input, dimx, dimy);
if (dim_output % TILE_SIZE != 0) AT_ERROR("respect tile size for dim_output");
if (dim_input % TILE_SIZE != 0) AT_ERROR("respect tile size for dim_input");
if (batches % TILE_SIZE != 0) AT_ERROR("respect tile size for batches");
dim3 grid(dim_output / TILE_SIZE, dim_input / TILE_SIZE);
dim3 block(TILE_SIZE, TILE_SIZE);
if (dY.type().scalarType() == torch::ScalarType::Double) {
gmv_backward_w_3<double><<<grid, block>>>(dY.data<double>(), X.data<double>(), dW.data<double>(), G.data<double>(), batches, dim_output, dim_input, dimx, dimy);
} else {
gmv_backward_w_3<float><<<grid, block>>>(dY.data<float>(), X.data<float>(), dW.data<float>(), G.data<float>(), batches, dim_output, dim_input, dimx, dimy);
}
}
|
bfb50d4026a60e6ca0364dd9be76af793bd624af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define MAX_SPLINES 100
#include <stdio.h>
#include "BsplineJastrowCudaPBC.h"
#include "../../CUDA/gpu_misc.h"
bool AisInitializedPBC = false;
static bool CMC_profile = false;
void CMC_profileSample(const char *function, float msec)
{
// only report timings for two_body_NLratios_PBC; strcmp() returns nonzero for every other name
if (strcmp(function, "two_body_NLratios_PBC"))
{
return;
}
printf("%s: %1.3e msec\n", function, msec);
}
#define CMC_PROFILING_BEGIN() \
hipMemcpyToSymbolAsync(CMC_L, lattice, sizeof(CMC_L), 0, hipMemcpyDeviceToDevice, gpu::kernelStream); \
hipMemcpyToSymbolAsync(CMC_Linv, latticeInv, sizeof(CMC_Linv), 0, hipMemcpyDeviceToDevice, gpu::kernelStream); \
hipEvent_t start; \
hipEvent_t stop; \
if (CMC_profile) { \
hipEventCreate(&start); \
hipEventCreate(&stop); \
hipGetLastError(); \
hipEventRecord(start); \
}
#define CMC_PROFILING_END() \
if (CMC_profile) { \
hipEventRecord(stop); \
hipEventSynchronize(stop); \
float time = 0.0f; \
hipEventElapsedTime(&time, start, stop); \
hipEventDestroy(start); \
hipEventDestroy(stop); \
CMC_profileSample(__FUNCTION__, time); \
} \
if (hipGetLastError()) { printf("ERROR!!!\n"); exit(1); }
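// CMC_PROFILING_BEGIN/END bracket a kernel launch: BEGIN pushes the current
// lattice and its inverse into the CMC_L / CMC_Linv constants on the kernel
// stream (it assumes float* lattice and latticeInv are in scope) and, when
// CMC_profile is set, records a start event; END records a stop event,
// reports the elapsed time through CMC_profileSample() and exits on any
// pending HIP error.  Minimal usage sketch inside a launcher (kernel name is
// hypothetical):
//
//   CMC_PROFILING_BEGIN();
//   hipLaunchKernelGGL((some_kernel<float,BS>), dimGrid, dimBlock, 0, 0, ...);
//   CMC_PROFILING_END();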
static __constant__ float CMC_L[3][3];
static __constant__ float CMC_Linv[3][3];
template<typename T>
__device__ __forceinline__
T CMC_min_dist_fast(T& __restrict__ x, T& __restrict__ y, T& __restrict__ z)
{
T u0 = CMC_Linv[0][0]*x + CMC_Linv[1][0]*y + CMC_Linv[2][0]*z;
T u1 = CMC_Linv[0][1]*x + CMC_Linv[1][1]*y + CMC_Linv[2][1]*z;
T u2 = CMC_Linv[0][2]*x + CMC_Linv[1][2]*y + CMC_Linv[2][2]*z;
u0 -= rint(u0);
u1 -= rint(u1);
u2 -= rint(u2);
x = CMC_L[0][0]*u0 + CMC_L[1][0]*u1 + CMC_L[2][0]*u2;
y = CMC_L[0][1]*u0 + CMC_L[1][1]*u1 + CMC_L[2][1]*u2;
z = CMC_L[0][2]*u0 + CMC_L[1][2]*u1 + CMC_L[2][2]*u2;
return sqrt (x*x + y*y + z*z);
}
template<typename T>
__device__ __forceinline__
T CMC_min_dist_only(T x, T y, T z)
{
T u0 = CMC_Linv[0][0]*x + CMC_Linv[1][0]*y + CMC_Linv[2][0]*z;
T u1 = CMC_Linv[0][1]*x + CMC_Linv[1][1]*y + CMC_Linv[2][1]*z;
T u2 = CMC_Linv[0][2]*x + CMC_Linv[1][2]*y + CMC_Linv[2][2]*z;
u0 -= rint(u0);
u1 -= rint(u1);
u2 -= rint(u2);
x = CMC_L[0][0]*u0 + CMC_L[1][0]*u1 + CMC_L[2][0]*u2;
y = CMC_L[0][1]*u0 + CMC_L[1][1]*u1 + CMC_L[2][1]*u2;
z = CMC_L[0][2]*u0 + CMC_L[1][2]*u1 + CMC_L[2][2]*u2;
T d2min = x*x + y*y + z*z;
#pragma unroll
for (int i = -1; i <= 1; i++)
{
#pragma unroll
for (int j = -1; j <= 1; j++)
{
#pragma unroll
for (int k = -1; k <= 1; k++)
{
T xnew = CMC_L[0][0]*(u0+i) + CMC_L[1][0]*(u1+j) + CMC_L[2][0]*(u2+k);
T ynew = CMC_L[0][1]*(u0+i) + CMC_L[1][1]*(u1+j) + CMC_L[2][1]*(u2+k);
T znew = CMC_L[0][2]*(u0+i) + CMC_L[1][2]*(u1+j) + CMC_L[2][2]*(u2+k);
T d2 = xnew*xnew + ynew*ynew + znew*znew;
d2min = min(d2, d2min);
}
}
}
return sqrt(d2min);
}
template<typename T>
__device__ __forceinline__
T CMC_min_dist(T& __restrict__ x, T& __restrict__ y, T& __restrict__ z)
{
T u0 = CMC_Linv[0][0]*x + CMC_Linv[1][0]*y + CMC_Linv[2][0]*z;
T u1 = CMC_Linv[0][1]*x + CMC_Linv[1][1]*y + CMC_Linv[2][1]*z;
T u2 = CMC_Linv[0][2]*x + CMC_Linv[1][2]*y + CMC_Linv[2][2]*z;
u0 -= rint(u0);
u1 -= rint(u1);
u2 -= rint(u2);
x = CMC_L[0][0]*u0 + CMC_L[1][0]*u1 + CMC_L[2][0]*u2;
y = CMC_L[0][1]*u0 + CMC_L[1][1]*u1 + CMC_L[2][1]*u2;
z = CMC_L[0][2]*u0 + CMC_L[1][2]*u1 + CMC_L[2][2]*u2;
T d2min = x*x + y*y + z*z;
#pragma unroll
for (int i = -1; i <= 1; i++)
{
#pragma unroll
for (int j = -1; j <= 1; j++)
{
#pragma unroll
for (int k = -1; k <= 1; k++)
{
T xnew = CMC_L[0][0]*(u0+i) + CMC_L[1][0]*(u1+j) + CMC_L[2][0]*(u2+k);
T ynew = CMC_L[0][1]*(u0+i) + CMC_L[1][1]*(u1+j) + CMC_L[2][1]*(u2+k);
T znew = CMC_L[0][2]*(u0+i) + CMC_L[1][2]*(u1+j) + CMC_L[2][2]*(u2+k);
T d2new = xnew*xnew + ynew*ynew + znew*znew;
if (d2new < d2min)
{
x = xnew;
y = ynew;
z = znew;
d2min = d2new;
}
}
}
}
return sqrt(d2min);
}
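// Minimum-image helpers: a separation vector is mapped to reduced coordinates
// with Linv, wrapped to the nearest lattice period with rint(), and mapped
// back with L.  The *_fast variants stop there, which is only valid when the
// interaction cutoff rMax fits inside the simulation cell (the launchers
// below test sim_cell_radius >= rMax before choosing them); the full variants
// also scan the 27 neighboring images for the true nearest periodic image.
// The CMC_* versions read the lattice from the CMC_L / CMC_Linv constants set
// by CMC_PROFILING_BEGIN(), while the variants further down take L and Linv
// (or a precomputed image table) as arguments.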
// void
// createCudaSplines (float rmax, int N,
// float f[], float df[], float d2f[],
// int &fSpline, int &dfSpline, int &d2fSpline)
// {
// hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
// hipArray *fArray, *dfArray, *d2fArray;
// hipMallocArray( &fArray, &channelDesc, N);
// hipMallocArray( &dfArray, &channelDesc, N);
// hipMallocArray(&d2fArray, &channelDesc, N);
// hipMemcpyToArray(fArray, N,1, f,N*sizeof(float),hipMemcpyHostToDevice);
// hipMemcpyToArray(dfArray, N,1, df,N*sizeof(float),hipMemcpyHostToDevice);
// hipMemcpyToArray(d2fArray,N,1,d2f,N*sizeof(float),hipMemcpyHostToDevice);
// hipBindTextureToArray(texSplines[fSpline=curTex++], fArray);
// hipBindTextureToArray(texSplines[dfSpline=curTex++], dfArray);
// hipBindTextureToArray(texSplines[d2fSpline=curTex++], d2fArray);
// }
template<typename T>
__device__ __forceinline__
T min_dist (T& __restrict__ x, T& __restrict__ y, T& __restrict__ z,
T const L[3][3], T const Linv[3][3])
{
T u0 = Linv[0][0]*x + Linv[1][0]*y + Linv[2][0]*z;
T u1 = Linv[0][1]*x + Linv[1][1]*y + Linv[2][1]*z;
T u2 = Linv[0][2]*x + Linv[1][2]*y + Linv[2][2]*z;
u0 -= rint(u0);
u1 -= rint(u1);
u2 -= rint(u2);
x = L[0][0]*u0 + L[1][0]*u1 + L[2][0]*u2;
y = L[0][1]*u0 + L[1][1]*u1 + L[2][1]*u2;
z = L[0][2]*u0 + L[1][2]*u1 + L[2][2]*u2;
T d2min = x*x + y*y + z*z;
#pragma unroll
for (int i = -1; i <= 1; i++)
{
#pragma unroll
for (int j = -1; j <= 1; j++)
{
#pragma unroll
for (int k = -1; k <= 1; k++)
{
T xnew = L[0][0]*(u0+i) + L[1][0]*(u1+j) + L[2][0]*(u2+k);
T ynew = L[0][1]*(u0+i) + L[1][1]*(u1+j) + L[2][1]*(u2+k);
T znew = L[0][2]*(u0+i) + L[1][2]*(u1+j) + L[2][2]*(u2+k);
T d2 = xnew*xnew + ynew*ynew + znew*znew;
if (d2 < d2min)
{
d2min = d2;
x = xnew;
y = ynew;
z = znew;
}
}
}
}
return sqrt(d2min);
}
template<typename T>
__device__ __forceinline__
T min_dist_fast (T& __restrict__ x, T& __restrict__ y, T& __restrict__ z,
T const L[3][3], T const Linv[3][3])
{
T u0 = Linv[0][0]*x + Linv[1][0]*y + Linv[2][0]*z;
T u1 = Linv[0][1]*x + Linv[1][1]*y + Linv[2][1]*z;
T u2 = Linv[0][2]*x + Linv[1][2]*y + Linv[2][2]*z;
u0 -= rint(u0);
u1 -= rint(u1);
u2 -= rint(u2);
x = L[0][0]*u0 + L[1][0]*u1 + L[2][0]*u2;
y = L[0][1]*u0 + L[1][1]*u1 + L[2][1]*u2;
z = L[0][2]*u0 + L[1][2]*u1 + L[2][2]*u2;
return sqrt(x*x + y*y + z*z);
}
template<typename T>
__device__ __forceinline__
T min_dist (T& __restrict__ x, T& __restrict__ y, T& __restrict__ z,
T const L[3][3], T const Linv[3][3],
T const images[27][3])
{
T u0 = Linv[0][0]*x + Linv[1][0]*y + Linv[2][0]*z;
T u1 = Linv[0][1]*x + Linv[1][1]*y + Linv[2][1]*z;
T u2 = Linv[0][2]*x + Linv[1][2]*y + Linv[2][2]*z;
u0 -= rint(u0);
u1 -= rint(u1);
u2 -= rint(u2);
T xtmp = L[0][0]*u0 + L[1][0]*u1 + L[2][0]*u2;
T ytmp = L[0][1]*u0 + L[1][1]*u1 + L[2][1]*u2;
T ztmp = L[0][2]*u0 + L[1][2]*u1 + L[2][2]*u2;
x = xtmp;
y = ytmp;
z = ztmp;
T d2min = xtmp*xtmp + ytmp*ytmp + ztmp*ztmp;
for (int i=0; i<27; i++)
{
T xnew = xtmp + images[i][0];
T ynew = ytmp + images[i][1];
T znew = ztmp + images[i][2];
T d2 = xnew*xnew + ynew*ynew + znew*znew;
if (d2 < d2min)
{
x = xnew;
y = ynew;
z = znew;
d2min = d2;
}
// __syncthreads(); // XXXJCW: this doesn't appear to be needed
}
return sqrt(d2min);
}
template<typename T>
__device__ __forceinline__
T min_dist_only (T x, T y, T z,
T const L[3][3], T const Linv[3][3],
T const images[27][3])
{
T u0 = Linv[0][0]*x + Linv[1][0]*y + Linv[2][0]*z;
T u1 = Linv[0][1]*x + Linv[1][1]*y + Linv[2][1]*z;
T u2 = Linv[0][2]*x + Linv[1][2]*y + Linv[2][2]*z;
u0 -= rint(u0);
u1 -= rint(u1);
u2 -= rint(u2);
x = L[0][0]*u0 + L[1][0]*u1 + L[2][0]*u2;
y = L[0][1]*u0 + L[1][1]*u1 + L[2][1]*u2;
z = L[0][2]*u0 + L[1][2]*u1 + L[2][2]*u2;
T d2min = x*x + y*y + z*z;
for (int i=0; i<27; i++)
{
T xnew = x + images[i][0];
T ynew = y + images[i][1];
T znew = z + images[i][2];
T d2 = xnew*xnew + ynew*ynew + znew*znew;
d2min = min (d2min, d2);
// __syncthreads(); // XXXJCW: this doesn't appear to be needed
}
return sqrt(d2min);
}
__constant__ float AcudaSpline[48];
__constant__ double AcudaSpline_double[48];
void
cuda_spline_init_PBC()
{
float A_h[48] = { -1.0/6.0, 3.0/6.0, -3.0/6.0, 1.0/6.0,
3.0/6.0, -6.0/6.0, 0.0/6.0, 4.0/6.0,
-3.0/6.0, 3.0/6.0, 3.0/6.0, 1.0/6.0,
1.0/6.0, 0.0/6.0, 0.0/6.0, 0.0/6.0,
0.0, -0.5, 1.0, -0.5,
0.0, 1.5, -2.0, 0.0,
0.0, -1.5, 1.0, 0.5,
0.0, 0.5, 0.0, 0.0,
0.0, 0.0, -1.0, 1.0,
0.0, 0.0, 3.0, -2.0,
0.0, 0.0, -3.0, 1.0,
0.0, 0.0, 1.0, 0.0
};
hipMemcpyToSymbol(AcudaSpline, A_h, 48*sizeof(float), 0,
hipMemcpyHostToDevice);
double A_d[48] = {-1.0/6.0, 3.0/6.0, -3.0/6.0, 1.0/6.0,
3.0/6.0, -6.0/6.0, 0.0/6.0, 4.0/6.0,
-3.0/6.0, 3.0/6.0, 3.0/6.0, 1.0/6.0,
1.0/6.0, 0.0/6.0, 0.0/6.0, 0.0/6.0,
0.0, -0.5, 1.0, -0.5,
0.0, 1.5, -2.0, 0.0,
0.0, -1.5, 1.0, 0.5,
0.0, 0.5, 0.0, 0.0,
0.0, 0.0, -1.0, 1.0,
0.0, 0.0, 3.0, -2.0,
0.0, 0.0, -3.0, 1.0,
0.0, 0.0, 1.0, 0.0
};
hipMemcpyToSymbol(AcudaSpline_double, A_d, 48*sizeof(double), 0,
hipMemcpyHostToDevice);
AisInitializedPBC = true;
}
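// AcudaSpline holds three 4x4 blocks of cubic B-spline basis coefficients in
// (t^3, t^2, t, 1) order: rows 0-3 give the value basis, rows 4-7 the first
// derivative (scaled by drInv at evaluation time) and rows 8-11 the second
// derivative (scaled by drInv^2).  With dr = rMax/(numCoefs-3), a distance r
// maps to index = floor(r/dr) and t = r/dr - index, and the spline value is
//   u(r) = sum_{k=0..3} coefs[index+k] *
//          (A[k][0]*t^3 + A[k][1]*t^2 + A[k][2]*t + A[k][3]),
// which is what eval_1d_spline / eval_1d_spline_vgl below compute.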
template<typename T>
__device__ __forceinline__
T eval_1d_spline (T dist, T rmax, T drInv, T const A[4][4],
T const * __restrict__ coefs)
{
T res;
if (dist >= rmax)
{
res = (T)0;
}
else
{
T s = dist * drInv;
T sf = floor(s);
int index = (int)sf;
T t = s - sf;
T t2 = t*t;
T t3 = t*t2;
res = (coefs[index+0]*(A[0][0]*t3 + A[0][1]*t2 + A[0][2]*t + A[0][3]) +
coefs[index+1]*(A[1][0]*t3 + A[1][1]*t2 + A[1][2]*t + A[1][3]) +
coefs[index+2]*(A[2][0]*t3 + A[2][1]*t2 + A[2][2]*t + A[2][3]) +
coefs[index+3]*(A[3][0]*t3 + A[3][1]*t2 + A[3][2]*t + A[3][3]));
}
return res;
}
template<typename T>
__device__ __forceinline__
T CMC_eval_1d_spline (T dist, T rmax, T drInv, T const * __restrict__ coefs)
{
T res;
if (dist >= rmax)
{
res = (T)0;
}
else
{
T s = dist * drInv;
T sf = floor(s);
int index = (int)sf;
T t = s - sf;
res = (coefs[index+0] * (((AcudaSpline[ 0] * t + AcudaSpline[ 1]) * t + AcudaSpline[ 2]) * t + AcudaSpline[ 3]) +
coefs[index+1] * (((AcudaSpline[ 4] * t + AcudaSpline[ 5]) * t + AcudaSpline[ 6]) * t + AcudaSpline[ 7]) +
coefs[index+2] * (((AcudaSpline[ 8] * t + AcudaSpline[ 9]) * t + AcudaSpline[10]) * t + AcudaSpline[11]) +
coefs[index+3] * (((AcudaSpline[12] * t + AcudaSpline[13]) * t + AcudaSpline[14]) * t + AcudaSpline[15]) );
}
return res;
}
template<typename T>
__device__ __forceinline__
void eval_1d_spline_vgl (T dist, T rmax, T drInv, T const A[12][4],
T const * __restrict__ coefs,
T& __restrict__ u, T& __restrict__ du,
T& __restrict__ d2u)
{
if (dist >= rmax)
{
u = du = d2u = (T)0;
}
else
{
T s = dist * drInv;
T sf = floor (s);
int index = (int)sf;
T t = s - sf;
T t2 = t*t;
T t3 = t*t2;
T c0 = coefs[index+0];
T c1 = coefs[index+1];
T c2 = coefs[index+2];
T c3 = coefs[index+3];
u = (c0 * (A[0][0]*t3 + A[0][1]*t2 + A[0][2]*t + A[0][3]) +
c1 * (A[1][0]*t3 + A[1][1]*t2 + A[1][2]*t + A[1][3]) +
c2 * (A[2][0]*t3 + A[2][1]*t2 + A[2][2]*t + A[2][3]) +
c3 * (A[3][0]*t3 + A[3][1]*t2 + A[3][2]*t + A[3][3]));
du = drInv *
(c0 * (A[4][0]*t3 + A[4][1]*t2 + A[4][2]*t + A[4][3]) +
c1 * (A[5][0]*t3 + A[5][1]*t2 + A[5][2]*t + A[5][3]) +
c2 * (A[6][0]*t3 + A[6][1]*t2 + A[6][2]*t + A[6][3]) +
c3 * (A[7][0]*t3 + A[7][1]*t2 + A[7][2]*t + A[7][3]));
d2u = drInv*drInv *
(c0 * (A[ 8][0]*t3 + A[ 8][1]*t2 + A[ 8][2]*t + A[ 8][3]) +
c1 * (A[ 9][0]*t3 + A[ 9][1]*t2 + A[ 9][2]*t + A[ 9][3]) +
c2 * (A[10][0]*t3 + A[10][1]*t2 + A[10][2]*t + A[10][3]) +
c3 * (A[11][0]*t3 + A[11][1]*t2 + A[11][2]*t + A[11][3]));
}
}
#define NAIVE_SCHEME 0
#define HORNER_SCHEME 1
#define ESTRIN_SCHEME 2
#define SCHEME2 HORNER_SCHEME
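// SCHEME2 selects how the cubic in t is evaluated inside
// CMC_eval_1d_spline_vgl:
//   NAIVE : a*t^3 + b*t^2 + c*t + d, with t^2 and t^3 formed explicitly
//   HORNER: ((a*t + b)*t + c)*t + d, the classic nested multiply-add form
//   ESTRIN: (a*t + b)*t^2 + (c*t + d), which splits the polynomial into two
//           halves that can be evaluated in parallel
// All three give the same polynomial; HORNER is the default here.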
template<typename T>
__device__ __forceinline__
void CMC_eval_1d_spline_vgl (T dist, T rmax, T drInv,
T const * __restrict__ coefs,
T& __restrict__ u,
T& __restrict__ du,
T& __restrict__ d2u)
{
if (dist >= rmax)
{
u = du = d2u = (T)0;
}
else
{
T s = dist * drInv;
T sf = floor (s);
int index = (int)sf;
T t = s - sf;
T c0 = coefs[index+0];
T c1 = coefs[index+1];
T c2 = coefs[index+2];
T c3 = coefs[index+3];
#if (SCHEME2 == HORNER_SCHEME)
u = (c0 * (((AcudaSpline[ 0*4 + 0] * t + AcudaSpline[ 0*4 + 1]) * t + AcudaSpline[ 0*4 + 2]) * t + AcudaSpline[ 0*4 + 3]) +
c1 * (((AcudaSpline[ 1*4 + 0] * t + AcudaSpline[ 1*4 + 1]) * t + AcudaSpline[ 1*4 + 2]) * t + AcudaSpline[ 1*4 + 3]) +
c2 * (((AcudaSpline[ 2*4 + 0] * t + AcudaSpline[ 2*4 + 1]) * t + AcudaSpline[ 2*4 + 2]) * t + AcudaSpline[ 2*4 + 3]) +
c3 * (((AcudaSpline[ 3*4 + 0] * t + AcudaSpline[ 3*4 + 1]) * t + AcudaSpline[ 3*4 + 2]) * t + AcudaSpline[ 3*4 + 3]));
du = drInv *
(c0 * (((AcudaSpline[ 4*4 + 0] * t + AcudaSpline[ 4*4 + 1]) * t + AcudaSpline[ 4*4 + 2]) * t + AcudaSpline[ 4*4 + 3]) +
c1 * (((AcudaSpline[ 5*4 + 0] * t + AcudaSpline[ 5*4 + 1]) * t + AcudaSpline[ 5*4 + 2]) * t + AcudaSpline[ 5*4 + 3]) +
c2 * (((AcudaSpline[ 6*4 + 0] * t + AcudaSpline[ 6*4 + 1]) * t + AcudaSpline[ 6*4 + 2]) * t + AcudaSpline[ 6*4 + 3]) +
c3 * (((AcudaSpline[ 7*4 + 0] * t + AcudaSpline[ 7*4 + 1]) * t + AcudaSpline[ 7*4 + 2]) * t + AcudaSpline[ 7*4 + 3]));
d2u = drInv * drInv *
(c0 * (((AcudaSpline[ 8*4 + 0] * t + AcudaSpline[ 8*4 + 1]) * t + AcudaSpline[ 8*4 + 2]) * t + AcudaSpline[ 8*4 + 3]) +
c1 * (((AcudaSpline[ 9*4 + 0] * t + AcudaSpline[ 9*4 + 1]) * t + AcudaSpline[ 9*4 + 2]) * t + AcudaSpline[ 9*4 + 3]) +
c2 * (((AcudaSpline[10*4 + 0] * t + AcudaSpline[10*4 + 1]) * t + AcudaSpline[10*4 + 2]) * t + AcudaSpline[10*4 + 3]) +
c3 * (((AcudaSpline[11*4 + 0] * t + AcudaSpline[11*4 + 1]) * t + AcudaSpline[11*4 + 2]) * t + AcudaSpline[11*4 + 3]));
#elif (SCHEME2 == ESTRIN_SCHEME)
T t2 = t*t;
u = (c0 * ((AcudaSpline[ 0*4 + 0] * t + AcudaSpline[ 0*4 + 1]) * t2 + (AcudaSpline[ 0*4 + 2] * t + AcudaSpline[ 0*4 + 3])) +
c1 * ((AcudaSpline[ 1*4 + 0] * t + AcudaSpline[ 1*4 + 1]) * t2 + (AcudaSpline[ 1*4 + 2] * t + AcudaSpline[ 1*4 + 3])) +
c2 * ((AcudaSpline[ 2*4 + 0] * t + AcudaSpline[ 2*4 + 1]) * t2 + (AcudaSpline[ 2*4 + 2] * t + AcudaSpline[ 2*4 + 3])) +
c3 * ((AcudaSpline[ 3*4 + 0] * t + AcudaSpline[ 3*4 + 1]) * t2 + (AcudaSpline[ 3*4 + 2] * t + AcudaSpline[ 3*4 + 3])) );
du = drInv *
(c0 * ((AcudaSpline[ 4*4 + 0] * t + AcudaSpline[ 4*4 + 1]) * t2 + (AcudaSpline[ 4*4 + 2] * t + AcudaSpline[ 4*4 + 3])) +
c1 * ((AcudaSpline[ 5*4 + 0] * t + AcudaSpline[ 5*4 + 1]) * t2 + (AcudaSpline[ 5*4 + 2] * t + AcudaSpline[ 5*4 + 3])) +
c2 * ((AcudaSpline[ 6*4 + 0] * t + AcudaSpline[ 6*4 + 1]) * t2 + (AcudaSpline[ 6*4 + 2] * t + AcudaSpline[ 6*4 + 3])) +
c3 * ((AcudaSpline[ 7*4 + 0] * t + AcudaSpline[ 7*4 + 1]) * t2 + (AcudaSpline[ 7*4 + 2] * t + AcudaSpline[ 7*4 + 3])) );
d2u = drInv * drInv *
(c0 * ((AcudaSpline[ 8*4 + 0] * t + AcudaSpline[ 8*4 + 1]) * t2 + (AcudaSpline[ 8*4 + 2] * t + AcudaSpline[ 8*4 + 3])) +
c1 * ((AcudaSpline[ 9*4 + 0] * t + AcudaSpline[ 9*4 + 1]) * t2 + (AcudaSpline[ 9*4 + 2] * t + AcudaSpline[ 9*4 + 3])) +
c2 * ((AcudaSpline[10*4 + 0] * t + AcudaSpline[10*4 + 1]) * t2 + (AcudaSpline[10*4 + 2] * t + AcudaSpline[10*4 + 3])) +
c3 * ((AcudaSpline[11*4 + 0] * t + AcudaSpline[11*4 + 1]) * t2 + (AcudaSpline[11*4 + 2] * t + AcudaSpline[11*4 + 3])) );
#else
T t2 = t*t;
T t3 = t*t2;
u = (c0 * (AcudaSpline[ 0*4 + 0] * t3 + AcudaSpline[ 0*4 + 1] * t2 + AcudaSpline[ 0*4 + 2] * t + AcudaSpline[ 0*4 + 3]) +
c1 * (AcudaSpline[ 1*4 + 0] * t3 + AcudaSpline[ 1*4 + 1] * t2 + AcudaSpline[ 1*4 + 2] * t + AcudaSpline[ 1*4 + 3]) +
c2 * (AcudaSpline[ 2*4 + 0] * t3 + AcudaSpline[ 2*4 + 1] * t2 + AcudaSpline[ 2*4 + 2] * t + AcudaSpline[ 2*4 + 3]) +
c3 * (AcudaSpline[ 3*4 + 0] * t3 + AcudaSpline[ 3*4 + 1] * t2 + AcudaSpline[ 3*4 + 2] * t + AcudaSpline[ 3*4 + 3]));
du = drInv *
(c0 * (AcudaSpline[ 4*4 + 0] * t3 + AcudaSpline[ 4*4 + 1] * t2 + AcudaSpline[ 4*4 + 2] * t + AcudaSpline[ 4*4 + 3]) +
c1 * (AcudaSpline[ 5*4 + 0] * t3 + AcudaSpline[ 5*4 + 1] * t2 + AcudaSpline[ 5*4 + 2] * t + AcudaSpline[ 5*4 + 3]) +
c2 * (AcudaSpline[ 6*4 + 0] * t3 + AcudaSpline[ 6*4 + 1] * t2 + AcudaSpline[ 6*4 + 2] * t + AcudaSpline[ 6*4 + 3]) +
c3 * (AcudaSpline[ 7*4 + 0] * t3 + AcudaSpline[ 7*4 + 1] * t2 + AcudaSpline[ 7*4 + 2] * t + AcudaSpline[ 7*4 + 3]));
d2u = drInv * drInv *
(c0 * (AcudaSpline[ 8*4 + 0] * t3 + AcudaSpline[ 8*4 + 1] * t2 + AcudaSpline[ 8*4 + 2] * t + AcudaSpline[ 8*4 + 3]) +
c1 * (AcudaSpline[ 9*4 + 0] * t3 + AcudaSpline[ 9*4 + 1] * t2 + AcudaSpline[ 9*4 + 2] * t + AcudaSpline[ 9*4 + 3]) +
c2 * (AcudaSpline[10*4 + 0] * t3 + AcudaSpline[10*4 + 1] * t2 + AcudaSpline[10*4 + 2] * t + AcudaSpline[10*4 + 3]) +
c3 * (AcudaSpline[11*4 + 0] * t3 + AcudaSpline[11*4 + 1] * t2 + AcudaSpline[11*4 + 2] * t + AcudaSpline[11*4 + 3]));
#endif
}
}
#define MAX_COEFS 32
template<typename T, int BS >
__global__ void
two_body_sum_PBC_kernel(T **R, int e1_first, int e1_last,
int e2_first, int e2_last,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T* latticeInv, T* sum)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
if (tid == 0)
myR = R[blockIdx.x];
__shared__ T coefs[MAX_COEFS];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
__shared__ T r1[BS][3], r2[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[4][4];
if (tid < 16)
A[tid>>2][tid&3] = AcudaSpline[tid];
__syncthreads();
int N1 = e1_last - e1_first + 1;
int N2 = e2_last - e2_first + 1;
int NB1 = N1/BS + ((N1 % BS) ? 1 : 0);
int NB2 = N2/BS + ((N2 % BS) ? 1 : 0);
T mysum = (T)0.0;
for (int b1=0; b1 < NB1; b1++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b1+i)*BS + tid < 3*N1)
r1[0][i*BS + tid] = myR[3*e1_first + (3*b1+i)*BS + tid];
__syncthreads();
int ptcl1 = e1_first+b1*BS + tid;
for (int b2=0; b2 < NB2; b2++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b2+i)*BS + tid < 3*N2)
r2[0][i*BS + tid] = myR[3*e2_first + (3*b2+i)*BS + tid];
__syncthreads();
// Now, loop over particles
int end = (b2+1)*BS < N2 ? BS : N2-b2*BS;
for (int j=0; j<end; j++)
{
int ptcl2 = e2_first + b2*BS+j;
T dx, dy, dz;
dx = r2[j][0] - r1[tid][0];
dy = r2[j][1] - r1[tid][1];
dz = r2[j][2] - r1[tid][2];
T dist = min_dist(dx, dy, dz, L, Linv);
if (ptcl1 != ptcl2 && (ptcl1 < (N1+e1_first) ) && (ptcl2 < (N2+e2_first)))
mysum += eval_1d_spline (dist, rMax, drInv, A, coefs);
}
__syncthreads();
}
}
__shared__ T shared_sum[BS];
shared_sum[tid] = mysum;
__syncthreads();
for (int s=BS>>1; s>0; s >>=1)
{
if (tid < s)
shared_sum[tid] += shared_sum[tid+s];
__syncthreads();
}
T factor = (e1_first == e2_first) ? 0.5 : 1.0;
if (tid==0)
sum[blockIdx.x] += factor*shared_sum[0];
}
void
two_body_sum_PBC (float *R[], int e1_first, int e1_last, int e2_first, int e2_last,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], float sum[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 128;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( two_body_sum_PBC_kernel<float,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
R, e1_first, e1_last, e2_first, e2_last,
spline_coefs, numCoefs, rMax, lattice, latticeInv, sum);
}
void
two_body_sum_PBC (double *R[], int e1_first, int e1_last, int e2_first, int e2_last,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[], double sum[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 128;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( two_body_sum_PBC_kernel<double,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
R, e1_first, e1_last, e2_first, e2_last,
spline_coefs, numCoefs, rMax, lattice, latticeInv, sum);
}
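// two_body_sum_PBC accumulates, per walker,
//   sum_{i in group1} sum_{j in group2, j != i} u(|r_i - r_j|_min)
// into sum[walker], where u is the spline above and |.|_min is the
// minimum-image distance.  When both index ranges refer to the same particle
// group the kernel applies a factor 1/2 so that each pair is counted once.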
template<typename T, int BS>
__global__ void
two_body_ratio_PBC_kernel(T **R, int first, int last,
T *Rnew, int inew,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T* latticeInv, T* sum)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = ((T)1)/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
__shared__ T myRnew[3], myRold[3];
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
if (tid < 3 )
{
myRnew[tid] = Rnew[3*blockIdx.x+tid];
myRold[tid] = myR[3*inew+tid];
}
__syncthreads();
__shared__ T coefs[MAX_COEFS];
__shared__ T r1[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[4][4];
if (tid < 16)
A[(tid>>2)][tid&3] = AcudaSpline[tid];
__syncthreads();
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
__shared__ T shared_sum[BS];
shared_sum[tid] = (T)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*N)
r1[0][n] = myR[3*first + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
T dx, dy, dz;
dx = myRnew[0] - r1[tid][0];
dy = myRnew[1] - r1[tid][1];
dz = myRnew[2] - r1[tid][2];
T dist = min_dist(dx, dy, dz, L, Linv);
T delta = eval_1d_spline (dist, rMax, drInv, A, coefs);
dx = myRold[0] - r1[tid][0];
dy = myRold[1] - r1[tid][1];
dz = myRold[2] - r1[tid][2];
dist = min_dist(dx, dy, dz, L, Linv);
delta -= eval_1d_spline (dist, rMax, drInv, A, coefs);
if (ptcl1 != inew && (ptcl1 < (N+first) ))
shared_sum[tid] += delta;
__syncthreads();
}
__syncthreads();
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
shared_sum[tid] += shared_sum[tid+s];
__syncthreads();
}
if (tid==0)
sum[blockIdx.x] += shared_sum[0];
}
void
two_body_ratio_PBC (float *R[], int first, int last,
float Rnew[], int inew,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], float sum[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 128;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( two_body_ratio_PBC_kernel<float,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, sum);
}
void
two_body_ratio_PBC (double *R[], int first, int last,
double Rnew[], int inew,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[], double sum[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
dim3 dimBlock(128);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( two_body_ratio_PBC_kernel<double,128>), dim3(dimGrid),dim3(dimBlock), 0, 0,
R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, sum);
}
template<typename T, int BS>
__global__ void
two_body_ratio_grad_PBC_kernel(T const * const * __restrict__ R,
int first, int last,
T const * __restrict__ Rnew, int inew,
T const * __restrict__ spline_coefs,
int numCoefs, T rMax,
T const * __restrict__ lattice,
T const * __restrict__ latticeInv,
bool zero, T *__restrict__ ratio_grad)
{
__shared__ T shared_grad[BS][3];
__shared__ T r1[BS][3];
__shared__ T shared_sum[BS];
__shared__ T coefs[MAX_COEFS];
int tid = threadIdx.x;
T dr = rMax /(T)(numCoefs-3);
T drInv = ((T)1)/dr;
// Safety for rounding error
rMax *= 0.999999f;
if (tid < numCoefs)
{
coefs[tid] = spline_coefs[tid];
}
shared_sum[tid] = (T)0;
shared_grad[tid][0] = (T)0;
shared_grad[tid][1] = (T)0;
shared_grad[tid][2] = (T)0;
__syncthreads();
T const * __restrict__ myR = R[blockIdx.x];
T rnew_x = Rnew[3*blockIdx.x+0];
T rnew_y = Rnew[3*blockIdx.x+1];
T rnew_z = Rnew[3*blockIdx.x+2];
T rold_x = myR[3*inew+0];
T rold_y = myR[3*inew+1];
T rold_z = myR[3*inew+2];
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if (((3*b+i)*BS + tid) < (3*N))
{
r1[0][n] = myR[3*first + (3*b+i)*BS + tid];
}
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
T dx, dy, dz, u, du, d2u, delta, dist;
dx = rold_x - r1[tid][0];
dy = rold_y - r1[tid][1];
dz = rold_z - r1[tid][2];
dist = CMC_min_dist_only(dx, dy, dz/*, L, Linv, images*/);
delta = -CMC_eval_1d_spline (dist, rMax, drInv/*, A*/, coefs);
dx = rnew_x - r1[tid][0];
dy = rnew_y - r1[tid][1];
dz = rnew_z - r1[tid][2];
dist = CMC_min_dist(dx, dy, dz/*, L, Linv, images*/);
CMC_eval_1d_spline_vgl (dist, rMax, drInv/*, A*/, coefs, u, du, d2u);
delta += u;
if ((ptcl1 != inew) && (ptcl1 < (N + first) ))
{
du /= dist;
shared_sum[tid] += delta;
shared_grad[tid][0] += du * dx;
shared_grad[tid][1] += du * dy;
shared_grad[tid][2] += du * dz;
}
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
{
shared_sum[tid] += shared_sum[tid+s];
shared_grad[tid][0] += shared_grad[tid+s][0];
shared_grad[tid][1] += shared_grad[tid+s][1];
shared_grad[tid][2] += shared_grad[tid+s][2];
}
__syncthreads();
}
if (tid==0)
{
if (zero)
{
ratio_grad[4*blockIdx.x+0] = shared_sum[0];
ratio_grad[4*blockIdx.x+1] = shared_grad[0][0];
ratio_grad[4*blockIdx.x+2] = shared_grad[0][1];
ratio_grad[4*blockIdx.x+3] = shared_grad[0][2];
}
else
{
ratio_grad[4*blockIdx.x+0] += shared_sum[0];
ratio_grad[4*blockIdx.x+1] += shared_grad[0][0];
ratio_grad[4*blockIdx.x+2] += shared_grad[0][1];
ratio_grad[4*blockIdx.x+3] += shared_grad[0][2];
}
}
}
template<typename T, int BS>
__global__ void
two_body_ratio_grad_PBC_kernel_fast (T **R, int first, int last,
T *Rnew, int inew,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T* latticeInv,
bool zero, T* ratio_grad)
{
int tid = threadIdx.x;
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
__shared__ T *myR;
__shared__ T myRnew[3], myRold[3];
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
if (tid < 3 )
{
myRnew[tid] = Rnew[3*blockIdx.x+tid];
myRold[tid] = myR[3*inew+tid];
}
__syncthreads();
__shared__ T coefs[MAX_COEFS];
__shared__ T r1[BS][3];
/*
__shared__ T L[3][3], Linv[3][3];
*/
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
/*
if (tid < 9) {
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
*/
__shared__ T A[12][4];
if (tid < 16)
{
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
__syncthreads();
int N = last - first + 1;
int NB = (N+BS-1)/BS;
__shared__ T shared_sum[BS];
__shared__ T shared_grad[BS][3];
shared_sum[tid] = (T)0.0;
shared_grad[tid][0] = shared_grad[tid][1] = shared_grad[tid][2] = 0.0f;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*N)
r1[0][n] = myR[3*first + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
T dx, dy, dz, u, du, d2u, delta, dist;
dx = myRold[0] - r1[tid][0];
dy = myRold[1] - r1[tid][1];
dz = myRold[2] - r1[tid][2];
dist = CMC_min_dist_fast(dx, dy, dz/*, L, Linv*/);
delta = -eval_1d_spline (dist, rMax, drInv, A, coefs);
dx = myRnew[0] - r1[tid][0];
dy = myRnew[1] - r1[tid][1];
dz = myRnew[2] - r1[tid][2];
dist = CMC_min_dist_fast(dx, dy, dz/*, L, Linv*/);
eval_1d_spline_vgl (dist, rMax, drInv, A, coefs,
u, du, d2u);
delta += u;
if (ptcl1 != inew && (ptcl1 < (N+first) ))
{
du /= dist;
shared_sum[tid] += delta;
shared_grad[tid][0] += du * dx;
shared_grad[tid][1] += du * dy;
shared_grad[tid][2] += du * dz;
}
__syncthreads();
}
__syncthreads();
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
{
shared_sum[tid] += shared_sum[tid+s];
shared_grad[tid][0] += shared_grad[tid+s][0];
shared_grad[tid][1] += shared_grad[tid+s][1];
shared_grad[tid][2] += shared_grad[tid+s][2];
}
__syncthreads();
}
if (tid==0)
{
if (zero)
{
ratio_grad[4*blockIdx.x+0] = shared_sum[0];
ratio_grad[4*blockIdx.x+1] = shared_grad[0][0];
ratio_grad[4*blockIdx.x+2] = shared_grad[0][1];
ratio_grad[4*blockIdx.x+3] = shared_grad[0][2];
}
else
{
ratio_grad[4*blockIdx.x+0] += shared_sum[0];
ratio_grad[4*blockIdx.x+1] += shared_grad[0][0];
ratio_grad[4*blockIdx.x+2] += shared_grad[0][1];
ratio_grad[4*blockIdx.x+3] += shared_grad[0][2];
}
}
}
// use_fast_image indicates that rMax does not exceed the simulation cell
// radius. In this case, we don't have to search over the 27 periodic images.
void
two_body_ratio_grad_PBC(float *R[], int first, int last,
float Rnew[], int inew,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], bool zero,
float ratio_grad[], int numWalkers,
bool use_fast_image)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
CMC_PROFILING_BEGIN();
// fprintf(stderr, "first = %d\n", first);
// fprintf(stderr, "last = %d\n", last);
// fprintf(stderr, "inew = %d\n", inew);
// fprintf(stderr, "rMax = %1.3f\n", rMax);
if (use_fast_image)
{
hipLaunchKernelGGL(( two_body_ratio_grad_PBC_kernel_fast<float,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, zero, ratio_grad);
}
else
{
hipLaunchKernelGGL(( two_body_ratio_grad_PBC_kernel<float,BS>), dim3(dimGrid),dim3(dimBlock), 0, gpu::kernelStream,
R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, zero, ratio_grad);
}
CMC_PROFILING_END();
}
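// Illustrative call from the host (variable names are hypothetical), assuming
// the spline cutoff and the simulation-cell radius are both known:
//
//   bool fast = sim_cell_radius >= rMax;   // 27-image search not required
//   two_body_ratio_grad_PBC(Rlist, first, last, Rnew, inew, spline_coefs,
//                           numCoefs, rMax, lattice, latticeInv, true,
//                           ratio_grad, numWalkers, fast);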
void
two_body_ratio_grad_PBC(double *R[], int first, int last,
double Rnew[], int inew,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[], bool zero,
double ratio_grad[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( two_body_ratio_grad_PBC_kernel<double,BS>), dim3(dimGrid),dim3(dimBlock), 0, gpu::kernelStream,
R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, zero, ratio_grad);
}
template<int BS>
__global__ void
two_body_NLratio_PBC_kernel(NLjobGPU<float> const * __restrict__ jobs,
int first, int last,
float const * const * __restrict__ spline_coefs,
int const * __restrict__ numCoefs,
float const * __restrict__ rMaxList,
float const * __restrict__ lattice,
float const * __restrict__ latticeInv,
float sim_cell_radius)
{
const int MAX_RATIOS = 18;
__shared__ float shared_sum[MAX_RATIOS][BS+1];
__shared__ float myRnew[MAX_RATIOS][3];
__shared__ float coefs[MAX_COEFS];
__shared__ float r1[BS][3];
float const * __restrict__ myCoefs = spline_coefs[blockIdx.x];
NLjobGPU<float> myJob = jobs[blockIdx.x];
const int myNumCoefs = numCoefs[blockIdx.x];
const int tid = threadIdx.x;
if (tid < myNumCoefs)
{
coefs[tid] = myCoefs[tid];
}
for (int i = 0; i < 3; i++)
{
if (i*BS + tid < 3*myJob.NumQuadPoints)
{
myRnew[0][i*BS+tid] = myJob.QuadPoints[i*BS+tid];
}
}
for (int i = 0; i < myJob.NumQuadPoints; i++)
{
shared_sum[i][tid] = (float)0;
}
__syncthreads();
const float rMax = rMaxList[blockIdx.x];
const float dr = rMax / (myNumCoefs - 3);
const float drInv = 1.0f / dr;
const int use_fast = sim_cell_radius >= rMax;
const float rold_x = myJob.R[3*myJob.Elec+0];
const float rold_y = myJob.R[3*myJob.Elec+1];
const float rold_z = myJob.R[3*myJob.Elec+2];
const int N = last - first + 1;
const int NB = N / BS + ((N % BS) ? 1 : 0);
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if (((3*b+i)*BS + tid) < (3*N))
{
r1[0][n] = myJob.R[3*first + (3*b+i)*BS + tid];
}
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
float dx = rold_x - r1[tid][0];
float dy = rold_y - r1[tid][1];
float dz = rold_z - r1[tid][2];
float dist;
if (use_fast)
{
dist = CMC_min_dist_fast(dx, dy, dz/*, L, Linv*/);
}
else
{
dist = CMC_min_dist_only(dx, dy, dz/*, L, Linv, images*/);
}
float uOld = CMC_eval_1d_spline (dist, rMax, drInv/*, A*/, coefs);
if (use_fast)
{
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
{
dx = myRnew[iq][0] - r1[tid][0];
dy = myRnew[iq][1] - r1[tid][1];
dz = myRnew[iq][2] - r1[tid][2];
dist = CMC_min_dist_fast(dx, dy, dz/*, L, Linv*/);
if ((ptcl1 != myJob.Elec) && (ptcl1 < (N + first)))
{
shared_sum[iq][tid] += CMC_eval_1d_spline (dist, rMax, drInv/*, A*/, coefs) - uOld;
}
}
}
else
{
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
{
dx = myRnew[iq][0] - r1[tid][0];
dy = myRnew[iq][1] - r1[tid][1];
dz = myRnew[iq][2] - r1[tid][2];
dist = CMC_min_dist_only(dx, dy, dz/*, L, Linv, images*/);
if ((ptcl1 != myJob.Elec) && (ptcl1 < (N + first)))
{
shared_sum[iq][tid] += CMC_eval_1d_spline (dist, rMax, drInv/*, A*/, coefs) - uOld;
}
}
}
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
{
for (int iq=0; iq < myJob.NumQuadPoints; iq++)
{
shared_sum[iq][tid] += shared_sum[iq][tid+s];
}
}
__syncthreads();
}
if (tid < myJob.NumQuadPoints)
{
myJob.Ratios[tid] *= exp(-shared_sum[tid][0]);
}
}
template<int BS>
__global__ void
two_body_NLratio_PBC_kernel(NLjobGPU<double> *jobs, int first, int last,
double **spline_coefs, int *numCoefs,
double *rMaxList,
double *lattice, double *latticeInv,
double sim_cell_radius)
{
const int MAX_RATIOS = 18;
int tid = threadIdx.x;
__shared__ NLjobGPU<double> myJob;
__shared__ double myRnew[MAX_RATIOS][3], myRold[3];
__shared__ double* myCoefs;
__shared__ int myNumCoefs;
__shared__ double rMax;
if (tid == 0)
{
myJob = jobs[blockIdx.x];
myCoefs = spline_coefs[blockIdx.x];
myNumCoefs = numCoefs[blockIdx.x];
rMax = rMaxList[blockIdx.x];
}
__syncthreads();
if (tid < 3 )
myRold[tid] = myJob.R[3*myJob.Elec+tid];
for (int i=0; i<3; i++)
if (i*BS + tid < 3*myJob.NumQuadPoints)
myRnew[0][i*BS+tid] = myJob.QuadPoints[i*BS+tid];
__syncthreads();
double dr = rMax/(double)(myNumCoefs-3);
double drInv = 1.0/dr;
__shared__ double coefs[MAX_COEFS];
__shared__ double r1[BS][3];
__shared__ double L[3][3], Linv[3][3];
if (tid < myNumCoefs)
coefs[tid] = myCoefs[tid];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ double A[4][4];
if (tid < 16)
A[(tid>>2)][tid&3] = AcudaSpline[tid];
__syncthreads();
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
__shared__ double shared_sum[MAX_RATIOS][BS+1];
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
shared_sum[iq][tid] = (double)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*N)
r1[0][n] = myJob.R[3*first + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
double dx, dy, dz;
dx = myRold[0] - r1[tid][0];
dy = myRold[1] - r1[tid][1];
dz = myRold[2] - r1[tid][2];
double dist = min_dist(dx, dy, dz, L, Linv);
double uOld = eval_1d_spline (dist, rMax, drInv, A, coefs);
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
{
dx = myRnew[iq][0] - r1[tid][0];
dy = myRnew[iq][1] - r1[tid][1];
dz = myRnew[iq][2] - r1[tid][2];
dist = min_dist(dx, dy, dz, L, Linv);
if (ptcl1 != myJob.Elec && (ptcl1 < (N+first)))
shared_sum[iq][tid] += eval_1d_spline (dist, rMax, drInv, A, coefs) - uOld;
}
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
for (int iq=0; iq < myJob.NumQuadPoints; iq++)
shared_sum[iq][tid] += shared_sum[iq][tid+s];
__syncthreads();
}
if (tid < myJob.NumQuadPoints)
myJob.Ratios[tid] *= exp(-shared_sum[tid][0]);
}
void
two_body_NLratios_PBC(NLjobGPU<float> jobs[], int first, int last,
float* spline_coefs[], int numCoefs[], float rMax[],
float lattice[], float latticeInv[], float sim_cell_radius,
int numjobs)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
CMC_PROFILING_BEGIN();
while (numjobs > 65535)
{
dim3 dimGrid(65535);
hipLaunchKernelGGL(( two_body_NLratio_PBC_kernel<BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
jobs, first, last, spline_coefs, numCoefs, rMax,
lattice, latticeInv, sim_cell_radius);
jobs += 65535;
numjobs -= 65535;
}
dim3 dimGrid(numjobs);
hipLaunchKernelGGL(( two_body_NLratio_PBC_kernel<BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
jobs, first, last, spline_coefs, numCoefs, rMax,
lattice, latticeInv, sim_cell_radius);
CMC_PROFILING_END();
}
void
two_body_NLratios_PBC(NLjobGPU<double> jobs[], int first, int last,
double* spline_coefs[], int numCoefs[], double rMax[],
double lattice[], double latticeInv[],
double sim_cell_radius, int numjobs)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
while (numjobs > 65535)
{
dim3 dimGrid(65535);
hipLaunchKernelGGL(( two_body_NLratio_PBC_kernel<BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
jobs, first, last, spline_coefs, numCoefs, rMax,
lattice, latticeInv, sim_cell_radius);
jobs += 65535;
numjobs -= 65535;
}
dim3 dimGrid(numjobs);
hipLaunchKernelGGL(( two_body_NLratio_PBC_kernel<BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
jobs, first, last, spline_coefs, numCoefs, rMax,
lattice, latticeInv, sim_cell_radius);
}
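// Both NLratio launchers split the job list into chunks of at most 65535
// blocks because gridDim.x was historically limited to 65535; each chunk
// relaunches the same kernel with the job pointer advanced (launch() below
// stands for the hipLaunchKernelGGL call above):
//
//   while (numjobs > 65535) { launch(jobs, 65535); jobs += 65535; numjobs -= 65535; }
//   launch(jobs, numjobs);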
template<typename T>
__global__ void
two_body_update_PBC_kernel (T **R, int N, int iat)
{
__shared__ T* myR;
if (threadIdx.x == 0)
myR = R[blockIdx.x];
__syncthreads();
if (threadIdx.x < 3)
myR[3*iat + threadIdx.x] = myR[3*N + threadIdx.x];
}
void
two_body_update_PBC(float *R[], int N, int iat, int numWalkers)
{
dim3 dimBlock(32);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( two_body_update_PBC_kernel<float>), dim3(dimGrid), dim3(dimBlock), 0, 0, R, N, iat);
}
void
two_body_update_PBC(double *R[], int N, int iat, int numWalkers)
{
dim3 dimBlock(3);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( two_body_update_PBC_kernel<double>), dim3(dimGrid), dim3(dimBlock), 0, 0, R, N, iat);
}
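// two_body_update_PBC accepts a proposed single-particle move: each walker's
// position array appears to keep the trial position in slot N (one past the
// last particle), and the kernel simply copies that slot into particle iat.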
#define MAX_COEFS 32
template<typename T, int BS>
__global__ void
two_body_grad_lapl_PBC_kernel(T **R, int e1_first, int e1_last,
int e2_first, int e2_last,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T *latticeInv,
T *gradLapl, int row_stride)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
if (tid == 0)
myR = R[blockIdx.x];
__shared__ T coefs[MAX_COEFS];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
__shared__ T r1[BS][3], r2[BS][3];
/*
__shared__ T L[3][3], Linv[3][3];
if (tid < 9) {
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[12][4];
if (tid < 16) {
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
*/
__syncthreads();
int N1 = e1_last - e1_first + 1;
int N2 = e2_last - e2_first + 1;
int NB1 = N1/BS + ((N1 % BS) ? 1 : 0);
int NB2 = N2/BS + ((N2 % BS) ? 1 : 0);
__shared__ T sGradLapl[BS][4];
for (int b1=0; b1 < NB1; b1++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b1+i)*BS + tid < 3*N1)
r1[0][i*BS + tid] = myR[3*e1_first + (3*b1+i)*BS + tid];
__syncthreads();
int ptcl1 = e1_first+b1*BS + tid;
int offset = blockIdx.x * row_stride + 4*b1*BS + 4*e1_first;
sGradLapl[tid][0] = sGradLapl[tid][1] =
sGradLapl[tid][2] = sGradLapl[tid][3] = (T)0.0;
for (int b2=0; b2 < NB2; b2++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b2+i)*BS + tid < 3*N2)
r2[0][i*BS + tid] = myR[3*e2_first + (3*b2+i)*BS + tid];
__syncthreads();
// Now, loop over particles
int end = (b2+1)*BS < N2 ? BS : N2-b2*BS;
for (int j=0; j<end; j++)
{
int ptcl2 = e2_first + b2*BS+j;
T dx, dy, dz, u, du, d2u;
dx = r2[j][0] - r1[tid][0];
dy = r2[j][1] - r1[tid][1];
dz = r2[j][2] - r1[tid][2];
T dist = CMC_min_dist(dx, dy, dz/*, L, Linv*/);
CMC_eval_1d_spline_vgl (dist, rMax, drInv/*, A*/, coefs, u, du, d2u);
if (ptcl1 != ptcl2 && (ptcl1 < (N1+e1_first) ) && (ptcl2 < (N2+e2_first)))
{
du /= dist;
sGradLapl[tid][0] += du * dx;
sGradLapl[tid][1] += du * dy;
sGradLapl[tid][2] += du * dz;
sGradLapl[tid][3] -= d2u + 2.0*du;
}
}
__syncthreads();
}
for (int i=0; i<4; i++)
if ((4*b1+i)*BS + tid < 4*N1)
gradLapl[offset + i*BS +tid] += sGradLapl[0][i*BS+tid];
__syncthreads();
}
}
template<typename T, int BS>
__global__ void
two_body_grad_lapl_PBC_kernel_fast(T **R, int e1_first, int e1_last,
int e2_first, int e2_last,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T *latticeInv,
T *gradLapl, int row_stride)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
if (tid == 0)
myR = R[blockIdx.x];
__shared__ T coefs[MAX_COEFS];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
__shared__ T r1[BS][3], r2[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[12][4];
if (tid < 16)
{
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
__syncthreads();
int N1 = e1_last - e1_first + 1;
int N2 = e2_last - e2_first + 1;
int NB1 = N1/BS + ((N1 % BS) ? 1 : 0);
int NB2 = N2/BS + ((N2 % BS) ? 1 : 0);
__shared__ T sGradLapl[BS][4];
for (int b1=0; b1 < NB1; b1++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b1+i)*BS + tid < 3*N1)
r1[0][i*BS + tid] = myR[3*e1_first + (3*b1+i)*BS + tid];
__syncthreads();
int ptcl1 = e1_first+b1*BS + tid;
int offset = blockIdx.x * row_stride + 4*b1*BS + 4*e1_first;
sGradLapl[tid][0] = sGradLapl[tid][1] =
sGradLapl[tid][2] = sGradLapl[tid][3] = (T)0.0;
for (int b2=0; b2 < NB2; b2++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b2+i)*BS + tid < 3*N2)
r2[0][i*BS + tid] = myR[3*e2_first + (3*b2+i)*BS + tid];
__syncthreads();
// Now, loop over particles
int end = (b2+1)*BS < N2 ? BS : N2-b2*BS;
for (int j=0; j<end; j++)
{
int ptcl2 = e2_first + b2*BS+j;
T dx, dy, dz, u, du, d2u;
dx = r2[j][0] - r1[tid][0];
dy = r2[j][1] - r1[tid][1];
dz = r2[j][2] - r1[tid][2];
T dist = min_dist(dx, dy, dz, L, Linv);
eval_1d_spline_vgl (dist, rMax, drInv, A, coefs, u, du, d2u);
if (ptcl1 != ptcl2 && (ptcl1 < (N1+e1_first) ) && (ptcl2 < (N2+e2_first)))
{
du /= dist;
sGradLapl[tid][0] += du * dx;
sGradLapl[tid][1] += du * dy;
sGradLapl[tid][2] += du * dz;
sGradLapl[tid][3] -= d2u + 2.0*du;
}
}
__syncthreads();
}
for (int i=0; i<4; i++)
if ((4*b1+i)*BS + tid < 4*N1)
gradLapl[offset + i*BS +tid] += sGradLapl[0][i*BS+tid];
__syncthreads();
}
}
void
two_body_grad_lapl_PBC(float *R[], int e1_first, int e1_last,
int e2_first, int e2_last,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], float sim_cell_radius,
float gradLapl[], int row_stride, int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
CMC_PROFILING_BEGIN();
if (sim_cell_radius >= rMax)
hipLaunchKernelGGL(( two_body_grad_lapl_PBC_kernel_fast<float,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
R, e1_first, e1_last, e2_first, e2_last, spline_coefs, numCoefs,
rMax, lattice, latticeInv, gradLapl, row_stride);
else
hipLaunchKernelGGL(( two_body_grad_lapl_PBC_kernel<float,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
R, e1_first, e1_last, e2_first, e2_last, spline_coefs, numCoefs,
rMax, lattice, latticeInv, gradLapl, row_stride);
CMC_PROFILING_END();
}
void
two_body_grad_lapl_PBC(double *R[], int e1_first, int e1_last,
int e2_first, int e2_last,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[],
double gradLapl[], int row_stride, int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( two_body_grad_lapl_PBC_kernel<double,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
R, e1_first, e1_last, e2_first, e2_last, spline_coefs, numCoefs,
rMax, lattice, latticeInv, gradLapl, row_stride);
}
template<typename T, int BS>
__global__ void
two_body_grad_PBC_kernel (T const * const * __restrict__ R,
int first, int last, int iat,
T const * __restrict__ spline_coefs,
int numCoefs, T rMax,
T const * __restrict__ lattice,
T const * __restrict__ latticeInv,
bool zeroOut, T * __restrict__ grad)
{
__shared__ T sGrad[BS][3];
__shared__ T r1[BS][3];
__shared__ T coefs[MAX_COEFS];
T dr = rMax/(T)(numCoefs-3);
T drInv = ((T)1)/dr;
int tid = threadIdx.x;
// Safety for rounding error
rMax *= 0.999999f;
if (tid < numCoefs)
{
coefs[tid] = spline_coefs[tid];
}
sGrad[tid][0] = (T)0;
sGrad[tid][1] = (T)0;
sGrad[tid][2] = (T)0;
__syncthreads();
T const * __restrict__ myR = R[blockIdx.x];
T r2_x = myR[3*iat+0];
T r2_y = myR[3*iat+1];
T r2_z = myR[3*iat+2];
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
for (int b = 0; b < NB; b++)
{
// Load block of positions from global memory
for (int i = 0; i < 3; i++)
{
if ((3*b+i)*BS + tid < 3*N)
{
r1[0][i*BS + tid] = myR[3*first + (3*b+i)*BS + tid];
}
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
T dx, dy, dz, u, du, d2u;
dx = r2_x - r1[tid][0];
dy = r2_y - r1[tid][1];
dz = r2_z - r1[tid][2];
T dist = CMC_min_dist(dx, dy, dz/*, L, Linv, images*/);
CMC_eval_1d_spline_vgl (dist, rMax, drInv/*, A*/, coefs, u, du, d2u);
if (ptcl1 != iat && ptcl1 < (N+first))
{
du /= dist;
sGrad[tid][0] += du * dx;
sGrad[tid][1] += du * dy;
sGrad[tid][2] += du * dz;
}
__syncthreads();
}
// Do reduction across threads in block
for (int s=BS>>1; s>0; s>>=1)
{
if (tid < s)
{
sGrad[tid][0] += sGrad[tid+s][0];
sGrad[tid][1] += sGrad[tid+s][1];
sGrad[tid][2] += sGrad[tid+s][2];
}
__syncthreads();
}
if (tid < 3)
{
if (zeroOut)
{
grad[3*blockIdx.x + tid] = sGrad[0][tid];
}
else
{
grad[3*blockIdx.x + tid] += sGrad[0][tid];
}
}
}
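// "Fast" variant: keeps the lattice and the spline matrix A in shared memory
// and uses min_dist_fast (a single nearest-image wrap, no 27-image search),
// which is valid when rMax does not exceed the simulation-cell radius.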
template<typename T, int BS>
__global__ void
two_body_grad_PBC_kernel_fast(T **R, int first, int last, int iat,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T *latticeInv, bool zeroOut, T *grad)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR, r2[3];
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
if (tid < 3)
r2[tid] = myR[3*iat+tid];
__shared__ T coefs[MAX_COEFS];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
__shared__ T r1[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[12][4];
if (tid < 16)
{
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
__syncthreads();
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
__shared__ T sGrad[BS][3];
sGrad[tid][0] = sGrad[tid][1] = sGrad[tid][2] = (T)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b+i)*BS + tid < 3*N)
r1[0][i*BS + tid] = myR[3*first + (3*b+i)*BS + tid];
__syncthreads();
int ptcl1 = first+b*BS + tid;
T dx, dy, dz, u, du, d2u;
dx = r2[0] - r1[tid][0];
dy = r2[1] - r1[tid][1];
dz = r2[2] - r1[tid][2];
T dist = min_dist_fast(dx, dy, dz, L, Linv);
eval_1d_spline_vgl (dist, rMax, drInv, A, coefs, u, du, d2u);
if (ptcl1 != iat && ptcl1 < (N+first))
{
du /= dist;
sGrad[tid][0] += du * dx;
sGrad[tid][1] += du * dy;
sGrad[tid][2] += du * dz;
}
__syncthreads();
}
// Do reduction across threads in block
for (int s=BS>>1; s>0; s>>=1)
{
if (tid < s)
{
sGrad[tid][0] += sGrad[tid+s][0];
sGrad[tid][1] += sGrad[tid+s][1];
sGrad[tid][2] += sGrad[tid+s][2];
}
__syncthreads();
}
if (tid < 3)
{
if (zeroOut)
grad[3*blockIdx.x + tid] = sGrad[0][tid];
else
grad[3*blockIdx.x + tid] += sGrad[0][tid];
}
}
void
two_body_gradient_PBC (float *R[], int first, int last, int iat,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], float sim_cell_radius,
bool zeroOut,
float grad[], int numWalkers)
{
const int BS = 32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
CMC_PROFILING_BEGIN();
if (sim_cell_radius >= rMax)
hipLaunchKernelGGL(( two_body_grad_PBC_kernel_fast<float,BS>), dim3(dimGrid),dim3(dimBlock), 0, gpu::kernelStream,
R, first, last, iat, spline_coefs, numCoefs,
rMax, lattice, latticeInv, zeroOut, grad);
else
hipLaunchKernelGGL(( two_body_grad_PBC_kernel<float,BS>), dim3(dimGrid),dim3(dimBlock), 0, gpu::kernelStream,
R, first, last, iat, spline_coefs, numCoefs,
rMax, lattice, latticeInv, zeroOut, grad);
CMC_PROFILING_END();
}
void
two_body_gradient_PBC (double *R[], int first, int last, int iat,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[], bool zeroOut,
double grad[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( two_body_grad_PBC_kernel<double,BS>), dim3(dimGrid),dim3(dimBlock), 0, gpu::kernelStream,
R, first, last, iat, spline_coefs, numCoefs,
rMax, lattice, latticeInv, zeroOut, grad);
}
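// Accumulates derivatives of the two-body Jastrow with respect to the spline
// coefficients. For each coefficient c, sderivs[c][0] collects the B-spline
// basis values (du/dc) and sderivs[c][1] collects the gradient-dot and
// Laplacian terms noted in the comments below; the accumulated values are
// scaled and written to derivs[walker] at the end.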
template<typename T, int BS>
__global__ void
two_body_derivs_PBC_kernel(T **R, T **gradLogPsi,
int e1_first, int e1_last,
int e2_first, int e2_last,
int numCoefs, T rMax,
T *lattice, T *latticeInv,
T **derivs)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0f/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR, *myGrad, *myDerivs;
if (tid == 0)
{
myR = R[blockIdx.x];
myGrad = gradLogPsi[blockIdx.x];
myDerivs = derivs[blockIdx.x];
}
__shared__ T sderivs[MAX_COEFS][2];
// __shared__ T coefs[MAX_COEFS];
// if (tid < numCoefs)
// coefs[tid] = spline_coefs[tid];
__shared__ T r1[BS][3], r2[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[12][4];
if (tid < 16)
{
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
__syncthreads();
sderivs[tid][0] = T();
sderivs[tid][1] = T();
int N1 = e1_last - e1_first + 1;
int N2 = e2_last - e2_first + 1;
int NB1 = N1/BS + ((N1 % BS) ? 1 : 0);
int NB2 = N2/BS + ((N2 % BS) ? 1 : 0);
__shared__ T sGrad[BS][3];
for (int b1=0; b1 < NB1; b1++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b1+i)*BS + tid < 3*N1)
{
int outoff = i*BS+tid;
int inoff = outoff + 3*e1_first + 3*b1*BS;
r1[0][outoff] = myR[inoff];//[3*e1_first + (3*b1+i)*BS + tid];
sGrad[0][outoff] = myGrad[inoff];
}
__syncthreads();
int ptcl1 = e1_first+b1*BS + tid;
for (int b2=0; b2 < NB2; b2++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b2+i)*BS + tid < 3*N2)
r2[0][i*BS + tid] = myR[3*e2_first + (3*b2+i)*BS + tid];
__syncthreads();
// Now, loop over particles
int end = (b2+1)*BS < N2 ? BS : N2-b2*BS;
for (int j=0; j<end; j++)
{
int ptcl2 = e2_first + b2*BS+j;
T dx, dy, dz;
dx = r2[j][0] - r1[tid][0];
dy = r2[j][1] - r1[tid][1];
dz = r2[j][2] - r1[tid][2];
T dist = min_dist(dx, dy, dz, L, Linv);
T distInv = 1.0f/dist;
T s = dist * drInv;
        T sf = floor (s);  // type-generic floor keeps full precision in the double instantiation
int index = (int)sf;
T t = s - sf;
T t2 = t*t;
T t3 = t*t2;
T v0, v1, v2, v3;
// sderivs[index+0][0] += (A[0][0]*t3 + A[0][1]*t2 + A[0][2]*t + A[0][3]);
// sderivs[index+1][0] += (A[1][0]*t3 + A[1][1]*t2 + A[1][2]*t + A[1][3]);
// sderivs[index+2][0] += (A[2][0]*t3 + A[2][1]*t2 + A[2][2]*t + A[2][3]);
// sderivs[index+3][0] += (A[3][0]*t3 + A[3][1]*t2 + A[3][2]*t + A[3][3]);
v0 = (A[0][0]*t3 + A[0][1]*t2 + A[0][2]*t + A[0][3]);
v1 = (A[1][0]*t3 + A[1][1]*t2 + A[1][2]*t + A[1][3]);
v2 = (A[2][0]*t3 + A[2][1]*t2 + A[2][2]*t + A[2][3]);
v3 = (A[3][0]*t3 + A[3][1]*t2 + A[3][2]*t + A[3][3]);
for (int id=0; id<BS; id++)
if (tid == id && ptcl1 != ptcl2 && ptcl1 <= e1_last && (dist < rMax))
{
sderivs[index+0][0] += v0;
sderivs[index+1][0] += v1;
sderivs[index+2][0] += v2;
sderivs[index+3][0] += v3;
}
T prefact = (dx*sGrad[tid][0] + dy*sGrad[tid][1] + dz*sGrad[tid][2])*distInv;
T du0 = drInv * (A[4][0]*t3 + A[4][1]*t2 + A[4][2]*t + A[4][3]);
T du1 = drInv * (A[5][0]*t3 + A[5][1]*t2 + A[5][2]*t + A[5][3]);
T du2 = drInv * (A[6][0]*t3 + A[6][1]*t2 + A[6][2]*t + A[6][3]);
T du3 = drInv * (A[7][0]*t3 + A[7][1]*t2 + A[7][2]*t + A[7][3]);
// This is the dot (gradu, grad_log_psi) term.
v0 = 2.0f* prefact * du0;
v1 = 2.0f* prefact * du1;
v2 = 2.0f* prefact * du2;
v3 = 2.0f* prefact * du3;
// This is the lapl u term
v0 -= drInv*drInv*(A[ 8][0]*t3 + A[ 8][1]*t2 + A[ 8][2]*t + A[ 8][3]) + 2.0f*du0*distInv;
v1 -= drInv*drInv*(A[ 9][0]*t3 + A[ 9][1]*t2 + A[ 9][2]*t + A[ 9][3]) + 2.0f*du1*distInv;
v2 -= drInv*drInv*(A[10][0]*t3 + A[10][1]*t2 + A[10][2]*t + A[10][3]) + 2.0f*du2*distInv;
v3 -= drInv*drInv*(A[11][0]*t3 + A[11][1]*t2 + A[11][2]*t + A[11][3]) + 2.0f*du3*distInv;
for (int id=0; id<BS; id++)
if (tid == id && ptcl1 != ptcl2 && ptcl1 <= e1_last && (dist < rMax))
{
sderivs[index+0][1] += v0;
sderivs[index+1][1] += v1;
sderivs[index+2][1] += v2;
sderivs[index+3][1] += v3;
}
}
__syncthreads();
}
}
// if (e1_first == e2_first)
sderivs[tid][0] *= 0.5f;
sderivs[tid][1] *= 0.5f;
if (tid < 2*numCoefs)
myDerivs[tid] = -sderivs[0][tid];
if (tid+BS < 2*numCoefs)
myDerivs[tid+BS] = sderivs[0][tid+BS];
}
void
two_body_derivs_PBC(float *R[], float *gradLogPsi[], int e1_first, int e1_last,
int e2_first, int e2_last,
int numCoefs, float rMax,
float lattice[], float latticeInv[], float sim_cell_radius,
float *derivs[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
if (sim_cell_radius >= rMax)
hipLaunchKernelGGL(( two_body_derivs_PBC_kernel<float,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
R, gradLogPsi, e1_first, e1_last, e2_first, e2_last, numCoefs,
rMax, lattice, latticeInv, derivs);
else
hipLaunchKernelGGL(( two_body_derivs_PBC_kernel<float,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
R, gradLogPsi, e1_first, e1_last, e2_first, e2_last, numCoefs,
rMax, lattice, latticeInv, derivs);
}
void
two_body_derivs_PBC(double *R[], double *gradLogPsi[], int e1_first, int e1_last,
int e2_first, int e2_last,
int numCoefs, double rMax,
double lattice[], double latticeInv[], double sim_cell_radius,
double *derivs[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
if (sim_cell_radius >= rMax)
hipLaunchKernelGGL(( two_body_derivs_PBC_kernel<double,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
R, gradLogPsi, e1_first, e1_last, e2_first, e2_last, numCoefs,
rMax, lattice, latticeInv, derivs);
else
hipLaunchKernelGGL(( two_body_derivs_PBC_kernel<double,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
R, gradLogPsi, e1_first, e1_last, e2_first, e2_last, numCoefs,
rMax, lattice, latticeInv, derivs);
}
////////////////////////////////////////////////////////////////
// One-body routines //
////////////////////////////////////////////////////////////////
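// These mirror the two-body routines above, but sum over pairs of fixed
// centers C[cfirst..clast] and electrons R[efirst..elast] (one walker per
// block) instead of electron-electron pairs.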
template<typename T, int BS >
__global__ void
one_body_sum_PBC_kernel(T *C, T **R, int cfirst, int clast,
int efirst, int elast,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T *latticeInv, T *sum)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
if (tid == 0)
myR = R[blockIdx.x];
__shared__ T coefs[MAX_COEFS];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
__shared__ T rc[BS][3], re[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[4][4];
if (tid < 16)
A[tid>>2][tid&3] = AcudaSpline[tid];
__syncthreads();
int Nc = clast - cfirst + 1;
int Ne = elast - efirst + 1;
int NBc = Nc/BS + ((Nc % BS) ? 1 : 0);
int NBe = Ne/BS + ((Ne % BS) ? 1 : 0);
T mysum = (T)0.0;
for (int bc=0; bc < NBc; bc++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*bc+i)*BS + tid < 3*Nc)
rc[0][i*BS + tid] = C[3*cfirst + (3*bc+i)*BS + tid];
__syncthreads();
int ptcl1 = cfirst+bc*BS + tid;
for (int be=0; be < NBe; be++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*be+i)*BS + tid < 3*Ne)
re[0][i*BS + tid] = myR[3*efirst + (3*be+i)*BS + tid];
__syncthreads();
// Now, loop over particles
int end = (be+1)*BS < Ne ? BS : Ne-be*BS;
for (int j=0; j<end; j++)
{
int ptcl2 = efirst + be*BS+j;
T dx, dy, dz;
dx = re[j][0] - rc[tid][0];
dy = re[j][1] - rc[tid][1];
dz = re[j][2] - rc[tid][2];
T dist = min_dist(dx, dy, dz, L, Linv);
if ((ptcl1 < (Nc+cfirst) ) && (ptcl2 < (Ne+efirst)))
mysum += eval_1d_spline (dist, rMax, drInv, A, coefs);
}
}
__syncthreads();
}
__shared__ T shared_sum[BS];
shared_sum[tid] = mysum;
__syncthreads();
for (int s=BS>>1; s>0; s >>=1)
{
if (tid < s)
shared_sum[tid] += shared_sum[tid+s];
__syncthreads();
}
if (tid==0)
sum[blockIdx.x] += shared_sum[0];
}
void
one_body_sum_PBC (float C[], float *R[], int cfirst, int clast, int efirst, int elast,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], float sum[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( one_body_sum_PBC_kernel<float,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
C, R, cfirst, clast, efirst, elast,
spline_coefs, numCoefs, rMax, lattice, latticeInv, sum);
}
void
one_body_sum_PBC (double C[], double *R[], int cfirst, int clast, int efirst, int elast,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[], double sum[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 128;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( one_body_sum_PBC_kernel<double,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
C, R, cfirst, clast, efirst, elast,
spline_coefs, numCoefs, rMax, lattice, latticeInv, sum);
}
template<typename T, int BS>
__global__ void
one_body_ratio_PBC_kernel(T *C, T **R, int cfirst, int clast,
T *Rnew, int inew,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T *latticeInv, T *sum)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
__shared__ T myRnew[3], myRold[3];
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
if (tid < 3 )
{
myRnew[tid] = Rnew[3*blockIdx.x+tid];
myRold[tid] = myR[3*inew+tid];
}
__syncthreads();
__shared__ T coefs[MAX_COEFS];
__shared__ T c[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[4][4];
if (tid < 16)
A[(tid>>2)][tid&3] = AcudaSpline[tid];
__syncthreads();
int Nc = clast - cfirst + 1;
int NB = Nc/BS + ((Nc % BS) ? 1 : 0);
__shared__ T shared_sum[BS];
shared_sum[tid] = (T)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*Nc)
c[0][n] = C[3*cfirst + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = cfirst+b*BS + tid;
T dx, dy, dz;
dx = myRnew[0] - c[tid][0];
dy = myRnew[1] - c[tid][1];
dz = myRnew[2] - c[tid][2];
T dist = min_dist(dx, dy, dz, L, Linv);
T delta = eval_1d_spline (dist, rMax, drInv, A, coefs);
dx = myRold[0] - c[tid][0];
dy = myRold[1] - c[tid][1];
dz = myRold[2] - c[tid][2];
dist = min_dist(dx, dy, dz, L, Linv);
delta -= eval_1d_spline (dist, rMax, drInv, A, coefs);
if (ptcl1 < (Nc+cfirst) )
shared_sum[tid] += delta;
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
shared_sum[tid] += shared_sum[tid+s];
__syncthreads();
}
if (tid==0)
sum[blockIdx.x] += shared_sum[0];
}
void
one_body_ratio_PBC (float C[], float *R[], int first, int last,
float Rnew[], int inew,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], float sum[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( one_body_ratio_PBC_kernel<float,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
C, R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, sum);
}
void
one_body_ratio_PBC (double C[], double *R[], int first, int last,
double Rnew[], int inew,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[], double sum[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
dim3 dimBlock(128);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( one_body_ratio_PBC_kernel<double,128>), dim3(dimGrid),dim3(dimBlock), 0, 0,
C, R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, sum);
}
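// Computes the change in the one-body Jastrow sum for moving electron inew to
// Rnew, together with the gradient at the proposed position. Results are
// packed four per walker in ratio_grad: [delta_u, grad_x, grad_y, grad_z].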
template<typename T, int BS>
__global__ void
one_body_ratio_grad_PBC_kernel(T *C, T **R, int cfirst, int clast,
T *Rnew, int inew,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T* latticeInv, bool zero,
T *ratio_grad)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
__shared__ T myRnew[3], myRold[3];
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
if (tid < 3 )
{
myRnew[tid] = Rnew[3*blockIdx.x+tid];
myRold[tid] = myR[3*inew+tid];
}
__syncthreads();
__shared__ T coefs[MAX_COEFS];
__shared__ T c[BS][3];
/*
__shared__ T L[3][3], Linv[3][3];
*/
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
/*
if (tid < 9) {
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
int index=0;
__shared__ T images[27][3];
if (tid < 3)
for (T i=-1.0; i<=1.001; i+=1.0)
for (T j=-1.0; j<=1.001; j+=1.0)
for (T k=-1.0; k<=1.001; k+=1.0) {
images[index][tid] =
i*L[0][tid] + j*L[1][tid] + k*L[2][tid];
index++;
}
__shared__ T A[12][4];
if (tid < 16) {
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
*/
__syncthreads();
int Nc = clast - cfirst + 1;
int NB = Nc/BS + ((Nc % BS) ? 1 : 0);
__shared__ T shared_sum[BS];
__shared__ T shared_grad[BS][3];
shared_sum[tid] = (T)0.0;
shared_grad[tid][0] = shared_grad[tid][1] = shared_grad[tid][2] = 0.0f;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*Nc)
c[0][n] = C[3*cfirst + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = cfirst+b*BS + tid;
T dx, dy, dz, dist, delta, u, du, d2u;
dx = myRold[0] - c[tid][0];
dy = myRold[1] - c[tid][1];
dz = myRold[2] - c[tid][2];
dist = CMC_min_dist(dx, dy, dz/*, L, Linv, images*/);
    delta = -CMC_eval_1d_spline (dist, rMax, drInv/*, A*/, coefs);
dx = myRnew[0] - c[tid][0];
dy = myRnew[1] - c[tid][1];
dz = myRnew[2] - c[tid][2];
dist = CMC_min_dist(dx, dy, dz/*, L, Linv, images*/);
CMC_eval_1d_spline_vgl (dist, rMax, drInv/*, A*/, coefs, u, du, d2u);
delta += u;
if (ptcl1 < (Nc+cfirst) )
{
du /= dist;
shared_sum[tid] += delta;
shared_grad[tid][0] += du * dx;
shared_grad[tid][1] += du * dy;
shared_grad[tid][2] += du * dz;
}
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
{
shared_sum[tid] += shared_sum[tid+s];
shared_grad[tid][0] += shared_grad[tid+s][0];
shared_grad[tid][1] += shared_grad[tid+s][1];
shared_grad[tid][2] += shared_grad[tid+s][2];
}
__syncthreads();
}
if (tid==0)
{
if (zero)
{
ratio_grad[4*blockIdx.x+0] = shared_sum[0];
ratio_grad[4*blockIdx.x+1] = shared_grad[0][0];
ratio_grad[4*blockIdx.x+2] = shared_grad[0][1];
ratio_grad[4*blockIdx.x+3] = shared_grad[0][2];
}
else
{
ratio_grad[4*blockIdx.x+0] += shared_sum[0];
ratio_grad[4*blockIdx.x+1] += shared_grad[0][0];
ratio_grad[4*blockIdx.x+2] += shared_grad[0][1];
ratio_grad[4*blockIdx.x+3] += shared_grad[0][2];
}
}
}
template<typename T, int BS>
__global__ void
one_body_ratio_grad_PBC_kernel_fast(T *C, T **R, int cfirst, int clast,
T *Rnew, int inew,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T *latticeInv, bool zero,
T *ratio_grad)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
__shared__ T myRnew[3], myRold[3];
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
if (tid < 3 )
{
myRnew[tid] = Rnew[3*blockIdx.x+tid];
myRold[tid] = myR[3*inew+tid];
}
__syncthreads();
__shared__ T coefs[MAX_COEFS];
__shared__ T c[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[12][4];
if (tid < 16)
{
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
__syncthreads();
int Nc = clast - cfirst + 1;
int NB = Nc/BS + ((Nc % BS) ? 1 : 0);
__shared__ T shared_sum[BS];
__shared__ T shared_grad[BS][3];
shared_sum[tid] = (T)0.0;
shared_grad[tid][0] = shared_grad[tid][1] = shared_grad[tid][2] = 0.0f;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*Nc)
c[0][n] = C[3*cfirst + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = cfirst+b*BS + tid;
T dx, dy, dz, dist, delta, u, du, d2u;
dx = myRold[0] - c[tid][0];
dy = myRold[1] - c[tid][1];
dz = myRold[2] - c[tid][2];
dist = min_dist_fast(dx, dy, dz, L, Linv);
    delta = -eval_1d_spline (dist, rMax, drInv, A, coefs);
dx = myRnew[0] - c[tid][0];
dy = myRnew[1] - c[tid][1];
dz = myRnew[2] - c[tid][2];
dist = min_dist_fast(dx, dy, dz, L, Linv);
eval_1d_spline_vgl (dist, rMax, drInv, A, coefs, u, du, d2u);
delta += u;
if (ptcl1 < (Nc+cfirst) )
{
du /= dist;
shared_sum[tid] += delta;
shared_grad[tid][0] += du * dx;
shared_grad[tid][1] += du * dy;
shared_grad[tid][2] += du * dz;
}
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
{
shared_sum[tid] += shared_sum[tid+s];
shared_grad[tid][0] += shared_grad[tid+s][0];
shared_grad[tid][1] += shared_grad[tid+s][1];
shared_grad[tid][2] += shared_grad[tid+s][2];
}
__syncthreads();
}
if (tid==0)
{
if (zero)
{
ratio_grad[4*blockIdx.x+0] = shared_sum[0];
ratio_grad[4*blockIdx.x+1] = shared_grad[0][0];
ratio_grad[4*blockIdx.x+2] = shared_grad[0][1];
ratio_grad[4*blockIdx.x+3] = shared_grad[0][2];
}
else
{
ratio_grad[4*blockIdx.x+0] += shared_sum[0];
ratio_grad[4*blockIdx.x+1] += shared_grad[0][0];
ratio_grad[4*blockIdx.x+2] += shared_grad[0][1];
ratio_grad[4*blockIdx.x+3] += shared_grad[0][2];
}
}
}
void
one_body_ratio_grad_PBC (float C[], float *R[], int first, int last,
float Rnew[], int inew,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], bool zero,
float ratio_grad[], int numWalkers,
bool use_fast_image)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 32;
CMC_PROFILING_BEGIN();
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
// if (use_fast_image)
// one_body_ratio_grad_kernel_fast<float,BS><<<dimGrid,dimBlock>>>
// (C, R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
// lattice, latticeInv, zero, ratio_grad);
// else
hipLaunchKernelGGL(( one_body_ratio_grad_PBC_kernel<float,BS>), dim3(dimGrid),dim3(dimBlock), 0, gpu::kernelStream,
C, R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, zero, ratio_grad);
CMC_PROFILING_END();
}
void
one_body_ratio_grad_PBC (double C[], double *R[], int first, int last,
double Rnew[], int inew,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[], bool zero,
double ratio_grad[], int numWalkers, bool use_fast_image)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
// if (use_fast_image)
// one_body_ratio_grad_kernel_fast<double,BS><<<dimGrid,dimBlock>>>
// (C, R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
// lattice, latticeInv, zero, ratio_grad);
// else
hipLaunchKernelGGL(( one_body_ratio_grad_PBC_kernel<double,BS>), dim3(dimGrid),dim3(dimBlock), 0, gpu::kernelStream,
C, R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, zero, ratio_grad);
}
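// After an accepted move, copy the proposed position (stored at slot N of the
// walker's position array, by the convention assumed here) into slot iat.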
template<typename T>
__global__ void
one_body_update_kernel (T **R, int N, int iat)
{
__shared__ T* myR;
if (threadIdx.x == 0)
myR = R[blockIdx.x];
__syncthreads();
if (threadIdx.x < 3)
myR[3*iat + threadIdx.x] = myR[3*N + threadIdx.x];
}
void
one_body_update(float *R[], int N, int iat, int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
dim3 dimBlock(32);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( one_body_update_kernel<float>), dim3(dimGrid), dim3(dimBlock), 0, 0, R, N, iat);
}
void
one_body_update(double *R[], int N, int iat, int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
dim3 dimBlock(3);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( one_body_update_kernel<double>), dim3(dimGrid), dim3(dimBlock), 0, 0, R, N, iat);
}
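// Accumulates the one-body contribution to each electron's gradient and
// Laplacian: four values (gx, gy, gz, lapl) per electron are added into the
// walker's row of gradLapl, offset by 4*efirst and strided by row_stride.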
template<typename T, int BS>
__global__ void
one_body_grad_lapl_PBC_kernel(T *C, T **R, int cfirst, int clast,
int efirst, int elast,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T* latticeInv,
T *gradLapl, int row_stride)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
if (tid == 0)
myR = R[blockIdx.x];
__shared__ T coefs[MAX_COEFS];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
__shared__ T r[BS][3], c[BS][3];
/*
__shared__ T L[3][3], Linv[3][3];
if (tid < 9) {
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__syncthreads();
// if (tid == 31)
// printf ("1) coefs[] = %1.5f %1.5f %1.5f %1.5f %1.5f %1.5f %1.5f\n",
// coefs[0], coefs[1], coefs[2], coefs[3],
// coefs[4], coefs[5], coefs[6], coefs[7]);
int index=0;
__shared__ T images[27][3];
if (tid < 3)
for (T i=-1.0; i<=1.001; i+=1.0)
for (T j=-1.0; j<=1.001; j+=1.0)
for (T k=-1.0; k<=1.001; k+=1.0) {
images[index][tid] =
i*L[0][tid] + j*L[1][tid] + k*L[2][tid];
index++;
}
__syncthreads();
__shared__ T A[12][4];
if (tid < 16) {
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
*/
__syncthreads();
int Nc = clast - cfirst + 1;
int Ne = elast - efirst + 1;
int NBc = (Nc+BS-1)/BS;
int NBe = (Ne+BS-1)/BS;
__shared__ T sGradLapl[BS][4];
for (int be=0; be < NBe; be++)
{
// if (tid == 31)
// printf ("2) coefs[] = %1.5f %1.5f %1.5f %1.5f %1.5f %1.5f %1.5f\n",
// coefs[0], coefs[1], coefs[2], coefs[3],
// coefs[4], coefs[5], coefs[6], coefs[7]);
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*be+i)*BS + tid < 3*Ne)
r[0][i*BS + tid] = myR[3*efirst + (3*be+i)*BS + tid];
__syncthreads();
int eptcl = efirst+be*BS + tid;
int offset = blockIdx.x * row_stride + 4*be*BS + 4*efirst;
sGradLapl[tid][0] = sGradLapl[tid][1] =
sGradLapl[tid][2] = sGradLapl[tid][3] = (T)0.0;
for (int bc=0; bc < NBc; bc++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*bc+i)*BS + tid < 3*Nc)
c[0][i*BS + tid] = C[3*cfirst + (3*bc+i)*BS + tid];
__syncthreads();
// Now, loop over particles
int end = ((bc+1)*BS < Nc) ? BS : Nc-bc*BS;
for (int j=0; j<end; j++)
{
int cptcl = cfirst + bc*BS+j;
T dx, dy, dz, u, du, d2u;
dx = r[tid][0] - c[j][0];
dy = r[tid][1] - c[j][1];
dz = r[tid][2] - c[j][2];
T dist = CMC_min_dist(dx, dy, dz/*, L, Linv, images*/);
// if (isinf(coefs[0]))
// printf ("3) c0=%1.5f coefs[] = %1.5f %1.5f %1.5f %1.5f %1.5f %1.5f %1.5f tid=%d\n", c0,
// coefs[0], spline_coefs[1], coefs[2], coefs[3],
// coefs[4], coefs[5], coefs[6], coefs[7], tid);
CMC_eval_1d_spline_vgl (dist, rMax, drInv/*, A*/, coefs, u, du, d2u);
// if (isinf(coefs[0]))
// printf ("4) coefs[] = %1.5f %1.5f %1.5f %1.5f %1.5f %1.5f %1.5f tid=%d\n",
// coefs[0], coefs[1], coefs[2], coefs[3],
// coefs[4], coefs[5], coefs[6], coefs[7], tid);
// printf("drInv=%1.5f dist=%1.5f coefs[1]=%1.5f A[0]=%1.5f\n",
// drInv, dist, coefs[1], A[0]);
if (cptcl < (Nc+cfirst) && (eptcl < (Ne+efirst)))
{
du /= dist;
sGradLapl[tid][0] -= du * dx;
sGradLapl[tid][1] -= du * dy;
sGradLapl[tid][2] -= du * dz;
sGradLapl[tid][3] -= d2u + 2.0*du;
}
}
__syncthreads();
}
__syncthreads();
for (int i=0; i<4; i++)
if ((4*be+i)*BS + tid < 4*Ne)
gradLapl[offset + i*BS +tid] += sGradLapl[0][i*BS+tid];
__syncthreads();
}
}
void
one_body_grad_lapl_PBC(float C[], float *R[], int e1_first, int e1_last,
int e2_first, int e2_last,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[],
float gradLapl[], int row_stride, int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( one_body_grad_lapl_PBC_kernel<float,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
C, R, e1_first, e1_last, e2_first, e2_last, spline_coefs, numCoefs,
rMax, lattice, latticeInv, gradLapl, row_stride);
}
void
one_body_grad_lapl_PBC(double C[], double *R[], int e1_first, int e1_last,
int e2_first, int e2_last,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[],
double gradLapl[], int row_stride, int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( one_body_grad_lapl_PBC_kernel<double,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
C, R, e1_first, e1_last, e2_first, e2_last, spline_coefs, numCoefs,
rMax, lattice, latticeInv, gradLapl, row_stride);
}
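// Non-local ratio evaluation: each job supplies one electron and up to
// MAX_RATIOS quadrature points (presumably for the non-local pseudopotential
// integration). For every quadrature point the change in the one-body Jastrow
// sum is accumulated and the stored ratio is multiplied by exp(-delta_u).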
template<int BS>
__global__ void
one_body_NLratio_PBC_kernel(NLjobGPU<float> *jobs, float *C, int first, int last,
float *spline_coefs, int numCoefs, float rMax,
float *lattice, float *latticeInv)
{
const int MAX_RATIOS = 18;
int tid = threadIdx.x;
__shared__ NLjobGPU<float> myJob;
__shared__ float myRnew[MAX_RATIOS][3], myRold[3];
if (tid == 0)
myJob = jobs[blockIdx.x];
__syncthreads();
if (tid < 3 )
myRold[tid] = myJob.R[3*myJob.Elec+tid];
for (int i=0; i<3; i++)
if (i*BS + tid < 3*myJob.NumQuadPoints)
myRnew[0][i*BS+tid] = myJob.QuadPoints[i*BS+tid];
__syncthreads();
float dr = rMax/(float)(numCoefs-3);
float drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
__shared__ float coefs[MAX_COEFS];
__shared__ float c[BS][3];
/*
__shared__ float L[3][3], Linv[3][3];
*/
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
/*
if (tid < 9) {
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__syncthreads();
int index=0;
__shared__ float images[27][3];
if (tid < 3)
for (float i=-1.0; i<=1.001; i+=1.0)
for (float j=-1.0; j<=1.001; j+=1.0)
for (float k=-1.0; k<=1.001; k+=1.0) {
images[index][tid] =
i*L[0][tid] + j*L[1][tid] + k*L[2][tid];
index++;
}
__syncthreads();
__shared__ float A[4][4];
if (tid < 16)
A[(tid>>2)][tid&3] = AcudaSpline[tid];
*/
__syncthreads();
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
__shared__ float shared_sum[MAX_RATIOS][BS+1];
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
shared_sum[iq][tid] = (float)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*N)
c[0][n] = C[3*first + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
float dx, dy, dz;
dx = myRold[0] - c[tid][0];
dy = myRold[1] - c[tid][1];
dz = myRold[2] - c[tid][2];
float dist = CMC_min_dist_only(dx, dy, dz/*, L, Linv, images*/);
float uOld = CMC_eval_1d_spline (dist, rMax, drInv/*, A*/, coefs);
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
{
dx = myRnew[iq][0] - c[tid][0];
dy = myRnew[iq][1] - c[tid][1];
dz = myRnew[iq][2] - c[tid][2];
dist = CMC_min_dist_only(dx, dy, dz/*, L, Linv, images*/);
if (ptcl1 < (N+first))
shared_sum[iq][tid] += CMC_eval_1d_spline (dist, rMax, drInv/*, A*/, coefs) - uOld;
}
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
for (int iq=0; iq < myJob.NumQuadPoints; iq++)
shared_sum[iq][tid] += shared_sum[iq][tid+s];
__syncthreads();
}
if (tid < myJob.NumQuadPoints)
myJob.Ratios[tid] *= exp(-shared_sum[tid][0]);
}
template<int BS>
__global__ void
one_body_NLratio_PBC_kernel_fast(NLjobGPU<float> *jobs, float *C, int first, int last,
float *spline_coefs, int numCoefs, float rMax,
float *lattice, float *latticeInv)
{
const int MAX_RATIOS = 18;
int tid = threadIdx.x;
__shared__ NLjobGPU<float> myJob;
__shared__ float myRnew[MAX_RATIOS][3], myRold[3];
if (tid == 0)
myJob = jobs[blockIdx.x];
__syncthreads();
if (tid < 3 )
myRold[tid] = myJob.R[3*myJob.Elec+tid];
for (int i=0; i<3; i++)
if (i*BS + tid < 3*myJob.NumQuadPoints)
myRnew[0][i*BS+tid] = myJob.QuadPoints[i*BS+tid];
__syncthreads();
float dr = rMax/(float)(numCoefs-3);
float drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
__shared__ float coefs[MAX_COEFS];
__shared__ float c[BS][3];
__shared__ float L[3][3], Linv[3][3];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ float A[4][4];
if (tid < 16)
A[(tid>>2)][tid&3] = AcudaSpline[tid];
__syncthreads();
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
__shared__ float shared_sum[MAX_RATIOS][BS+1];
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
shared_sum[iq][tid] = (float)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*N)
c[0][n] = C[3*first + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
float dx, dy, dz;
dx = myRold[0] - c[tid][0];
dy = myRold[1] - c[tid][1];
dz = myRold[2] - c[tid][2];
float dist = min_dist_fast(dx, dy, dz, L, Linv);
float uOld = eval_1d_spline (dist, rMax, drInv, A, coefs);
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
{
dx = myRnew[iq][0] - c[tid][0];
dy = myRnew[iq][1] - c[tid][1];
dz = myRnew[iq][2] - c[tid][2];
dist = min_dist_fast(dx, dy, dz, L, Linv);
if (ptcl1 < (N+first))
shared_sum[iq][tid] += eval_1d_spline (dist, rMax, drInv, A, coefs) - uOld;
}
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
for (int iq=0; iq < myJob.NumQuadPoints; iq++)
shared_sum[iq][tid] += shared_sum[iq][tid+s];
__syncthreads();
}
if (tid < myJob.NumQuadPoints)
myJob.Ratios[tid] *= exp(-shared_sum[tid][0]);
}
template<int BS>
__global__ void
one_body_NLratio_PBC_kernel(NLjobGPU<double> *jobs, double *C, int first, int last,
double *spline_coefs, int numCoefs, double rMax,
double *lattice, double *latticeInv)
{
const int MAX_RATIOS = 18;
int tid = threadIdx.x;
__shared__ NLjobGPU<double> myJob;
__shared__ double myRnew[MAX_RATIOS][3], myRold[3];
if (tid == 0)
myJob = jobs[blockIdx.x];
__syncthreads();
if (tid < 3 )
myRold[tid] = myJob.R[3*myJob.Elec+tid];
for (int i=0; i<3; i++)
if (i*BS + tid < 3*myJob.NumQuadPoints)
myRnew[0][i*BS+tid] = myJob.QuadPoints[i*BS+tid];
__syncthreads();
double dr = rMax/(double)(numCoefs-3);
double drInv = 1.0/dr;
__shared__ double coefs[MAX_COEFS];
__shared__ double c[BS][3];
__shared__ double L[3][3], Linv[3][3];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ double images[27][3];
int index=0;
if (tid < 3)
for (float i=-1.0; i<=1.001; i+=1.0)
for (float j=-1.0; j<=1.001; j+=1.0)
for (float k=-1.0; k<=1.001; k+=1.0)
{
images[index][tid] =
i*L[0][tid] + j*L[1][tid] + k*L[2][tid];
index++;
}
__syncthreads();
__shared__ double A[4][4];
if (tid < 16)
A[(tid>>2)][tid&3] = AcudaSpline[tid];
__syncthreads();
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
__shared__ double shared_sum[MAX_RATIOS][BS+1];
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
shared_sum[iq][tid] = (double)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*N)
c[0][n] = C[3*first + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
double dx, dy, dz;
dx = myRold[0] - c[tid][0];
dy = myRold[1] - c[tid][1];
dz = myRold[2] - c[tid][2];
double dist = min_dist(dx, dy, dz, L, Linv, images);
double uOld = eval_1d_spline (dist, rMax, drInv, A, coefs);
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
{
dx = myRnew[iq][0] - c[tid][0];
dy = myRnew[iq][1] - c[tid][1];
dz = myRnew[iq][2] - c[tid][2];
dist = min_dist(dx, dy, dz, L, Linv, images);
if (ptcl1 != myJob.Elec && (ptcl1 < (N+first)))
shared_sum[iq][tid] += eval_1d_spline (dist, rMax, drInv, A, coefs) - uOld;
}
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
for (int iq=0; iq < myJob.NumQuadPoints; iq++)
shared_sum[iq][tid] += shared_sum[iq][tid+s];
__syncthreads();
}
if (tid < myJob.NumQuadPoints)
myJob.Ratios[tid] *= exp(-shared_sum[tid][0]);
}
void
one_body_NLratios_PBC(NLjobGPU<float> jobs[], float C[], int first, int last,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], float sim_cell_radius,
int numjobs)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
CMC_PROFILING_BEGIN();
while (numjobs > 65535)
{
dim3 dimGrid(65535);
if (rMax <= sim_cell_radius)
{
// fprintf (stderr, "Using fast J1 NL kernel.\n");
hipLaunchKernelGGL(( one_body_NLratio_PBC_kernel_fast<BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
jobs, C, first, last, spline_coefs, numCoefs, rMax,
lattice, latticeInv);
}
else
{
// fprintf (stderr, "Using slow J1 NL kernel.\n");
hipLaunchKernelGGL(( one_body_NLratio_PBC_kernel<BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
jobs, C, first, last, spline_coefs, numCoefs, rMax,
lattice, latticeInv);
}
numjobs -= 65535;
jobs += 65535;
}
dim3 dimGrid(numjobs);
if (rMax <= sim_cell_radius)
{
// fprintf (stderr, "Using fast J1 NL kernel.\n");
hipLaunchKernelGGL(( one_body_NLratio_PBC_kernel_fast<BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
jobs, C, first, last, spline_coefs, numCoefs, rMax,
lattice, latticeInv);
}
else
{
// fprintf (stderr, "Using slow J1 NL kernel.\n");
hipLaunchKernelGGL(( one_body_NLratio_PBC_kernel<BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
jobs, C, first, last, spline_coefs, numCoefs, rMax,
lattice, latticeInv);
}
CMC_PROFILING_END();
}
void
one_body_NLratios_PBC(NLjobGPU<double> jobs[], double C[], int first, int last,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[], int numjobs)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
  // The kernel indexes jobs[] by blockIdx.x only, so launch 1-D grids in
  // chunks of at most 65535 blocks, mirroring the float version above.
  while (numjobs > 65535)
  {
    dim3 dimGrid(65535);
    hipLaunchKernelGGL(( one_body_NLratio_PBC_kernel<BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
        jobs, C, first, last, spline_coefs, numCoefs, rMax, lattice, latticeInv);
    numjobs -= 65535;
    jobs += 65535;
  }
  dim3 dimGrid(numjobs);
  hipLaunchKernelGGL(( one_body_NLratio_PBC_kernel<BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
      jobs, C, first, last, spline_coefs, numCoefs, rMax, lattice, latticeInv);
}
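// Gradient of the one-body Jastrow with respect to electron iat, accumulated
// over all centers; same block-reduction pattern as the two-body gradient
// kernels above.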
template<typename T, int BS>
__global__ void
one_body_grad_PBC_kernel(T **R, int iat, T *C, int first, int last,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T *latticeInv, bool zeroOut, T* grad)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR, r[3];
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
if (tid < 3)
r[tid] = myR[3*iat+tid];
__shared__ T coefs[MAX_COEFS];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
__shared__ T c[BS][3];
/*
__shared__ T L[3][3], Linv[3][3];
if (tid < 9) {
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[12][4];
if (tid < 16) {
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
__syncthreads();
int index=0;
__shared__ T images[27][3];
if (tid < 3)
for (T i=-1.0; i<=1.001; i+=1.0)
for (T j=-1.0; j<=1.001; j+=1.0)
for (T k=-1.0; k<=1.001; k+=1.0) {
images[index][tid] =
i*L[0][tid] + j*L[1][tid] + k*L[2][tid];
index++;
}
*/
__syncthreads();
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
__shared__ T sGrad[BS][3];
sGrad[tid][0] = sGrad[tid][1] = sGrad[tid][2] = (T)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b+i)*BS + tid < 3*N)
c[0][i*BS + tid] = C[3*first + (3*b+i)*BS + tid];
__syncthreads();
int ptcl1 = first+b*BS + tid;
T dx, dy, dz, u, du, d2u;
dx = r[0] - c[tid][0];
dy = r[1] - c[tid][1];
dz = r[2] - c[tid][2];
T dist = CMC_min_dist(dx, dy, dz/*, L, Linv, images*/);
CMC_eval_1d_spline_vgl (dist, rMax, drInv/*, A*/, coefs, u, du, d2u);
if (ptcl1 < (N+first))
{
du /= dist;
sGrad[tid][0] += du * dx;
sGrad[tid][1] += du * dy;
sGrad[tid][2] += du * dz;
}
__syncthreads();
}
// Do reduction across threads in block
for (int s=BS>>1; s>0; s>>=1)
{
if (tid < s)
{
sGrad[tid][0] += sGrad[tid+s][0];
sGrad[tid][1] += sGrad[tid+s][1];
sGrad[tid][2] += sGrad[tid+s][2];
}
__syncthreads();
}
if (tid < 3)
{
if (zeroOut)
grad[3*blockIdx.x + tid] = sGrad[0][tid];
else
grad[3*blockIdx.x + tid] += sGrad[0][tid];
}
}
template<typename T, int BS>
__global__ void
one_body_grad_PBC_kernel_fast(T **R, int iat, T *C, int first, int last,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T *latticeInv, bool zeroOut, T *grad)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR, r[3];
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
if (tid < 3)
r[tid] = myR[3*iat+tid];
__shared__ T coefs[MAX_COEFS];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
__shared__ T c[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[12][4];
if (tid < 16)
{
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
__syncthreads();
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
__shared__ T sGrad[BS][3];
sGrad[tid][0] = sGrad[tid][1] = sGrad[tid][2] = (T)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b+i)*BS + tid < 3*N)
c[0][i*BS + tid] = C[3*first + (3*b+i)*BS + tid];
__syncthreads();
int ptcl1 = first+b*BS + tid;
T dx, dy, dz, u, du, d2u;
dx = r[0] - c[tid][0];
dy = r[1] - c[tid][1];
dz = r[2] - c[tid][2];
T dist = min_dist_fast(dx, dy, dz, L, Linv);
eval_1d_spline_vgl (dist, rMax, drInv, A, coefs, u, du, d2u);
if (ptcl1 < (N+first))
{
du /= dist;
sGrad[tid][0] += du * dx;
sGrad[tid][1] += du * dy;
sGrad[tid][2] += du * dz;
}
__syncthreads();
}
// Do reduction across threads in block
for (int s=BS>>1; s>0; s>>=1)
{
if (tid < s)
{
sGrad[tid][0] += sGrad[tid+s][0];
sGrad[tid][1] += sGrad[tid+s][1];
sGrad[tid][2] += sGrad[tid+s][2];
}
__syncthreads();
}
if (tid < 3)
{
if (zeroOut)
grad[3*blockIdx.x + tid] = sGrad[0][tid];
else
grad[3*blockIdx.x + tid] += sGrad[0][tid];
}
}
void
one_body_gradient_PBC (float *Rlist[], int iat, float C[], int first, int last,
float spline_coefs[], int num_coefs, float rMax,
float lattice[], float latticeInv[], float sim_cell_radius,
bool zeroSum, float grad[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
CMC_PROFILING_BEGIN();
// if (sim_cell_radius >= rMax)
// one_body_grad_kernel_fast<float,BS><<<dimGrid,dimBlock>>>
// (Rlist, iat, C, first, last, spline_coefs, num_coefs, rMax,
// L, Linv, zeroSum, grad);
// else
hipLaunchKernelGGL(( one_body_grad_PBC_kernel<float,BS>), dim3(dimGrid),dim3(dimBlock), 0, gpu::kernelStream,
Rlist, iat, C, first, last, spline_coefs, num_coefs, rMax,
lattice, latticeInv, zeroSum, grad);
CMC_PROFILING_END();
}
void
one_body_gradient_PBC (double *Rlist[], int iat, double C[], int first, int last,
double spline_coefs[], int num_coefs, double rMax,
double L[], double Linv[], bool zeroSum,
double grad[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
hipLaunchKernelGGL(( one_body_grad_PBC_kernel<double,BS>), dim3(dimGrid),dim3(dimBlock), 0, gpu::kernelStream,
Rlist, iat, C, first, last, spline_coefs, num_coefs, rMax,
L, Linv, zeroSum, grad);
}
template<typename T, int BS>
__global__ void
one_body_derivs_PBC_kernel(T* C, T **R, T **gradLogPsi,
int cfirst, int clast,
int efirst, int elast,
int numCoefs, T rMax,
T *lattice, T *latticeInv,
T **derivs)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR, *myGrad, *myDerivs;
if (tid == 0)
{
myR = R[blockIdx.x];
myGrad = gradLogPsi[blockIdx.x];
myDerivs = derivs[blockIdx.x];
}
__shared__ T sderivs[MAX_COEFS][2];
__shared__ T r[BS][3], c[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[12][4];
if (tid < 16)
{
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
__syncthreads();
sderivs[tid][0] = T();
sderivs[tid][1] = T();
int Nc = clast - cfirst + 1;
int Ne = elast - efirst + 1;
int NBc = (Nc+BS-1)/BS;
int NBe = (Ne+BS-1)/BS;
__shared__ T sGrad[BS][3];
for (int be=0; be < NBe; be++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*be+i)*BS + tid < 3*Ne)
{
int outoff = i*BS+tid;
int inoff = outoff + 3*efirst + 3*be*BS;
r[0][outoff] = myR[inoff];
sGrad[0][outoff] = myGrad[inoff];
}
__syncthreads();
int eptcl = efirst+be*BS + tid;
for (int bc=0; bc < NBc; bc++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*bc+i)*BS + tid < 3*Nc)
c[0][i*BS + tid] = C[3*cfirst + (3*bc+i)*BS + tid];
__syncthreads();
// Now, loop over particles
int end = min(BS, Nc-bc*BS);
for (int j=0; j<end; j++)
{
T dx, dy, dz;
dx = c[j][0] - r[tid][0];
dy = c[j][1] - r[tid][1];
dz = c[j][2] - r[tid][2];
T dist = min_dist(dx, dy, dz, L, Linv);
T distInv = 1.0f/dist;
T s = dist * drInv;
        T sf = floor (s);  // type-generic floor keeps full precision in the double instantiation
int index = (int)sf;
T t = s - sf;
T t2 = t*t;
T t3 = t*t2;
T v0 = (A[0][0]*t3 + A[0][1]*t2 + A[0][2]*t + A[0][3]);
T v1 = (A[1][0]*t3 + A[1][1]*t2 + A[1][2]*t + A[1][3]);
T v2 = (A[2][0]*t3 + A[2][1]*t2 + A[2][2]*t + A[2][3]);
T v3 = (A[3][0]*t3 + A[3][1]*t2 + A[3][2]*t + A[3][3]);
for (int id=0; id<BS; id++)
if (tid == id && eptcl <= elast && (dist < rMax))
{
sderivs[index+0][0] += v0;
sderivs[index+1][0] += v1;
sderivs[index+2][0] += v2;
sderivs[index+3][0] += v3;
}
T prefact = (dx*sGrad[tid][0] + dy*sGrad[tid][1] + dz*sGrad[tid][2])*distInv;
T du0 = drInv * (A[4][0]*t3 + A[4][1]*t2 + A[4][2]*t + A[4][3]);
T du1 = drInv * (A[5][0]*t3 + A[5][1]*t2 + A[5][2]*t + A[5][3]);
T du2 = drInv * (A[6][0]*t3 + A[6][1]*t2 + A[6][2]*t + A[6][3]);
T du3 = drInv * (A[7][0]*t3 + A[7][1]*t2 + A[7][2]*t + A[7][3]);
// This is the dot (gradu, grad_log_psi) term.
v0 = 2.0f* prefact * du0;
v1 = 2.0f* prefact * du1;
v2 = 2.0f* prefact * du2;
v3 = 2.0f* prefact * du3;
// This is the lapl u term
v0 -= drInv*drInv*(A[ 8][0]*t3 + A[ 8][1]*t2 + A[ 8][2]*t + A[ 8][3]) + 2.0f*du0*distInv;
v1 -= drInv*drInv*(A[ 9][0]*t3 + A[ 9][1]*t2 + A[ 9][2]*t + A[ 9][3]) + 2.0f*du1*distInv;
v2 -= drInv*drInv*(A[10][0]*t3 + A[10][1]*t2 + A[10][2]*t + A[10][3]) + 2.0f*du2*distInv;
v3 -= drInv*drInv*(A[11][0]*t3 + A[11][1]*t2 + A[11][2]*t + A[11][3]) + 2.0f*du3*distInv;
for (int id=0; id<BS; id++)
if (tid == id && eptcl <= elast && (dist < rMax))
{
sderivs[index+0][1] += v0;
sderivs[index+1][1] += v1;
sderivs[index+2][1] += v2;
sderivs[index+3][1] += v3;
}
}
__syncthreads();
}
}
sderivs[tid][1] *= 0.5f;
if (tid < 2*numCoefs)
myDerivs[tid] = -sderivs[0][tid];
if (tid+BS < 2*numCoefs)
myDerivs[tid+BS] = -sderivs[0][tid+BS];
}
void
one_body_derivs_PBC(float C[], float *R[], float *gradLogPsi[],
int cfirst, int clast,
int efirst, int elast,
int numCoefs, float rMax,
float lattice[], float latticeInv[], float sim_cell_radius,
float *derivs[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
if (sim_cell_radius >= rMax)
hipLaunchKernelGGL(( one_body_derivs_PBC_kernel<float,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
C, R, gradLogPsi, cfirst, clast, efirst, elast, numCoefs,
rMax, lattice, latticeInv, derivs);
else
hipLaunchKernelGGL(( one_body_derivs_PBC_kernel<float,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
C, R, gradLogPsi, cfirst, clast, efirst, elast, numCoefs,
rMax, lattice, latticeInv, derivs);
}
void
one_body_derivs_PBC(double C[], double *R[], double *gradLogPsi[],
int cfirst, int clast,
int efirst, int elast,
int numCoefs, double rMax,
double lattice[], double latticeInv[], double sim_cell_radius,
double *derivs[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
if (sim_cell_radius >= rMax)
hipLaunchKernelGGL(( one_body_derivs_PBC_kernel<double,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
C, R, gradLogPsi, cfirst, clast, efirst, elast, numCoefs,
rMax, lattice, latticeInv, derivs);
else
hipLaunchKernelGGL(( one_body_derivs_PBC_kernel<double,BS>), dim3(dimGrid),dim3(dimBlock), 0, 0,
C, R, gradLogPsi, cfirst, clast, efirst, elast, numCoefs,
rMax, lattice, latticeInv, derivs);
}
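// Minimal smoke test: the arrays passed below are uninitialized host pointers,
// so this only exercises compilation and the launch path and is not expected
// to produce meaningful results.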
void testPBC()
{
dim3 dimBlock(32);
dim3 dimGrid(1000);
float *R[1000];
float L[9], Linv[9];
float spline_coefs[10];
float dr = 0.1;
float sum[1000];
hipLaunchKernelGGL(( two_body_sum_PBC_kernel<float,32>), dim3(dimGrid),dim3(dimBlock), 0, 0, R, 0, 100, 0, 100, spline_coefs, 10, dr,
L, Linv, sum);
}
| bfb50d4026a60e6ca0364dd9be76af793bd624af.cu |
#define MAX_SPLINES 100
#include <stdio.h>
#include "BsplineJastrowCudaPBC.h"
#include "../../CUDA/gpu_misc.h"
bool AisInitializedPBC = false;
static bool CMC_profile = false;
void CMC_profileSample(const char *function, float msec)
{
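  // Only report timings for this one routine; strcmp() returns 0 on a match,
  // so any other function name takes the early return below.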
if (strcmp(function, "two_body_NLratios_PBC"))
{
return;
}
printf("%s: %1.3e msec\n", function, msec);
}
#define CMC_PROFILING_BEGIN() \
cudaMemcpyToSymbolAsync(CMC_L, lattice, sizeof(CMC_L), 0, cudaMemcpyDeviceToDevice, gpu::kernelStream); \
cudaMemcpyToSymbolAsync(CMC_Linv, latticeInv, sizeof(CMC_Linv), 0, cudaMemcpyDeviceToDevice, gpu::kernelStream); \
cudaEvent_t start; \
cudaEvent_t stop; \
if (CMC_profile) { \
cudaEventCreate(&start); \
cudaEventCreate(&stop); \
cudaGetLastError(); \
cudaEventRecord(start); \
}
#define CMC_PROFILING_END() \
if (CMC_profile) { \
cudaEventRecord(stop); \
cudaEventSynchronize(stop); \
float time = 0.0f; \
cudaEventElapsedTime(&time, start, stop); \
cudaEventDestroy(start); \
cudaEventDestroy(stop); \
CMC_profileSample(__FUNCTION__, time); \
} \
  if (cudaGetLastError()) { printf("ERROR!!!\n"); exit(1); }
static __constant__ float CMC_L[3][3];
static __constant__ float CMC_Linv[3][3];
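// CMC_L / CMC_Linv hold the current cell lattice and its inverse in constant
// memory; they are refreshed by CMC_PROFILING_BEGIN() before each kernel that
// relies on the CMC_* minimum-image helpers below.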
template<typename T>
__device__ __forceinline__
T CMC_min_dist_fast(T& __restrict__ x, T& __restrict__ y, T& __restrict__ z)
{
T u0 = CMC_Linv[0][0]*x + CMC_Linv[1][0]*y + CMC_Linv[2][0]*z;
T u1 = CMC_Linv[0][1]*x + CMC_Linv[1][1]*y + CMC_Linv[2][1]*z;
T u2 = CMC_Linv[0][2]*x + CMC_Linv[1][2]*y + CMC_Linv[2][2]*z;
u0 -= rint(u0);
u1 -= rint(u1);
u2 -= rint(u2);
x = CMC_L[0][0]*u0 + CMC_L[1][0]*u1 + CMC_L[2][0]*u2;
y = CMC_L[0][1]*u0 + CMC_L[1][1]*u1 + CMC_L[2][1]*u2;
z = CMC_L[0][2]*u0 + CMC_L[1][2]*u1 + CMC_L[2][2]*u2;
return sqrt (x*x + y*y + z*z);
}
template<typename T>
__device__ __forceinline__
T CMC_min_dist_only(T x, T y, T z)
{
T u0 = CMC_Linv[0][0]*x + CMC_Linv[1][0]*y + CMC_Linv[2][0]*z;
T u1 = CMC_Linv[0][1]*x + CMC_Linv[1][1]*y + CMC_Linv[2][1]*z;
T u2 = CMC_Linv[0][2]*x + CMC_Linv[1][2]*y + CMC_Linv[2][2]*z;
u0 -= rint(u0);
u1 -= rint(u1);
u2 -= rint(u2);
x = CMC_L[0][0]*u0 + CMC_L[1][0]*u1 + CMC_L[2][0]*u2;
y = CMC_L[0][1]*u0 + CMC_L[1][1]*u1 + CMC_L[2][1]*u2;
z = CMC_L[0][2]*u0 + CMC_L[1][2]*u1 + CMC_L[2][2]*u2;
T d2min = x*x + y*y + z*z;
#pragma unroll
for (int i = -1; i <= 1; i++)
{
#pragma unroll
for (int j = -1; j <= 1; j++)
{
#pragma unroll
for (int k = -1; k <= 1; k++)
{
T xnew = CMC_L[0][0]*(u0+i) + CMC_L[1][0]*(u1+j) + CMC_L[2][0]*(u2+k);
T ynew = CMC_L[0][1]*(u0+i) + CMC_L[1][1]*(u1+j) + CMC_L[2][1]*(u2+k);
T znew = CMC_L[0][2]*(u0+i) + CMC_L[1][2]*(u1+j) + CMC_L[2][2]*(u2+k);
T d2 = xnew*xnew + ynew*ynew + znew*znew;
d2min = min(d2, d2min);
}
}
}
return sqrt(d2min);
}
template<typename T>
__device__ __forceinline__
T CMC_min_dist(T& __restrict__ x, T& __restrict__ y, T& __restrict__ z)
{
T u0 = CMC_Linv[0][0]*x + CMC_Linv[1][0]*y + CMC_Linv[2][0]*z;
T u1 = CMC_Linv[0][1]*x + CMC_Linv[1][1]*y + CMC_Linv[2][1]*z;
T u2 = CMC_Linv[0][2]*x + CMC_Linv[1][2]*y + CMC_Linv[2][2]*z;
u0 -= rint(u0);
u1 -= rint(u1);
u2 -= rint(u2);
x = CMC_L[0][0]*u0 + CMC_L[1][0]*u1 + CMC_L[2][0]*u2;
y = CMC_L[0][1]*u0 + CMC_L[1][1]*u1 + CMC_L[2][1]*u2;
z = CMC_L[0][2]*u0 + CMC_L[1][2]*u1 + CMC_L[2][2]*u2;
T d2min = x*x + y*y + z*z;
#pragma unroll
for (int i = -1; i <= 1; i++)
{
#pragma unroll
for (int j = -1; j <= 1; j++)
{
#pragma unroll
for (int k = -1; k <= 1; k++)
{
T xnew = CMC_L[0][0]*(u0+i) + CMC_L[1][0]*(u1+j) + CMC_L[2][0]*(u2+k);
T ynew = CMC_L[0][1]*(u0+i) + CMC_L[1][1]*(u1+j) + CMC_L[2][1]*(u2+k);
T znew = CMC_L[0][2]*(u0+i) + CMC_L[1][2]*(u1+j) + CMC_L[2][2]*(u2+k);
T d2new = xnew*xnew + ynew*ynew + znew*znew;
if (d2new < d2min)
{
x = xnew;
y = ynew;
z = znew;
d2min = d2new;
}
}
}
}
return sqrt(d2min);
}
// void
// createCudaSplines (float rmax, int N,
// float f[], float df[], float d2f[],
// int &fSpline, int &dfSpline, int &d2fSpline)
// {
// cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
// cudaArray *fArray, *dfArray, *d2fArray;
// cudaMallocArray( &fArray, &channelDesc, N);
// cudaMallocArray( &dfArray, &channelDesc, N);
// cudaMallocArray(&d2fArray, &channelDesc, N);
// cudaMemcpyToArray(fArray, N,1, f,N*sizeof(float),cudaMemcpyHostToDevice);
// cudaMemcpyToArray(dfArray, N,1, df,N*sizeof(float),cudaMemcpyHostToDevice);
// cudaMemcpyToArray(d2fArray,N,1,d2f,N*sizeof(float),cudaMemcpyHostToDevice);
// cudaBindTextureToArray(texSplines[fSpline=curTex++], fArray);
// cudaBindTextureToArray(texSplines[dfSpline=curTex++], dfArray);
// cudaBindTextureToArray(texSplines[d2fSpline=curTex++], d2fArray);
// }
template<typename T>
__device__ __forceinline__
T min_dist (T& __restrict__ x, T& __restrict__ y, T& __restrict__ z,
T const L[3][3], T const Linv[3][3])
{
T u0 = Linv[0][0]*x + Linv[1][0]*y + Linv[2][0]*z;
T u1 = Linv[0][1]*x + Linv[1][1]*y + Linv[2][1]*z;
T u2 = Linv[0][2]*x + Linv[1][2]*y + Linv[2][2]*z;
u0 -= rint(u0);
u1 -= rint(u1);
u2 -= rint(u2);
x = L[0][0]*u0 + L[1][0]*u1 + L[2][0]*u2;
y = L[0][1]*u0 + L[1][1]*u1 + L[2][1]*u2;
z = L[0][2]*u0 + L[1][2]*u1 + L[2][2]*u2;
T d2min = x*x + y*y + z*z;
#pragma unroll
for (int i = -1; i <= 1; i++)
{
#pragma unroll
for (int j = -1; j <= 1; j++)
{
#pragma unroll
for (int k = -1; k <= 1; k++)
{
T xnew = L[0][0]*(u0+i) + L[1][0]*(u1+j) + L[2][0]*(u2+k);
T ynew = L[0][1]*(u0+i) + L[1][1]*(u1+j) + L[2][1]*(u2+k);
T znew = L[0][2]*(u0+i) + L[1][2]*(u1+j) + L[2][2]*(u2+k);
T d2 = xnew*xnew + ynew*ynew + znew*znew;
if (d2 < d2min)
{
d2min = d2;
x = xnew;
y = ynew;
z = znew;
}
}
}
}
return sqrt(d2min);
}
template<typename T>
__device__ __forceinline__
T min_dist_fast (T& __restrict__ x, T& __restrict__ y, T& __restrict__ z,
T const L[3][3], T const Linv[3][3])
{
T u0 = Linv[0][0]*x + Linv[1][0]*y + Linv[2][0]*z;
T u1 = Linv[0][1]*x + Linv[1][1]*y + Linv[2][1]*z;
T u2 = Linv[0][2]*x + Linv[1][2]*y + Linv[2][2]*z;
u0 -= rint(u0);
u1 -= rint(u1);
u2 -= rint(u2);
x = L[0][0]*u0 + L[1][0]*u1 + L[2][0]*u2;
y = L[0][1]*u0 + L[1][1]*u1 + L[2][1]*u2;
z = L[0][2]*u0 + L[1][2]*u1 + L[2][2]*u2;
return sqrt(x*x + y*y + z*z);
}
template<typename T>
__device__ __forceinline__
T min_dist (T& __restrict__ x, T& __restrict__ y, T& __restrict__ z,
T const L[3][3], T const Linv[3][3],
T const images[27][3])
{
T u0 = Linv[0][0]*x + Linv[1][0]*y + Linv[2][0]*z;
T u1 = Linv[0][1]*x + Linv[1][1]*y + Linv[2][1]*z;
T u2 = Linv[0][2]*x + Linv[1][2]*y + Linv[2][2]*z;
u0 -= rint(u0);
u1 -= rint(u1);
u2 -= rint(u2);
T xtmp = L[0][0]*u0 + L[1][0]*u1 + L[2][0]*u2;
T ytmp = L[0][1]*u0 + L[1][1]*u1 + L[2][1]*u2;
T ztmp = L[0][2]*u0 + L[1][2]*u1 + L[2][2]*u2;
x = xtmp;
y = ytmp;
z = ztmp;
T d2min = xtmp*xtmp + ytmp*ytmp + ztmp*ztmp;
for (int i=0; i<27; i++)
{
T xnew = xtmp + images[i][0];
T ynew = ytmp + images[i][1];
T znew = ztmp + images[i][2];
T d2 = xnew*xnew + ynew*ynew + znew*znew;
if (d2 < d2min)
{
x = xnew;
y = ynew;
z = znew;
d2min = d2;
}
// __syncthreads(); // XXXJCW: this doesn't appear to be needed
}
return sqrt(d2min);
}
template<typename T>
__device__ __forceinline__
T min_dist_only (T x, T y, T z,
T const L[3][3], T const Linv[3][3],
T const images[27][3])
{
T u0 = Linv[0][0]*x + Linv[1][0]*y + Linv[2][0]*z;
T u1 = Linv[0][1]*x + Linv[1][1]*y + Linv[2][1]*z;
T u2 = Linv[0][2]*x + Linv[1][2]*y + Linv[2][2]*z;
u0 -= rint(u0);
u1 -= rint(u1);
u2 -= rint(u2);
x = L[0][0]*u0 + L[1][0]*u1 + L[2][0]*u2;
y = L[0][1]*u0 + L[1][1]*u1 + L[2][1]*u2;
z = L[0][2]*u0 + L[1][2]*u1 + L[2][2]*u2;
T d2min = x*x + y*y + z*z;
for (int i=0; i<27; i++)
{
T xnew = x + images[i][0];
T ynew = y + images[i][1];
T znew = z + images[i][2];
T d2 = xnew*xnew + ynew*ynew + znew*znew;
d2min = min (d2min, d2);
// __syncthreads(); // XXXJCW: this doesn't appear to be needed
}
return sqrt(d2min);
}
__constant__ float AcudaSpline[48];
__constant__ double AcudaSpline_double[48];
void
cuda_spline_init_PBC()
{
float A_h[48] = { -1.0/6.0, 3.0/6.0, -3.0/6.0, 1.0/6.0,
3.0/6.0, -6.0/6.0, 0.0/6.0, 4.0/6.0,
-3.0/6.0, 3.0/6.0, 3.0/6.0, 1.0/6.0,
1.0/6.0, 0.0/6.0, 0.0/6.0, 0.0/6.0,
0.0, -0.5, 1.0, -0.5,
0.0, 1.5, -2.0, 0.0,
0.0, -1.5, 1.0, 0.5,
0.0, 0.5, 0.0, 0.0,
0.0, 0.0, -1.0, 1.0,
0.0, 0.0, 3.0, -2.0,
0.0, 0.0, -3.0, 1.0,
0.0, 0.0, 1.0, 0.0
};
cudaMemcpyToSymbol(AcudaSpline, A_h, 48*sizeof(float), 0,
cudaMemcpyHostToDevice);
double A_d[48] = {-1.0/6.0, 3.0/6.0, -3.0/6.0, 1.0/6.0,
3.0/6.0, -6.0/6.0, 0.0/6.0, 4.0/6.0,
-3.0/6.0, 3.0/6.0, 3.0/6.0, 1.0/6.0,
1.0/6.0, 0.0/6.0, 0.0/6.0, 0.0/6.0,
0.0, -0.5, 1.0, -0.5,
0.0, 1.5, -2.0, 0.0,
0.0, -1.5, 1.0, 0.5,
0.0, 0.5, 0.0, 0.0,
0.0, 0.0, -1.0, 1.0,
0.0, 0.0, 3.0, -2.0,
0.0, 0.0, -3.0, 1.0,
0.0, 0.0, 1.0, 0.0
};
cudaMemcpyToSymbol(AcudaSpline_double, A_d, 48*sizeof(double), 0,
cudaMemcpyHostToDevice);
AisInitializedPBC = true;
}
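// AcudaSpline stores the cubic B-spline basis polynomials as rows of 4
// coefficients: rows 0-3 give the value, rows 4-7 the first derivative, and
// rows 8-11 the second derivative (the derivative rows still need the drInv
// and drInv*drInv factors applied by the evaluation routines below).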
template<typename T>
__device__ __forceinline__
T eval_1d_spline (T dist, T rmax, T drInv, T const A[4][4],
T const * __restrict__ coefs)
{
T res;
if (dist >= rmax)
{
res = (T)0;
}
else
{
T s = dist * drInv;
T sf = floor(s);
int index = (int)sf;
T t = s - sf;
T t2 = t*t;
T t3 = t*t2;
res = (coefs[index+0]*(A[0][0]*t3 + A[0][1]*t2 + A[0][2]*t + A[0][3]) +
coefs[index+1]*(A[1][0]*t3 + A[1][1]*t2 + A[1][2]*t + A[1][3]) +
coefs[index+2]*(A[2][0]*t3 + A[2][1]*t2 + A[2][2]*t + A[2][3]) +
coefs[index+3]*(A[3][0]*t3 + A[3][1]*t2 + A[3][2]*t + A[3][3]));
}
return res;
}
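// CMC_eval_1d_spline: same evaluation, but the basis is read directly from
// the AcudaSpline constant array in Horner form, so the calling kernel does
// not need a shared-memory copy of A.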
template<typename T>
__device__ __forceinline__
T CMC_eval_1d_spline (T dist, T rmax, T drInv, T const * __restrict__ coefs)
{
T res;
if (dist >= rmax)
{
res = (T)0;
}
else
{
T s = dist * drInv;
T sf = floor(s);
int index = (int)sf;
T t = s - sf;
res = (coefs[index+0] * (((AcudaSpline[ 0] * t + AcudaSpline[ 1]) * t + AcudaSpline[ 2]) * t + AcudaSpline[ 3]) +
coefs[index+1] * (((AcudaSpline[ 4] * t + AcudaSpline[ 5]) * t + AcudaSpline[ 6]) * t + AcudaSpline[ 7]) +
coefs[index+2] * (((AcudaSpline[ 8] * t + AcudaSpline[ 9]) * t + AcudaSpline[10]) * t + AcudaSpline[11]) +
coefs[index+3] * (((AcudaSpline[12] * t + AcudaSpline[13]) * t + AcudaSpline[14]) * t + AcudaSpline[15]) );
}
return res;
}
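// eval_1d_spline_vgl: value, first and second derivative of the spline at
// one point.  Rows 0-3 of A give u, rows 4-7 du/dt and rows 8-11 d2u/dt2;
// the factors drInv and drInv*drInv convert the t-derivatives into
// r-derivatives.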
template<typename T>
__device__ __forceinline__
void eval_1d_spline_vgl (T dist, T rmax, T drInv, T const A[12][4],
T const * __restrict__ coefs,
T& __restrict__ u, T& __restrict__ du,
T& __restrict__ d2u)
{
if (dist >= rmax)
{
u = du = d2u = (T)0;
}
else
{
T s = dist * drInv;
T sf = floor (s);
int index = (int)sf;
T t = s - sf;
T t2 = t*t;
T t3 = t*t2;
T c0 = coefs[index+0];
T c1 = coefs[index+1];
T c2 = coefs[index+2];
T c3 = coefs[index+3];
u = (c0 * (A[0][0]*t3 + A[0][1]*t2 + A[0][2]*t + A[0][3]) +
c1 * (A[1][0]*t3 + A[1][1]*t2 + A[1][2]*t + A[1][3]) +
c2 * (A[2][0]*t3 + A[2][1]*t2 + A[2][2]*t + A[2][3]) +
c3 * (A[3][0]*t3 + A[3][1]*t2 + A[3][2]*t + A[3][3]));
du = drInv *
(c0 * (A[4][0]*t3 + A[4][1]*t2 + A[4][2]*t + A[4][3]) +
c1 * (A[5][0]*t3 + A[5][1]*t2 + A[5][2]*t + A[5][3]) +
c2 * (A[6][0]*t3 + A[6][1]*t2 + A[6][2]*t + A[6][3]) +
c3 * (A[7][0]*t3 + A[7][1]*t2 + A[7][2]*t + A[7][3]));
d2u = drInv*drInv *
(c0 * (A[ 8][0]*t3 + A[ 8][1]*t2 + A[ 8][2]*t + A[ 8][3]) +
c1 * (A[ 9][0]*t3 + A[ 9][1]*t2 + A[ 9][2]*t + A[ 9][3]) +
c2 * (A[10][0]*t3 + A[10][1]*t2 + A[10][2]*t + A[10][3]) +
c3 * (A[11][0]*t3 + A[11][1]*t2 + A[11][2]*t + A[11][3]));
}
}
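// The CMC variant below evaluates the four basis cubics with one of three
// polynomial schemes selected at compile time through SCHEME2: naive
// (explicit t, t^2, t^3 terms), Horner (((a*t+b)*t+c)*t+d), or Estrin
// ((a*t+b)*t^2 + (c*t+d)), which shortens the dependency chain.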
#define NAIVE_SCHEME 0
#define HORNER_SCHEME 1
#define ESTRIN_SCHEME 2
#define SCHEME2 HORNER_SCHEME
template<typename T>
__device__ __forceinline__
void CMC_eval_1d_spline_vgl (T dist, T rmax, T drInv,
T const * __restrict__ coefs,
T& __restrict__ u,
T& __restrict__ du,
T& __restrict__ d2u)
{
if (dist >= rmax)
{
u = du = d2u = (T)0;
}
else
{
T s = dist * drInv;
T sf = floor (s);
int index = (int)sf;
T t = s - sf;
T c0 = coefs[index+0];
T c1 = coefs[index+1];
T c2 = coefs[index+2];
T c3 = coefs[index+3];
#if (SCHEME2 == HORNER_SCHEME)
u = (c0 * (((AcudaSpline[ 0*4 + 0] * t + AcudaSpline[ 0*4 + 1]) * t + AcudaSpline[ 0*4 + 2]) * t + AcudaSpline[ 0*4 + 3]) +
c1 * (((AcudaSpline[ 1*4 + 0] * t + AcudaSpline[ 1*4 + 1]) * t + AcudaSpline[ 1*4 + 2]) * t + AcudaSpline[ 1*4 + 3]) +
c2 * (((AcudaSpline[ 2*4 + 0] * t + AcudaSpline[ 2*4 + 1]) * t + AcudaSpline[ 2*4 + 2]) * t + AcudaSpline[ 2*4 + 3]) +
c3 * (((AcudaSpline[ 3*4 + 0] * t + AcudaSpline[ 3*4 + 1]) * t + AcudaSpline[ 3*4 + 2]) * t + AcudaSpline[ 3*4 + 3]));
du = drInv *
(c0 * (((AcudaSpline[ 4*4 + 0] * t + AcudaSpline[ 4*4 + 1]) * t + AcudaSpline[ 4*4 + 2]) * t + AcudaSpline[ 4*4 + 3]) +
c1 * (((AcudaSpline[ 5*4 + 0] * t + AcudaSpline[ 5*4 + 1]) * t + AcudaSpline[ 5*4 + 2]) * t + AcudaSpline[ 5*4 + 3]) +
c2 * (((AcudaSpline[ 6*4 + 0] * t + AcudaSpline[ 6*4 + 1]) * t + AcudaSpline[ 6*4 + 2]) * t + AcudaSpline[ 6*4 + 3]) +
c3 * (((AcudaSpline[ 7*4 + 0] * t + AcudaSpline[ 7*4 + 1]) * t + AcudaSpline[ 7*4 + 2]) * t + AcudaSpline[ 7*4 + 3]));
d2u = drInv * drInv *
(c0 * (((AcudaSpline[ 8*4 + 0] * t + AcudaSpline[ 8*4 + 1]) * t + AcudaSpline[ 8*4 + 2]) * t + AcudaSpline[ 8*4 + 3]) +
c1 * (((AcudaSpline[ 9*4 + 0] * t + AcudaSpline[ 9*4 + 1]) * t + AcudaSpline[ 9*4 + 2]) * t + AcudaSpline[ 9*4 + 3]) +
c2 * (((AcudaSpline[10*4 + 0] * t + AcudaSpline[10*4 + 1]) * t + AcudaSpline[10*4 + 2]) * t + AcudaSpline[10*4 + 3]) +
c3 * (((AcudaSpline[11*4 + 0] * t + AcudaSpline[11*4 + 1]) * t + AcudaSpline[11*4 + 2]) * t + AcudaSpline[11*4 + 3]));
#elif (SCHEME2 == ESTRIN_SCHEME)
T t2 = t*t;
u = (c0 * ((AcudaSpline[ 0*4 + 0] * t + AcudaSpline[ 0*4 + 1]) * t2 + (AcudaSpline[ 0*4 + 2] * t + AcudaSpline[ 0*4 + 3])) +
c1 * ((AcudaSpline[ 1*4 + 0] * t + AcudaSpline[ 1*4 + 1]) * t2 + (AcudaSpline[ 1*4 + 2] * t + AcudaSpline[ 1*4 + 3])) +
c2 * ((AcudaSpline[ 2*4 + 0] * t + AcudaSpline[ 2*4 + 1]) * t2 + (AcudaSpline[ 2*4 + 2] * t + AcudaSpline[ 2*4 + 3])) +
c3 * ((AcudaSpline[ 3*4 + 0] * t + AcudaSpline[ 3*4 + 1]) * t2 + (AcudaSpline[ 3*4 + 2] * t + AcudaSpline[ 3*4 + 3])) );
du = drInv *
(c0 * ((AcudaSpline[ 4*4 + 0] * t + AcudaSpline[ 4*4 + 1]) * t2 + (AcudaSpline[ 4*4 + 2] * t + AcudaSpline[ 4*4 + 3])) +
c1 * ((AcudaSpline[ 5*4 + 0] * t + AcudaSpline[ 5*4 + 1]) * t2 + (AcudaSpline[ 5*4 + 2] * t + AcudaSpline[ 5*4 + 3])) +
c2 * ((AcudaSpline[ 6*4 + 0] * t + AcudaSpline[ 6*4 + 1]) * t2 + (AcudaSpline[ 6*4 + 2] * t + AcudaSpline[ 6*4 + 3])) +
c3 * ((AcudaSpline[ 7*4 + 0] * t + AcudaSpline[ 7*4 + 1]) * t2 + (AcudaSpline[ 7*4 + 2] * t + AcudaSpline[ 7*4 + 3])) );
d2u = drInv * drInv *
(c0 * ((AcudaSpline[ 8*4 + 0] * t + AcudaSpline[ 8*4 + 1]) * t2 + (AcudaSpline[ 8*4 + 2] * t + AcudaSpline[ 8*4 + 3])) +
c1 * ((AcudaSpline[ 9*4 + 0] * t + AcudaSpline[ 9*4 + 1]) * t2 + (AcudaSpline[ 9*4 + 2] * t + AcudaSpline[ 9*4 + 3])) +
c2 * ((AcudaSpline[10*4 + 0] * t + AcudaSpline[10*4 + 1]) * t2 + (AcudaSpline[10*4 + 2] * t + AcudaSpline[10*4 + 3])) +
c3 * ((AcudaSpline[11*4 + 0] * t + AcudaSpline[11*4 + 1]) * t2 + (AcudaSpline[11*4 + 2] * t + AcudaSpline[11*4 + 3])) );
#else
T t2 = t*t;
T t3 = t*t2;
u = (c0 * (AcudaSpline[ 0*4 + 0] * t3 + AcudaSpline[ 0*4 + 1] * t2 + AcudaSpline[ 0*4 + 2] * t + AcudaSpline[ 0*4 + 3]) +
c1 * (AcudaSpline[ 1*4 + 0] * t3 + AcudaSpline[ 1*4 + 1] * t2 + AcudaSpline[ 1*4 + 2] * t + AcudaSpline[ 1*4 + 3]) +
c2 * (AcudaSpline[ 2*4 + 0] * t3 + AcudaSpline[ 2*4 + 1] * t2 + AcudaSpline[ 2*4 + 2] * t + AcudaSpline[ 2*4 + 3]) +
c3 * (AcudaSpline[ 3*4 + 0] * t3 + AcudaSpline[ 3*4 + 1] * t2 + AcudaSpline[ 3*4 + 2] * t + AcudaSpline[ 3*4 + 3]));
du = drInv *
(c0 * (AcudaSpline[ 4*4 + 0] * t3 + AcudaSpline[ 4*4 + 1] * t2 + AcudaSpline[ 4*4 + 2] * t + AcudaSpline[ 4*4 + 3]) +
c1 * (AcudaSpline[ 5*4 + 0] * t3 + AcudaSpline[ 5*4 + 1] * t2 + AcudaSpline[ 5*4 + 2] * t + AcudaSpline[ 5*4 + 3]) +
c2 * (AcudaSpline[ 6*4 + 0] * t3 + AcudaSpline[ 6*4 + 1] * t2 + AcudaSpline[ 6*4 + 2] * t + AcudaSpline[ 6*4 + 3]) +
c3 * (AcudaSpline[ 7*4 + 0] * t3 + AcudaSpline[ 7*4 + 1] * t2 + AcudaSpline[ 7*4 + 2] * t + AcudaSpline[ 7*4 + 3]));
d2u = drInv * drInv *
(c0 * (AcudaSpline[ 8*4 + 0] * t3 + AcudaSpline[ 8*4 + 1] * t2 + AcudaSpline[ 8*4 + 2] * t + AcudaSpline[ 8*4 + 3]) +
c1 * (AcudaSpline[ 9*4 + 0] * t3 + AcudaSpline[ 9*4 + 1] * t2 + AcudaSpline[ 9*4 + 2] * t + AcudaSpline[ 9*4 + 3]) +
c2 * (AcudaSpline[10*4 + 0] * t3 + AcudaSpline[10*4 + 1] * t2 + AcudaSpline[10*4 + 2] * t + AcudaSpline[10*4 + 3]) +
c3 * (AcudaSpline[11*4 + 0] * t3 + AcudaSpline[11*4 + 1] * t2 + AcudaSpline[11*4 + 2] * t + AcudaSpline[11*4 + 3]));
#endif
}
}
#define MAX_COEFS 32
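// two_body_sum_PBC_kernel: one thread block per walker.  The two particle
// groups [e1_first,e1_last] and [e2_first,e2_last] are staged through
// shared memory in tiles of BS; every pair separation is folded by the
// minimum-image convention, fed through the spline, and the per-thread
// partial sums are combined with a shared-memory tree reduction.  The
// factor of 0.5 at the end corrects for double counting when both groups
// are the same.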
template<typename T, int BS >
__global__ void
two_body_sum_PBC_kernel(T **R, int e1_first, int e1_last,
int e2_first, int e2_last,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T* latticeInv, T* sum)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
if (tid == 0)
myR = R[blockIdx.x];
__shared__ T coefs[MAX_COEFS];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
__shared__ T r1[BS][3], r2[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[4][4];
if (tid < 16)
A[tid>>2][tid&3] = AcudaSpline[tid];
__syncthreads();
int N1 = e1_last - e1_first + 1;
int N2 = e2_last - e2_first + 1;
int NB1 = N1/BS + ((N1 % BS) ? 1 : 0);
int NB2 = N2/BS + ((N2 % BS) ? 1 : 0);
T mysum = (T)0.0;
for (int b1=0; b1 < NB1; b1++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b1+i)*BS + tid < 3*N1)
r1[0][i*BS + tid] = myR[3*e1_first + (3*b1+i)*BS + tid];
__syncthreads();
int ptcl1 = e1_first+b1*BS + tid;
for (int b2=0; b2 < NB2; b2++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b2+i)*BS + tid < 3*N2)
r2[0][i*BS + tid] = myR[3*e2_first + (3*b2+i)*BS + tid];
__syncthreads();
// Now, loop over particles
int end = (b2+1)*BS < N2 ? BS : N2-b2*BS;
for (int j=0; j<end; j++)
{
int ptcl2 = e2_first + b2*BS+j;
T dx, dy, dz;
dx = r2[j][0] - r1[tid][0];
dy = r2[j][1] - r1[tid][1];
dz = r2[j][2] - r1[tid][2];
T dist = min_dist(dx, dy, dz, L, Linv);
if (ptcl1 != ptcl2 && (ptcl1 < (N1+e1_first) ) && (ptcl2 < (N2+e2_first)))
mysum += eval_1d_spline (dist, rMax, drInv, A, coefs);
}
__syncthreads();
}
}
__shared__ T shared_sum[BS];
shared_sum[tid] = mysum;
__syncthreads();
for (int s=BS>>1; s>0; s >>=1)
{
if (tid < s)
shared_sum[tid] += shared_sum[tid+s];
__syncthreads();
}
T factor = (e1_first == e2_first) ? 0.5 : 1.0;
if (tid==0)
sum[blockIdx.x] += factor*shared_sum[0];
}
void
two_body_sum_PBC (float *R[], int e1_first, int e1_last, int e2_first, int e2_last,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], float sum[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 128;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
two_body_sum_PBC_kernel<float,BS><<<dimGrid,dimBlock>>>
(R, e1_first, e1_last, e2_first, e2_last,
spline_coefs, numCoefs, rMax, lattice, latticeInv, sum);
}
void
two_body_sum_PBC (double *R[], int e1_first, int e1_last, int e2_first, int e2_last,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[], double sum[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 128;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
two_body_sum_PBC_kernel<double,BS><<<dimGrid,dimBlock>>>
(R, e1_first, e1_last, e2_first, e2_last,
spline_coefs, numCoefs, rMax, lattice, latticeInv, sum);
}
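// two_body_ratio_PBC_kernel: accumulates the change in the two-body sum
// when particle inew moves to Rnew, i.e.
//   sum_j [ u(|Rnew - r_j|) - u(|Rold - r_j|) ]
// over all particles j in [first,last] except inew.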
template<typename T, int BS>
__global__ void
two_body_ratio_PBC_kernel(T **R, int first, int last,
T *Rnew, int inew,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T* latticeInv, T* sum)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = ((T)1)/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
__shared__ T myRnew[3], myRold[3];
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
if (tid < 3 )
{
myRnew[tid] = Rnew[3*blockIdx.x+tid];
myRold[tid] = myR[3*inew+tid];
}
__syncthreads();
__shared__ T coefs[MAX_COEFS];
__shared__ T r1[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[4][4];
if (tid < 16)
A[(tid>>2)][tid&3] = AcudaSpline[tid];
__syncthreads();
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
__shared__ T shared_sum[BS];
shared_sum[tid] = (T)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*N)
r1[0][n] = myR[3*first + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
T dx, dy, dz;
dx = myRnew[0] - r1[tid][0];
dy = myRnew[1] - r1[tid][1];
dz = myRnew[2] - r1[tid][2];
T dist = min_dist(dx, dy, dz, L, Linv);
T delta = eval_1d_spline (dist, rMax, drInv, A, coefs);
dx = myRold[0] - r1[tid][0];
dy = myRold[1] - r1[tid][1];
dz = myRold[2] - r1[tid][2];
dist = min_dist(dx, dy, dz, L, Linv);
delta -= eval_1d_spline (dist, rMax, drInv, A, coefs);
if (ptcl1 != inew && (ptcl1 < (N+first) ))
shared_sum[tid] += delta;
__syncthreads();
}
__syncthreads();
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
shared_sum[tid] += shared_sum[tid+s];
__syncthreads();
}
if (tid==0)
sum[blockIdx.x] += shared_sum[0];
}
void
two_body_ratio_PBC (float *R[], int first, int last,
float Rnew[], int inew,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], float sum[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 128;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
two_body_ratio_PBC_kernel<float,BS><<<dimGrid,dimBlock>>>
(R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, sum);
}
void
two_body_ratio_PBC (double *R[], int first, int last,
double Rnew[], int inew,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[], double sum[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
dim3 dimBlock(128);
dim3 dimGrid(numWalkers);
two_body_ratio_PBC_kernel<double,128><<<dimGrid,dimBlock>>>
(R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, sum);
}
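// two_body_ratio_grad_PBC_kernel: like the ratio kernel above, but it also
// accumulates the gradient of the new-position contribution and stores
// (delta_u, grad_x, grad_y, grad_z) in ratio_grad[4*walker + 0..3]; the
// zero flag selects overwrite versus accumulate.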
template<typename T, int BS>
__global__ void
two_body_ratio_grad_PBC_kernel(T const * const * __restrict__ R,
int first, int last,
T const * __restrict__ Rnew, int inew,
T const * __restrict__ spline_coefs,
int numCoefs, T rMax,
T const * __restrict__ lattice,
T const * __restrict__ latticeInv,
bool zero, T *__restrict__ ratio_grad)
{
__shared__ T shared_grad[BS][3];
__shared__ T r1[BS][3];
__shared__ T shared_sum[BS];
__shared__ T coefs[MAX_COEFS];
int tid = threadIdx.x;
T dr = rMax /(T)(numCoefs-3);
T drInv = ((T)1)/dr;
// Safety for rounding error
rMax *= 0.999999f;
if (tid < numCoefs)
{
coefs[tid] = spline_coefs[tid];
}
shared_sum[tid] = (T)0;
shared_grad[tid][0] = (T)0;
shared_grad[tid][1] = (T)0;
shared_grad[tid][2] = (T)0;
__syncthreads();
T const * __restrict__ myR = R[blockIdx.x];
T rnew_x = Rnew[3*blockIdx.x+0];
T rnew_y = Rnew[3*blockIdx.x+1];
T rnew_z = Rnew[3*blockIdx.x+2];
T rold_x = myR[3*inew+0];
T rold_y = myR[3*inew+1];
T rold_z = myR[3*inew+2];
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if (((3*b+i)*BS + tid) < (3*N))
{
r1[0][n] = myR[3*first + (3*b+i)*BS + tid];
}
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
T dx, dy, dz, u, du, d2u, delta, dist;
dx = rold_x - r1[tid][0];
dy = rold_y - r1[tid][1];
dz = rold_z - r1[tid][2];
dist = CMC_min_dist_only(dx, dy, dz/*, L, Linv, images*/);
delta = -CMC_eval_1d_spline (dist, rMax, drInv/*, A*/, coefs);
dx = rnew_x - r1[tid][0];
dy = rnew_y - r1[tid][1];
dz = rnew_z - r1[tid][2];
dist = CMC_min_dist(dx, dy, dz/*, L, Linv, images*/);
CMC_eval_1d_spline_vgl (dist, rMax, drInv/*, A*/, coefs, u, du, d2u);
delta += u;
if ((ptcl1 != inew) && (ptcl1 < (N + first) ))
{
du /= dist;
shared_sum[tid] += delta;
shared_grad[tid][0] += du * dx;
shared_grad[tid][1] += du * dy;
shared_grad[tid][2] += du * dz;
}
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
{
shared_sum[tid] += shared_sum[tid+s];
shared_grad[tid][0] += shared_grad[tid+s][0];
shared_grad[tid][1] += shared_grad[tid+s][1];
shared_grad[tid][2] += shared_grad[tid+s][2];
}
__syncthreads();
}
if (tid==0)
{
if (zero)
{
ratio_grad[4*blockIdx.x+0] = shared_sum[0];
ratio_grad[4*blockIdx.x+1] = shared_grad[0][0];
ratio_grad[4*blockIdx.x+2] = shared_grad[0][1];
ratio_grad[4*blockIdx.x+3] = shared_grad[0][2];
}
else
{
ratio_grad[4*blockIdx.x+0] += shared_sum[0];
ratio_grad[4*blockIdx.x+1] += shared_grad[0][0];
ratio_grad[4*blockIdx.x+2] += shared_grad[0][1];
ratio_grad[4*blockIdx.x+3] += shared_grad[0][2];
}
}
}
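// Fast-image variant: same bookkeeping as the kernel above, but the
// nearest-image displacement comes from the plain minimum-image reduction
// (CMC_min_dist_fast) with no 27-image search, which is valid when rMax
// does not exceed the simulation cell radius (see use_fast_image below).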
template<typename T, int BS>
__global__ void
two_body_ratio_grad_PBC_kernel_fast (T **R, int first, int last,
T *Rnew, int inew,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T* latticeInv,
bool zero, T* ratio_grad)
{
int tid = threadIdx.x;
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
__shared__ T *myR;
__shared__ T myRnew[3], myRold[3];
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
if (tid < 3 )
{
myRnew[tid] = Rnew[3*blockIdx.x+tid];
myRold[tid] = myR[3*inew+tid];
}
__syncthreads();
__shared__ T coefs[MAX_COEFS];
__shared__ T r1[BS][3];
/*
__shared__ T L[3][3], Linv[3][3];
*/
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
/*
if (tid < 9) {
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
*/
__shared__ T A[12][4];
if (tid < 16)
{
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
__syncthreads();
int N = last - first + 1;
int NB = (N+BS-1)/BS;
__shared__ T shared_sum[BS];
__shared__ T shared_grad[BS][3];
shared_sum[tid] = (T)0.0;
shared_grad[tid][0] = shared_grad[tid][1] = shared_grad[tid][2] = 0.0f;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*N)
r1[0][n] = myR[3*first + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
T dx, dy, dz, u, du, d2u, delta, dist;
dx = myRold[0] - r1[tid][0];
dy = myRold[1] - r1[tid][1];
dz = myRold[2] - r1[tid][2];
dist = CMC_min_dist_fast(dx, dy, dz/*, L, Linv*/);
delta = -eval_1d_spline (dist, rMax, drInv, A, coefs);
dx = myRnew[0] - r1[tid][0];
dy = myRnew[1] - r1[tid][1];
dz = myRnew[2] - r1[tid][2];
dist = CMC_min_dist_fast(dx, dy, dz/*, L, Linv*/);
eval_1d_spline_vgl (dist, rMax, drInv, A, coefs,
u, du, d2u);
delta += u;
if (ptcl1 != inew && (ptcl1 < (N+first) ))
{
du /= dist;
shared_sum[tid] += delta;
shared_grad[tid][0] += du * dx;
shared_grad[tid][1] += du * dy;
shared_grad[tid][2] += du * dz;
}
__syncthreads();
}
__syncthreads();
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
{
shared_sum[tid] += shared_sum[tid+s];
shared_grad[tid][0] += shared_grad[tid+s][0];
shared_grad[tid][1] += shared_grad[tid+s][1];
shared_grad[tid][2] += shared_grad[tid+s][2];
}
__syncthreads();
}
if (tid==0)
{
if (zero)
{
ratio_grad[4*blockIdx.x+0] = shared_sum[0];
ratio_grad[4*blockIdx.x+1] = shared_grad[0][0];
ratio_grad[4*blockIdx.x+2] = shared_grad[0][1];
ratio_grad[4*blockIdx.x+3] = shared_grad[0][2];
}
else
{
ratio_grad[4*blockIdx.x+0] += shared_sum[0];
ratio_grad[4*blockIdx.x+1] += shared_grad[0][0];
ratio_grad[4*blockIdx.x+2] += shared_grad[0][1];
ratio_grad[4*blockIdx.x+3] += shared_grad[0][2];
}
}
}
// use_fast_image indicates that rMax < simulation cell radius.  In that
// case the plain minimum-image reduction suffices and we don't have to
// search over the 27 periodic images.
void
two_body_ratio_grad_PBC(float *R[], int first, int last,
float Rnew[], int inew,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], bool zero,
float ratio_grad[], int numWalkers,
bool use_fast_image)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
CMC_PROFILING_BEGIN();
// fprintf(stderr, "first = %d\n", first);
// fprintf(stderr, "last = %d\n", last);
// fprintf(stderr, "inew = %d\n", inew);
// fprintf(stderr, "rMax = %1.3f\n", rMax);
if (use_fast_image)
{
two_body_ratio_grad_PBC_kernel_fast<float,BS><<<dimGrid,dimBlock>>>
(R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, zero, ratio_grad);
}
else
{
two_body_ratio_grad_PBC_kernel<float,BS><<<dimGrid,dimBlock, 0, gpu::kernelStream>>>
(R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, zero, ratio_grad);
}
CMC_PROFILING_END();
}
void
two_body_ratio_grad_PBC(double *R[], int first, int last,
double Rnew[], int inew,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[], bool zero,
double ratio_grad[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
two_body_ratio_grad_PBC_kernel<double,BS><<<dimGrid,dimBlock, 0, gpu::kernelStream>>>
(R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, zero, ratio_grad);
}
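// two_body_NLratio_PBC_kernel: non-local pseudopotential ratios.  Each
// block handles one NLjobGPU entry; for every quadrature point (up to
// MAX_RATIOS = 18) it accumulates the change in the two-body sum relative
// to the electron's current position and multiplies myJob.Ratios[iq] by
// exp(-delta_u).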
template<int BS>
__global__ void
two_body_NLratio_PBC_kernel(NLjobGPU<float> const * __restrict__ jobs,
int first, int last,
float const * const * __restrict__ spline_coefs,
int const * __restrict__ numCoefs,
float const * __restrict__ rMaxList,
float const * __restrict__ lattice,
float const * __restrict__ latticeInv,
float sim_cell_radius)
{
const int MAX_RATIOS = 18;
__shared__ float shared_sum[MAX_RATIOS][BS+1];
__shared__ float myRnew[MAX_RATIOS][3];
__shared__ float coefs[MAX_COEFS];
__shared__ float r1[BS][3];
float const * __restrict__ myCoefs = spline_coefs[blockIdx.x];
NLjobGPU<float> myJob = jobs[blockIdx.x];
const int myNumCoefs = numCoefs[blockIdx.x];
const int tid = threadIdx.x;
if (tid < myNumCoefs)
{
coefs[tid] = myCoefs[tid];
}
for (int i = 0; i < 3; i++)
{
if (i*BS + tid < 3*myJob.NumQuadPoints)
{
myRnew[0][i*BS+tid] = myJob.QuadPoints[i*BS+tid];
}
}
for (int i = 0; i < myJob.NumQuadPoints; i++)
{
shared_sum[i][tid] = (float)0;
}
__syncthreads();
const float rMax = rMaxList[blockIdx.x];
const float dr = rMax / (myNumCoefs - 3);
const float drInv = 1.0f / dr;
const int use_fast = sim_cell_radius >= rMax;
const float rold_x = myJob.R[3*myJob.Elec+0];
const float rold_y = myJob.R[3*myJob.Elec+1];
const float rold_z = myJob.R[3*myJob.Elec+2];
const int N = last - first + 1;
const int NB = N / BS + ((N % BS) ? 1 : 0);
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if (((3*b+i)*BS + tid) < (3*N))
{
r1[0][n] = myJob.R[3*first + (3*b+i)*BS + tid];
}
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
float dx = rold_x - r1[tid][0];
float dy = rold_y - r1[tid][1];
float dz = rold_z - r1[tid][2];
float dist;
if (use_fast)
{
dist = CMC_min_dist_fast(dx, dy, dz/*, L, Linv*/);
}
else
{
dist = CMC_min_dist_only(dx, dy, dz/*, L, Linv, images*/);
}
float uOld = CMC_eval_1d_spline (dist, rMax, drInv/*, A*/, coefs);
if (use_fast)
{
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
{
dx = myRnew[iq][0] - r1[tid][0];
dy = myRnew[iq][1] - r1[tid][1];
dz = myRnew[iq][2] - r1[tid][2];
dist = CMC_min_dist_fast(dx, dy, dz/*, L, Linv*/);
if ((ptcl1 != myJob.Elec) && (ptcl1 < (N + first)))
{
shared_sum[iq][tid] += CMC_eval_1d_spline (dist, rMax, drInv/*, A*/, coefs) - uOld;
}
}
}
else
{
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
{
dx = myRnew[iq][0] - r1[tid][0];
dy = myRnew[iq][1] - r1[tid][1];
dz = myRnew[iq][2] - r1[tid][2];
dist = CMC_min_dist_only(dx, dy, dz/*, L, Linv, images*/);
if ((ptcl1 != myJob.Elec) && (ptcl1 < (N + first)))
{
shared_sum[iq][tid] += CMC_eval_1d_spline (dist, rMax, drInv/*, A*/, coefs) - uOld;
}
}
}
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
{
for (int iq=0; iq < myJob.NumQuadPoints; iq++)
{
shared_sum[iq][tid] += shared_sum[iq][tid+s];
}
}
__syncthreads();
}
if (tid < myJob.NumQuadPoints)
{
myJob.Ratios[tid] *= exp(-shared_sum[tid][0]);
}
}
template<int BS>
__global__ void
two_body_NLratio_PBC_kernel(NLjobGPU<double> *jobs, int first, int last,
double **spline_coefs, int *numCoefs,
double *rMaxList,
double *lattice, double *latticeInv,
double sim_cell_radius)
{
const int MAX_RATIOS = 18;
int tid = threadIdx.x;
__shared__ NLjobGPU<double> myJob;
__shared__ double myRnew[MAX_RATIOS][3], myRold[3];
__shared__ double* myCoefs;
__shared__ int myNumCoefs;
__shared__ double rMax;
if (tid == 0)
{
myJob = jobs[blockIdx.x];
myCoefs = spline_coefs[blockIdx.x];
myNumCoefs = numCoefs[blockIdx.x];
rMax = rMaxList[blockIdx.x];
}
__syncthreads();
if (tid < 3 )
myRold[tid] = myJob.R[3*myJob.Elec+tid];
for (int i=0; i<3; i++)
if (i*BS + tid < 3*myJob.NumQuadPoints)
myRnew[0][i*BS+tid] = myJob.QuadPoints[i*BS+tid];
__syncthreads();
double dr = rMax/(double)(myNumCoefs-3);
double drInv = 1.0/dr;
__shared__ double coefs[MAX_COEFS];
__shared__ double r1[BS][3];
__shared__ double L[3][3], Linv[3][3];
if (tid < myNumCoefs)
coefs[tid] = myCoefs[tid];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ double A[4][4];
if (tid < 16)
A[(tid>>2)][tid&3] = AcudaSpline[tid];
__syncthreads();
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
__shared__ double shared_sum[MAX_RATIOS][BS+1];
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
shared_sum[iq][tid] = (double)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*N)
r1[0][n] = myJob.R[3*first + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
double dx, dy, dz;
dx = myRold[0] - r1[tid][0];
dy = myRold[1] - r1[tid][1];
dz = myRold[2] - r1[tid][2];
double dist = min_dist(dx, dy, dz, L, Linv);
double uOld = eval_1d_spline (dist, rMax, drInv, A, coefs);
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
{
dx = myRnew[iq][0] - r1[tid][0];
dy = myRnew[iq][1] - r1[tid][1];
dz = myRnew[iq][2] - r1[tid][2];
dist = min_dist(dx, dy, dz, L, Linv);
if (ptcl1 != myJob.Elec && (ptcl1 < (N+first)))
shared_sum[iq][tid] += eval_1d_spline (dist, rMax, drInv, A, coefs) - uOld;
}
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
for (int iq=0; iq < myJob.NumQuadPoints; iq++)
shared_sum[iq][tid] += shared_sum[iq][tid+s];
__syncthreads();
}
if (tid < myJob.NumQuadPoints)
myJob.Ratios[tid] *= exp(-shared_sum[tid][0]);
}
void
two_body_NLratios_PBC(NLjobGPU<float> jobs[], int first, int last,
float* spline_coefs[], int numCoefs[], float rMax[],
float lattice[], float latticeInv[], float sim_cell_radius,
int numjobs)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
CMC_PROFILING_BEGIN();
while (numjobs > 65535)
{
dim3 dimGrid(65535);
two_body_NLratio_PBC_kernel<BS><<<dimGrid,dimBlock>>>
(jobs, first, last, spline_coefs, numCoefs, rMax,
lattice, latticeInv, sim_cell_radius);
jobs += 65535;
numjobs -= 65535;
}
dim3 dimGrid(numjobs);
two_body_NLratio_PBC_kernel<BS><<<dimGrid,dimBlock>>>
(jobs, first, last, spline_coefs, numCoefs, rMax,
lattice, latticeInv, sim_cell_radius);
CMC_PROFILING_END();
}
void
two_body_NLratios_PBC(NLjobGPU<double> jobs[], int first, int last,
double* spline_coefs[], int numCoefs[], double rMax[],
double lattice[], double latticeInv[],
double sim_cell_radius, int numjobs)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
while (numjobs > 65535)
{
dim3 dimGrid(65535);
two_body_NLratio_PBC_kernel<BS><<<dimGrid,dimBlock>>>
(jobs, first, last, spline_coefs, numCoefs, rMax,
lattice, latticeInv, sim_cell_radius);
jobs += 65535;
numjobs -= 65535;
}
dim3 dimGrid(numjobs);
two_body_NLratio_PBC_kernel<BS><<<dimGrid,dimBlock>>>
(jobs, first, last, spline_coefs, numCoefs, rMax,
lattice, latticeInv, sim_cell_radius);
}
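// two_body_update_PBC_kernel: copies the coordinate held in the extra slot
// at index N of each walker's R array into slot iat (one block per walker,
// three threads copy x, y, z), used after an accepted single-particle move.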
template<typename T>
__global__ void
two_body_update_PBC_kernel (T **R, int N, int iat)
{
__shared__ T* myR;
if (threadIdx.x == 0)
myR = R[blockIdx.x];
__syncthreads();
if (threadIdx.x < 3)
myR[3*iat + threadIdx.x] = myR[3*N + threadIdx.x];
}
void
two_body_update_PBC(float *R[], int N, int iat, int numWalkers)
{
dim3 dimBlock(32);
dim3 dimGrid(numWalkers);
two_body_update_PBC_kernel<float><<<dimGrid, dimBlock>>> (R, N, iat);
}
void
two_body_update_PBC(double *R[], int N, int iat, int numWalkers)
{
dim3 dimBlock(3);
dim3 dimGrid(numWalkers);
two_body_update_PBC_kernel<double><<<dimGrid, dimBlock>>> (R, N, iat);
}
#define MAX_COEFS 32
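// two_body_grad_lapl_PBC_kernel: accumulates per-electron gradient and
// Laplacian contributions of the pair term.  For each pair the radial
// derivatives are evaluated once; du/dist times the minimum-image
// displacement is added to the three gradient slots and -(d2u + 2*du/dist)
// to the fourth, and the results land in
// gradLapl[walker*row_stride + 4*ptcl + {0..3}].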
template<typename T, int BS>
__global__ void
two_body_grad_lapl_PBC_kernel(T **R, int e1_first, int e1_last,
int e2_first, int e2_last,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T *latticeInv,
T *gradLapl, int row_stride)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
if (tid == 0)
myR = R[blockIdx.x];
__shared__ T coefs[MAX_COEFS];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
__shared__ T r1[BS][3], r2[BS][3];
/*
__shared__ T L[3][3], Linv[3][3];
if (tid < 9) {
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[12][4];
if (tid < 16) {
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
*/
__syncthreads();
int N1 = e1_last - e1_first + 1;
int N2 = e2_last - e2_first + 1;
int NB1 = N1/BS + ((N1 % BS) ? 1 : 0);
int NB2 = N2/BS + ((N2 % BS) ? 1 : 0);
__shared__ T sGradLapl[BS][4];
for (int b1=0; b1 < NB1; b1++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b1+i)*BS + tid < 3*N1)
r1[0][i*BS + tid] = myR[3*e1_first + (3*b1+i)*BS + tid];
__syncthreads();
int ptcl1 = e1_first+b1*BS + tid;
int offset = blockIdx.x * row_stride + 4*b1*BS + 4*e1_first;
sGradLapl[tid][0] = sGradLapl[tid][1] =
sGradLapl[tid][2] = sGradLapl[tid][3] = (T)0.0;
for (int b2=0; b2 < NB2; b2++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b2+i)*BS + tid < 3*N2)
r2[0][i*BS + tid] = myR[3*e2_first + (3*b2+i)*BS + tid];
__syncthreads();
// Now, loop over particles
int end = (b2+1)*BS < N2 ? BS : N2-b2*BS;
for (int j=0; j<end; j++)
{
int ptcl2 = e2_first + b2*BS+j;
T dx, dy, dz, u, du, d2u;
dx = r2[j][0] - r1[tid][0];
dy = r2[j][1] - r1[tid][1];
dz = r2[j][2] - r1[tid][2];
T dist = CMC_min_dist(dx, dy, dz/*, L, Linv*/);
CMC_eval_1d_spline_vgl (dist, rMax, drInv/*, A*/, coefs, u, du, d2u);
if (ptcl1 != ptcl2 && (ptcl1 < (N1+e1_first) ) && (ptcl2 < (N2+e2_first)))
{
du /= dist;
sGradLapl[tid][0] += du * dx;
sGradLapl[tid][1] += du * dy;
sGradLapl[tid][2] += du * dz;
sGradLapl[tid][3] -= d2u + 2.0*du;
}
}
__syncthreads();
}
for (int i=0; i<4; i++)
if ((4*b1+i)*BS + tid < 4*N1)
gradLapl[offset + i*BS +tid] += sGradLapl[0][i*BS+tid];
__syncthreads();
}
}
template<typename T, int BS>
__global__ void
two_body_grad_lapl_PBC_kernel_fast(T **R, int e1_first, int e1_last,
int e2_first, int e2_last,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T *latticeInv,
T *gradLapl, int row_stride)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
if (tid == 0)
myR = R[blockIdx.x];
__shared__ T coefs[MAX_COEFS];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
__shared__ T r1[BS][3], r2[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[12][4];
if (tid < 16)
{
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
__syncthreads();
int N1 = e1_last - e1_first + 1;
int N2 = e2_last - e2_first + 1;
int NB1 = N1/BS + ((N1 % BS) ? 1 : 0);
int NB2 = N2/BS + ((N2 % BS) ? 1 : 0);
__shared__ T sGradLapl[BS][4];
for (int b1=0; b1 < NB1; b1++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b1+i)*BS + tid < 3*N1)
r1[0][i*BS + tid] = myR[3*e1_first + (3*b1+i)*BS + tid];
__syncthreads();
int ptcl1 = e1_first+b1*BS + tid;
int offset = blockIdx.x * row_stride + 4*b1*BS + 4*e1_first;
sGradLapl[tid][0] = sGradLapl[tid][1] =
sGradLapl[tid][2] = sGradLapl[tid][3] = (T)0.0;
for (int b2=0; b2 < NB2; b2++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b2+i)*BS + tid < 3*N2)
r2[0][i*BS + tid] = myR[3*e2_first + (3*b2+i)*BS + tid];
__syncthreads();
// Now, loop over particles
int end = (b2+1)*BS < N2 ? BS : N2-b2*BS;
for (int j=0; j<end; j++)
{
int ptcl2 = e2_first + b2*BS+j;
T dx, dy, dz, u, du, d2u;
dx = r2[j][0] - r1[tid][0];
dy = r2[j][1] - r1[tid][1];
dz = r2[j][2] - r1[tid][2];
T dist = min_dist(dx, dy, dz, L, Linv);
eval_1d_spline_vgl (dist, rMax, drInv, A, coefs, u, du, d2u);
if (ptcl1 != ptcl2 && (ptcl1 < (N1+e1_first) ) && (ptcl2 < (N2+e2_first)))
{
du /= dist;
sGradLapl[tid][0] += du * dx;
sGradLapl[tid][1] += du * dy;
sGradLapl[tid][2] += du * dz;
sGradLapl[tid][3] -= d2u + 2.0*du;
}
}
__syncthreads();
}
for (int i=0; i<4; i++)
if ((4*b1+i)*BS + tid < 4*N1)
gradLapl[offset + i*BS +tid] += sGradLapl[0][i*BS+tid];
__syncthreads();
}
}
void
two_body_grad_lapl_PBC(float *R[], int e1_first, int e1_last,
int e2_first, int e2_last,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], float sim_cell_radius,
float gradLapl[], int row_stride, int numWalkers)
{
  if (!AisInitializedPBC)
    cuda_spline_init_PBC();
  const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
CMC_PROFILING_BEGIN();
if (sim_cell_radius >= rMax)
two_body_grad_lapl_PBC_kernel_fast<float,BS><<<dimGrid,dimBlock>>>
(R, e1_first, e1_last, e2_first, e2_last, spline_coefs, numCoefs,
rMax, lattice, latticeInv, gradLapl, row_stride);
else
two_body_grad_lapl_PBC_kernel<float,BS><<<dimGrid,dimBlock>>>
(R, e1_first, e1_last, e2_first, e2_last, spline_coefs, numCoefs,
rMax, lattice, latticeInv, gradLapl, row_stride);
CMC_PROFILING_END();
}
void
two_body_grad_lapl_PBC(double *R[], int e1_first, int e1_last,
int e2_first, int e2_last,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[],
double gradLapl[], int row_stride, int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
two_body_grad_lapl_PBC_kernel<double,BS><<<dimGrid,dimBlock>>>
(R, e1_first, e1_last, e2_first, e2_last, spline_coefs, numCoefs,
rMax, lattice, latticeInv, gradLapl, row_stride);
}
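// two_body_grad_PBC_kernel: gradient of the two-body sum with respect to
// the single particle iat.  Each thread handles one neighbor per tile,
// accumulates (du/dist)*(dx,dy,dz) into shared memory, and a tree
// reduction leaves the three components in sGrad[0]; zeroOut selects
// overwrite versus accumulate in grad[].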
template<typename T, int BS>
__global__ void
two_body_grad_PBC_kernel (T const * const * __restrict__ R,
int first, int last, int iat,
T const * __restrict__ spline_coefs,
int numCoefs, T rMax,
T const * __restrict__ lattice,
T const * __restrict__ latticeInv,
bool zeroOut, T * __restrict__ grad)
{
__shared__ T sGrad[BS][3];
__shared__ T r1[BS][3];
__shared__ T coefs[MAX_COEFS];
T dr = rMax/(T)(numCoefs-3);
T drInv = ((T)1)/dr;
int tid = threadIdx.x;
// Safety for rounding error
rMax *= 0.999999f;
if (tid < numCoefs)
{
coefs[tid] = spline_coefs[tid];
}
sGrad[tid][0] = (T)0;
sGrad[tid][1] = (T)0;
sGrad[tid][2] = (T)0;
__syncthreads();
T const * __restrict__ myR = R[blockIdx.x];
T r2_x = myR[3*iat+0];
T r2_y = myR[3*iat+1];
T r2_z = myR[3*iat+2];
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
for (int b = 0; b < NB; b++)
{
// Load block of positions from global memory
for (int i = 0; i < 3; i++)
{
if ((3*b+i)*BS + tid < 3*N)
{
r1[0][i*BS + tid] = myR[3*first + (3*b+i)*BS + tid];
}
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
T dx, dy, dz, u, du, d2u;
dx = r2_x - r1[tid][0];
dy = r2_y - r1[tid][1];
dz = r2_z - r1[tid][2];
T dist = CMC_min_dist(dx, dy, dz/*, L, Linv, images*/);
CMC_eval_1d_spline_vgl (dist, rMax, drInv/*, A*/, coefs, u, du, d2u);
if (ptcl1 != iat && ptcl1 < (N+first))
{
du /= dist;
sGrad[tid][0] += du * dx;
sGrad[tid][1] += du * dy;
sGrad[tid][2] += du * dz;
}
__syncthreads();
}
// Do reduction across threads in block
for (int s=BS>>1; s>0; s>>=1)
{
if (tid < s)
{
sGrad[tid][0] += sGrad[tid+s][0];
sGrad[tid][1] += sGrad[tid+s][1];
sGrad[tid][2] += sGrad[tid+s][2];
}
__syncthreads();
}
if (tid < 3)
{
if (zeroOut)
{
grad[3*blockIdx.x + tid] = sGrad[0][tid];
}
else
{
grad[3*blockIdx.x + tid] += sGrad[0][tid];
}
}
}
template<typename T, int BS>
__global__ void
two_body_grad_PBC_kernel_fast(T **R, int first, int last, int iat,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T *latticeInv, bool zeroOut, T *grad)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR, r2[3];
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
if (tid < 3)
r2[tid] = myR[3*iat+tid];
__shared__ T coefs[MAX_COEFS];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
__shared__ T r1[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[12][4];
if (tid < 16)
{
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
__syncthreads();
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
__shared__ T sGrad[BS][3];
sGrad[tid][0] = sGrad[tid][1] = sGrad[tid][2] = (T)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b+i)*BS + tid < 3*N)
r1[0][i*BS + tid] = myR[3*first + (3*b+i)*BS + tid];
__syncthreads();
int ptcl1 = first+b*BS + tid;
T dx, dy, dz, u, du, d2u;
dx = r2[0] - r1[tid][0];
dy = r2[1] - r1[tid][1];
dz = r2[2] - r1[tid][2];
T dist = min_dist_fast(dx, dy, dz, L, Linv);
eval_1d_spline_vgl (dist, rMax, drInv, A, coefs, u, du, d2u);
if (ptcl1 != iat && ptcl1 < (N+first))
{
du /= dist;
sGrad[tid][0] += du * dx;
sGrad[tid][1] += du * dy;
sGrad[tid][2] += du * dz;
}
__syncthreads();
}
// Do reduction across threads in block
for (int s=BS>>1; s>0; s>>=1)
{
if (tid < s)
{
sGrad[tid][0] += sGrad[tid+s][0];
sGrad[tid][1] += sGrad[tid+s][1];
sGrad[tid][2] += sGrad[tid+s][2];
}
__syncthreads();
}
if (tid < 3)
{
if (zeroOut)
grad[3*blockIdx.x + tid] = sGrad[0][tid];
else
grad[3*blockIdx.x + tid] += sGrad[0][tid];
}
}
void
two_body_gradient_PBC (float *R[], int first, int last, int iat,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], float sim_cell_radius,
bool zeroOut,
float grad[], int numWalkers)
{
const int BS = 32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
CMC_PROFILING_BEGIN();
if (sim_cell_radius >= rMax)
two_body_grad_PBC_kernel_fast<float,BS><<<dimGrid,dimBlock, 0, gpu::kernelStream>>>
(R, first, last, iat, spline_coefs, numCoefs,
rMax, lattice, latticeInv, zeroOut, grad);
else
two_body_grad_PBC_kernel<float,BS><<<dimGrid,dimBlock, 0, gpu::kernelStream>>>
(R, first, last, iat, spline_coefs, numCoefs,
rMax, lattice, latticeInv, zeroOut, grad);
CMC_PROFILING_END();
}
void
two_body_gradient_PBC (double *R[], int first, int last, int iat,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[], bool zeroOut,
double grad[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
two_body_grad_PBC_kernel<double,BS><<<dimGrid,dimBlock, 0, gpu::kernelStream>>>
(R, first, last, iat, spline_coefs, numCoefs,
rMax, lattice, latticeInv, zeroOut, grad);
}
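// two_body_derivs_PBC_kernel: derivatives of the two-body sum with respect
// to the spline coefficients (first column of sderivs) together with a
// second set built from the dot product with gradLogPsi and the Laplacian
// of u (second column), used for wavefunction optimization.  Four
// neighboring coefficients share each interval, so the per-interval
// updates are serialized over the block ("for id ... if (tid == id)") to
// avoid shared-memory write races.  The host wrappers below launch the
// same kernel in both branches of the sim_cell_radius test; there is no
// separate fast-image variant here.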
template<typename T, int BS>
__global__ void
two_body_derivs_PBC_kernel(T **R, T **gradLogPsi,
int e1_first, int e1_last,
int e2_first, int e2_last,
int numCoefs, T rMax,
T *lattice, T *latticeInv,
T **derivs)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0f/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR, *myGrad, *myDerivs;
if (tid == 0)
{
myR = R[blockIdx.x];
myGrad = gradLogPsi[blockIdx.x];
myDerivs = derivs[blockIdx.x];
}
__shared__ T sderivs[MAX_COEFS][2];
// __shared__ T coefs[MAX_COEFS];
// if (tid < numCoefs)
// coefs[tid] = spline_coefs[tid];
__shared__ T r1[BS][3], r2[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[12][4];
if (tid < 16)
{
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
__syncthreads();
sderivs[tid][0] = T();
sderivs[tid][1] = T();
int N1 = e1_last - e1_first + 1;
int N2 = e2_last - e2_first + 1;
int NB1 = N1/BS + ((N1 % BS) ? 1 : 0);
int NB2 = N2/BS + ((N2 % BS) ? 1 : 0);
__shared__ T sGrad[BS][3];
for (int b1=0; b1 < NB1; b1++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b1+i)*BS + tid < 3*N1)
{
int outoff = i*BS+tid;
int inoff = outoff + 3*e1_first + 3*b1*BS;
r1[0][outoff] = myR[inoff];//[3*e1_first + (3*b1+i)*BS + tid];
sGrad[0][outoff] = myGrad[inoff];
}
__syncthreads();
int ptcl1 = e1_first+b1*BS + tid;
for (int b2=0; b2 < NB2; b2++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b2+i)*BS + tid < 3*N2)
r2[0][i*BS + tid] = myR[3*e2_first + (3*b2+i)*BS + tid];
__syncthreads();
// Now, loop over particles
int end = (b2+1)*BS < N2 ? BS : N2-b2*BS;
for (int j=0; j<end; j++)
{
int ptcl2 = e2_first + b2*BS+j;
T dx, dy, dz;
dx = r2[j][0] - r1[tid][0];
dy = r2[j][1] - r1[tid][1];
dz = r2[j][2] - r1[tid][2];
T dist = min_dist(dx, dy, dz, L, Linv);
T distInv = 1.0f/dist;
T s = dist * drInv;
T sf = floorf (s);
int index = (int)sf;
T t = s - sf;
T t2 = t*t;
T t3 = t*t2;
T v0, v1, v2, v3;
// sderivs[index+0][0] += (A[0][0]*t3 + A[0][1]*t2 + A[0][2]*t + A[0][3]);
// sderivs[index+1][0] += (A[1][0]*t3 + A[1][1]*t2 + A[1][2]*t + A[1][3]);
// sderivs[index+2][0] += (A[2][0]*t3 + A[2][1]*t2 + A[2][2]*t + A[2][3]);
// sderivs[index+3][0] += (A[3][0]*t3 + A[3][1]*t2 + A[3][2]*t + A[3][3]);
v0 = (A[0][0]*t3 + A[0][1]*t2 + A[0][2]*t + A[0][3]);
v1 = (A[1][0]*t3 + A[1][1]*t2 + A[1][2]*t + A[1][3]);
v2 = (A[2][0]*t3 + A[2][1]*t2 + A[2][2]*t + A[2][3]);
v3 = (A[3][0]*t3 + A[3][1]*t2 + A[3][2]*t + A[3][3]);
for (int id=0; id<BS; id++)
if (tid == id && ptcl1 != ptcl2 && ptcl1 <= e1_last && (dist < rMax))
{
sderivs[index+0][0] += v0;
sderivs[index+1][0] += v1;
sderivs[index+2][0] += v2;
sderivs[index+3][0] += v3;
}
T prefact = (dx*sGrad[tid][0] + dy*sGrad[tid][1] + dz*sGrad[tid][2])*distInv;
T du0 = drInv * (A[4][0]*t3 + A[4][1]*t2 + A[4][2]*t + A[4][3]);
T du1 = drInv * (A[5][0]*t3 + A[5][1]*t2 + A[5][2]*t + A[5][3]);
T du2 = drInv * (A[6][0]*t3 + A[6][1]*t2 + A[6][2]*t + A[6][3]);
T du3 = drInv * (A[7][0]*t3 + A[7][1]*t2 + A[7][2]*t + A[7][3]);
// This is the dot (gradu, grad_log_psi) term.
v0 = 2.0f* prefact * du0;
v1 = 2.0f* prefact * du1;
v2 = 2.0f* prefact * du2;
v3 = 2.0f* prefact * du3;
// This is the lapl u term
v0 -= drInv*drInv*(A[ 8][0]*t3 + A[ 8][1]*t2 + A[ 8][2]*t + A[ 8][3]) + 2.0f*du0*distInv;
v1 -= drInv*drInv*(A[ 9][0]*t3 + A[ 9][1]*t2 + A[ 9][2]*t + A[ 9][3]) + 2.0f*du1*distInv;
v2 -= drInv*drInv*(A[10][0]*t3 + A[10][1]*t2 + A[10][2]*t + A[10][3]) + 2.0f*du2*distInv;
v3 -= drInv*drInv*(A[11][0]*t3 + A[11][1]*t2 + A[11][2]*t + A[11][3]) + 2.0f*du3*distInv;
for (int id=0; id<BS; id++)
if (tid == id && ptcl1 != ptcl2 && ptcl1 <= e1_last && (dist < rMax))
{
sderivs[index+0][1] += v0;
sderivs[index+1][1] += v1;
sderivs[index+2][1] += v2;
sderivs[index+3][1] += v3;
}
}
__syncthreads();
}
}
// if (e1_first == e2_first)
sderivs[tid][0] *= 0.5f;
sderivs[tid][1] *= 0.5f;
if (tid < 2*numCoefs)
myDerivs[tid] = -sderivs[0][tid];
if (tid+BS < 2*numCoefs)
myDerivs[tid+BS] = sderivs[0][tid+BS];
}
void
two_body_derivs_PBC(float *R[], float *gradLogPsi[], int e1_first, int e1_last,
int e2_first, int e2_last,
int numCoefs, float rMax,
float lattice[], float latticeInv[], float sim_cell_radius,
float *derivs[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
if (sim_cell_radius >= rMax)
two_body_derivs_PBC_kernel<float,BS><<<dimGrid,dimBlock>>>
(R, gradLogPsi, e1_first, e1_last, e2_first, e2_last, numCoefs,
rMax, lattice, latticeInv, derivs);
else
two_body_derivs_PBC_kernel<float,BS><<<dimGrid,dimBlock>>>
(R, gradLogPsi, e1_first, e1_last, e2_first, e2_last, numCoefs,
rMax, lattice, latticeInv, derivs);
}
void
two_body_derivs_PBC(double *R[], double *gradLogPsi[], int e1_first, int e1_last,
int e2_first, int e2_last,
int numCoefs, double rMax,
double lattice[], double latticeInv[], double sim_cell_radius,
double *derivs[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
if (sim_cell_radius >= rMax)
two_body_derivs_PBC_kernel<double,BS><<<dimGrid,dimBlock>>>
(R, gradLogPsi, e1_first, e1_last, e2_first, e2_last, numCoefs,
rMax, lattice, latticeInv, derivs);
else
two_body_derivs_PBC_kernel<double,BS><<<dimGrid,dimBlock>>>
(R, gradLogPsi, e1_first, e1_last, e2_first, e2_last, numCoefs,
rMax, lattice, latticeInv, derivs);
}
////////////////////////////////////////////////////////////////
// One-body routines //
////////////////////////////////////////////////////////////////
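// The one-body kernels below mirror the two-body ones above, with the
// electron-electron pair replaced by a center-electron pair: C holds the
// center (ion) positions, R the per-walker electron positions, and the
// same spline and minimum-image machinery is reused.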
template<typename T, int BS >
__global__ void
one_body_sum_PBC_kernel(T *C, T **R, int cfirst, int clast,
int efirst, int elast,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T *latticeInv, T *sum)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
if (tid == 0)
myR = R[blockIdx.x];
__shared__ T coefs[MAX_COEFS];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
__shared__ T rc[BS][3], re[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[4][4];
if (tid < 16)
A[tid>>2][tid&3] = AcudaSpline[tid];
__syncthreads();
int Nc = clast - cfirst + 1;
int Ne = elast - efirst + 1;
int NBc = Nc/BS + ((Nc % BS) ? 1 : 0);
int NBe = Ne/BS + ((Ne % BS) ? 1 : 0);
T mysum = (T)0.0;
for (int bc=0; bc < NBc; bc++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*bc+i)*BS + tid < 3*Nc)
rc[0][i*BS + tid] = C[3*cfirst + (3*bc+i)*BS + tid];
__syncthreads();
int ptcl1 = cfirst+bc*BS + tid;
for (int be=0; be < NBe; be++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*be+i)*BS + tid < 3*Ne)
re[0][i*BS + tid] = myR[3*efirst + (3*be+i)*BS + tid];
__syncthreads();
// Now, loop over particles
int end = (be+1)*BS < Ne ? BS : Ne-be*BS;
for (int j=0; j<end; j++)
{
int ptcl2 = efirst + be*BS+j;
T dx, dy, dz;
dx = re[j][0] - rc[tid][0];
dy = re[j][1] - rc[tid][1];
dz = re[j][2] - rc[tid][2];
T dist = min_dist(dx, dy, dz, L, Linv);
if ((ptcl1 < (Nc+cfirst) ) && (ptcl2 < (Ne+efirst)))
mysum += eval_1d_spline (dist, rMax, drInv, A, coefs);
}
}
__syncthreads();
}
__shared__ T shared_sum[BS];
shared_sum[tid] = mysum;
__syncthreads();
for (int s=BS>>1; s>0; s >>=1)
{
if (tid < s)
shared_sum[tid] += shared_sum[tid+s];
__syncthreads();
}
if (tid==0)
sum[blockIdx.x] += shared_sum[0];
}
void
one_body_sum_PBC (float C[], float *R[], int cfirst, int clast, int efirst, int elast,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], float sum[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
one_body_sum_PBC_kernel<float,BS><<<dimGrid,dimBlock>>>
(C, R, cfirst, clast, efirst, elast,
spline_coefs, numCoefs, rMax, lattice, latticeInv, sum);
}
void
one_body_sum_PBC (double C[], double *R[], int cfirst, int clast, int efirst, int elast,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[], double sum[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 128;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
one_body_sum_PBC_kernel<double,BS><<<dimGrid,dimBlock>>>
(C, R, cfirst, clast, efirst, elast,
spline_coefs, numCoefs, rMax, lattice, latticeInv, sum);
}
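// one_body_ratio_PBC_kernel: change in the one-body sum when electron inew
// moves to Rnew, summed over all centers in [cfirst,clast].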
template<typename T, int BS>
__global__ void
one_body_ratio_PBC_kernel(T *C, T **R, int cfirst, int clast,
T *Rnew, int inew,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T *latticeInv, T *sum)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
__shared__ T myRnew[3], myRold[3];
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
if (tid < 3 )
{
myRnew[tid] = Rnew[3*blockIdx.x+tid];
myRold[tid] = myR[3*inew+tid];
}
__syncthreads();
__shared__ T coefs[MAX_COEFS];
__shared__ T c[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[4][4];
if (tid < 16)
A[(tid>>2)][tid&3] = AcudaSpline[tid];
__syncthreads();
int Nc = clast - cfirst + 1;
int NB = Nc/BS + ((Nc % BS) ? 1 : 0);
__shared__ T shared_sum[BS];
shared_sum[tid] = (T)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*Nc)
c[0][n] = C[3*cfirst + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = cfirst+b*BS + tid;
T dx, dy, dz;
dx = myRnew[0] - c[tid][0];
dy = myRnew[1] - c[tid][1];
dz = myRnew[2] - c[tid][2];
T dist = min_dist(dx, dy, dz, L, Linv);
T delta = eval_1d_spline (dist, rMax, drInv, A, coefs);
dx = myRold[0] - c[tid][0];
dy = myRold[1] - c[tid][1];
dz = myRold[2] - c[tid][2];
dist = min_dist(dx, dy, dz, L, Linv);
delta -= eval_1d_spline (dist, rMax, drInv, A, coefs);
if (ptcl1 < (Nc+cfirst) )
shared_sum[tid] += delta;
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
shared_sum[tid] += shared_sum[tid+s];
__syncthreads();
}
if (tid==0)
sum[blockIdx.x] += shared_sum[0];
}
void
one_body_ratio_PBC (float C[], float *R[], int first, int last,
float Rnew[], int inew,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], float sum[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
one_body_ratio_PBC_kernel<float,BS><<<dimGrid,dimBlock>>>
(C, R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, sum);
}
void
one_body_ratio_PBC (double C[], double *R[], int first, int last,
double Rnew[], int inew,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[], double sum[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
dim3 dimBlock(128);
dim3 dimGrid(numWalkers);
one_body_ratio_PBC_kernel<double,128><<<dimGrid,dimBlock>>>
(C, R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, sum);
}
template<typename T, int BS>
__global__ void
one_body_ratio_grad_PBC_kernel(T *C, T **R, int cfirst, int clast,
T *Rnew, int inew,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T* latticeInv, bool zero,
T *ratio_grad)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
__shared__ T myRnew[3], myRold[3];
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
if (tid < 3 )
{
myRnew[tid] = Rnew[3*blockIdx.x+tid];
myRold[tid] = myR[3*inew+tid];
}
__syncthreads();
__shared__ T coefs[MAX_COEFS];
__shared__ T c[BS][3];
/*
__shared__ T L[3][3], Linv[3][3];
*/
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
/*
if (tid < 9) {
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
int index=0;
__shared__ T images[27][3];
if (tid < 3)
for (T i=-1.0; i<=1.001; i+=1.0)
for (T j=-1.0; j<=1.001; j+=1.0)
for (T k=-1.0; k<=1.001; k+=1.0) {
images[index][tid] =
i*L[0][tid] + j*L[1][tid] + k*L[2][tid];
index++;
}
__shared__ T A[12][4];
if (tid < 16) {
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
*/
__syncthreads();
int Nc = clast - cfirst + 1;
int NB = Nc/BS + ((Nc % BS) ? 1 : 0);
__shared__ T shared_sum[BS];
__shared__ T shared_grad[BS][3];
shared_sum[tid] = (T)0.0;
shared_grad[tid][0] = shared_grad[tid][1] = shared_grad[tid][2] = 0.0f;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*Nc)
c[0][n] = C[3*cfirst + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = cfirst+b*BS + tid;
T dx, dy, dz, dist, delta, u, du, d2u;
dx = myRold[0] - c[tid][0];
dy = myRold[1] - c[tid][1];
dz = myRold[2] - c[tid][2];
dist = CMC_min_dist(dx, dy, dz/*, L, Linv, images*/);
    delta = -CMC_eval_1d_spline (dist, rMax, drInv/*, A*/, coefs);
dx = myRnew[0] - c[tid][0];
dy = myRnew[1] - c[tid][1];
dz = myRnew[2] - c[tid][2];
dist = CMC_min_dist(dx, dy, dz/*, L, Linv, images*/);
CMC_eval_1d_spline_vgl (dist, rMax, drInv/*, A*/, coefs, u, du, d2u);
delta += u;
if (ptcl1 < (Nc+cfirst) )
{
du /= dist;
shared_sum[tid] += delta;
shared_grad[tid][0] += du * dx;
shared_grad[tid][1] += du * dy;
shared_grad[tid][2] += du * dz;
}
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
{
shared_sum[tid] += shared_sum[tid+s];
shared_grad[tid][0] += shared_grad[tid+s][0];
shared_grad[tid][1] += shared_grad[tid+s][1];
shared_grad[tid][2] += shared_grad[tid+s][2];
}
__syncthreads();
}
if (tid==0)
{
if (zero)
{
ratio_grad[4*blockIdx.x+0] = shared_sum[0];
ratio_grad[4*blockIdx.x+1] = shared_grad[0][0];
ratio_grad[4*blockIdx.x+2] = shared_grad[0][1];
ratio_grad[4*blockIdx.x+3] = shared_grad[0][2];
}
else
{
ratio_grad[4*blockIdx.x+0] += shared_sum[0];
ratio_grad[4*blockIdx.x+1] += shared_grad[0][0];
ratio_grad[4*blockIdx.x+2] += shared_grad[0][1];
ratio_grad[4*blockIdx.x+3] += shared_grad[0][2];
}
}
}
template<typename T, int BS>
__global__ void
one_body_ratio_grad_PBC_kernel_fast(T *C, T **R, int cfirst, int clast,
T *Rnew, int inew,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T *latticeInv, bool zero,
T *ratio_grad)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
__shared__ T myRnew[3], myRold[3];
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
if (tid < 3 )
{
myRnew[tid] = Rnew[3*blockIdx.x+tid];
myRold[tid] = myR[3*inew+tid];
}
__syncthreads();
__shared__ T coefs[MAX_COEFS];
__shared__ T c[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[12][4];
if (tid < 16)
{
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
__syncthreads();
int Nc = clast - cfirst + 1;
int NB = Nc/BS + ((Nc % BS) ? 1 : 0);
__shared__ T shared_sum[BS];
__shared__ T shared_grad[BS][3];
shared_sum[tid] = (T)0.0;
shared_grad[tid][0] = shared_grad[tid][1] = shared_grad[tid][2] = 0.0f;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*Nc)
c[0][n] = C[3*cfirst + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = cfirst+b*BS + tid;
T dx, dy, dz, dist, delta, u, du, d2u;
dx = myRold[0] - c[tid][0];
dy = myRold[1] - c[tid][1];
dz = myRold[2] - c[tid][2];
dist = min_dist_fast(dx, dy, dz, L, Linv);
    delta = -eval_1d_spline (dist, rMax, drInv, A, coefs);
dx = myRnew[0] - c[tid][0];
dy = myRnew[1] - c[tid][1];
dz = myRnew[2] - c[tid][2];
dist = min_dist_fast(dx, dy, dz, L, Linv);
eval_1d_spline_vgl (dist, rMax, drInv, A, coefs, u, du, d2u);
delta += u;
if (ptcl1 < (Nc+cfirst) )
{
du /= dist;
shared_sum[tid] += delta;
shared_grad[tid][0] += du * dx;
shared_grad[tid][1] += du * dy;
shared_grad[tid][2] += du * dz;
}
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
{
shared_sum[tid] += shared_sum[tid+s];
shared_grad[tid][0] += shared_grad[tid+s][0];
shared_grad[tid][1] += shared_grad[tid+s][1];
shared_grad[tid][2] += shared_grad[tid+s][2];
}
__syncthreads();
}
if (tid==0)
{
if (zero)
{
ratio_grad[4*blockIdx.x+0] = shared_sum[0];
ratio_grad[4*blockIdx.x+1] = shared_grad[0][0];
ratio_grad[4*blockIdx.x+2] = shared_grad[0][1];
ratio_grad[4*blockIdx.x+3] = shared_grad[0][2];
}
else
{
ratio_grad[4*blockIdx.x+0] += shared_sum[0];
ratio_grad[4*blockIdx.x+1] += shared_grad[0][0];
ratio_grad[4*blockIdx.x+2] += shared_grad[0][1];
ratio_grad[4*blockIdx.x+3] += shared_grad[0][2];
}
}
}
void
one_body_ratio_grad_PBC (float C[], float *R[], int first, int last,
float Rnew[], int inew,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], bool zero,
float ratio_grad[], int numWalkers,
bool use_fast_image)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 32;
CMC_PROFILING_BEGIN();
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
// if (use_fast_image)
// one_body_ratio_grad_kernel_fast<float,BS><<<dimGrid,dimBlock>>>
// (C, R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
// lattice, latticeInv, zero, ratio_grad);
// else
one_body_ratio_grad_PBC_kernel<float,BS><<<dimGrid,dimBlock, 0, gpu::kernelStream>>>
(C, R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, zero, ratio_grad);
CMC_PROFILING_END();
}
void
one_body_ratio_grad_PBC (double C[], double *R[], int first, int last,
double Rnew[], int inew,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[], bool zero,
double ratio_grad[], int numWalkers, bool use_fast_image)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS = 32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
// if (use_fast_image)
// one_body_ratio_grad_kernel_fast<double,BS><<<dimGrid,dimBlock>>>
// (C, R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
// lattice, latticeInv, zero, ratio_grad);
// else
one_body_ratio_grad_PBC_kernel<double,BS><<<dimGrid,dimBlock, 0, gpu::kernelStream>>>
(C, R, first, last, Rnew, inew, spline_coefs, numCoefs, rMax,
lattice, latticeInv, zero, ratio_grad);
}
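// one_body_update_kernel: same slot-N to slot-iat coordinate copy as the
// two-body update kernel above.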
template<typename T>
__global__ void
one_body_update_kernel (T **R, int N, int iat)
{
__shared__ T* myR;
if (threadIdx.x == 0)
myR = R[blockIdx.x];
__syncthreads();
if (threadIdx.x < 3)
myR[3*iat + threadIdx.x] = myR[3*N + threadIdx.x];
}
void
one_body_update(float *R[], int N, int iat, int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
dim3 dimBlock(32);
dim3 dimGrid(numWalkers);
one_body_update_kernel<float><<<dimGrid, dimBlock>>> (R, N, iat);
}
void
one_body_update(double *R[], int N, int iat, int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
dim3 dimBlock(3);
dim3 dimGrid(numWalkers);
one_body_update_kernel<double><<<dimGrid, dimBlock>>> (R, N, iat);
}
template<typename T, int BS>
__global__ void
one_body_grad_lapl_PBC_kernel(T *C, T **R, int cfirst, int clast,
int efirst, int elast,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T* latticeInv,
T *gradLapl, int row_stride)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR;
if (tid == 0)
myR = R[blockIdx.x];
__shared__ T coefs[MAX_COEFS];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
__shared__ T r[BS][3], c[BS][3];
/*
__shared__ T L[3][3], Linv[3][3];
if (tid < 9) {
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__syncthreads();
// if (tid == 31)
// printf ("1) coefs[] = %1.5f %1.5f %1.5f %1.5f %1.5f %1.5f %1.5f\n",
// coefs[0], coefs[1], coefs[2], coefs[3],
// coefs[4], coefs[5], coefs[6], coefs[7]);
int index=0;
__shared__ T images[27][3];
if (tid < 3)
for (T i=-1.0; i<=1.001; i+=1.0)
for (T j=-1.0; j<=1.001; j+=1.0)
for (T k=-1.0; k<=1.001; k+=1.0) {
images[index][tid] =
i*L[0][tid] + j*L[1][tid] + k*L[2][tid];
index++;
}
__syncthreads();
__shared__ T A[12][4];
if (tid < 16) {
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
*/
__syncthreads();
int Nc = clast - cfirst + 1;
int Ne = elast - efirst + 1;
int NBc = (Nc+BS-1)/BS;
int NBe = (Ne+BS-1)/BS;
__shared__ T sGradLapl[BS][4];
for (int be=0; be < NBe; be++)
{
// if (tid == 31)
// printf ("2) coefs[] = %1.5f %1.5f %1.5f %1.5f %1.5f %1.5f %1.5f\n",
// coefs[0], coefs[1], coefs[2], coefs[3],
// coefs[4], coefs[5], coefs[6], coefs[7]);
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*be+i)*BS + tid < 3*Ne)
r[0][i*BS + tid] = myR[3*efirst + (3*be+i)*BS + tid];
__syncthreads();
int eptcl = efirst+be*BS + tid;
int offset = blockIdx.x * row_stride + 4*be*BS + 4*efirst;
sGradLapl[tid][0] = sGradLapl[tid][1] =
sGradLapl[tid][2] = sGradLapl[tid][3] = (T)0.0;
for (int bc=0; bc < NBc; bc++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*bc+i)*BS + tid < 3*Nc)
c[0][i*BS + tid] = C[3*cfirst + (3*bc+i)*BS + tid];
__syncthreads();
// Now, loop over particles
int end = ((bc+1)*BS < Nc) ? BS : Nc-bc*BS;
for (int j=0; j<end; j++)
{
int cptcl = cfirst + bc*BS+j;
T dx, dy, dz, u, du, d2u;
dx = r[tid][0] - c[j][0];
dy = r[tid][1] - c[j][1];
dz = r[tid][2] - c[j][2];
T dist = CMC_min_dist(dx, dy, dz/*, L, Linv, images*/);
// if (isinf(coefs[0]))
// printf ("3) c0=%1.5f coefs[] = %1.5f %1.5f %1.5f %1.5f %1.5f %1.5f %1.5f tid=%d\n", c0,
// coefs[0], spline_coefs[1], coefs[2], coefs[3],
// coefs[4], coefs[5], coefs[6], coefs[7], tid);
CMC_eval_1d_spline_vgl (dist, rMax, drInv/*, A*/, coefs, u, du, d2u);
// if (isinf(coefs[0]))
// printf ("4) coefs[] = %1.5f %1.5f %1.5f %1.5f %1.5f %1.5f %1.5f tid=%d\n",
// coefs[0], coefs[1], coefs[2], coefs[3],
// coefs[4], coefs[5], coefs[6], coefs[7], tid);
// printf("drInv=%1.5f dist=%1.5f coefs[1]=%1.5f A[0]=%1.5f\n",
// drInv, dist, coefs[1], A[0]);
if (cptcl < (Nc+cfirst) && (eptcl < (Ne+efirst)))
{
du /= dist;
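// Radial chain rule: grad u = (du/dr)/r * (dx,dy,dz) and lapl u = d2u/dr2 + (2/r)*du/dr; du has just been divided by r.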
sGradLapl[tid][0] -= du * dx;
sGradLapl[tid][1] -= du * dy;
sGradLapl[tid][2] -= du * dz;
sGradLapl[tid][3] -= d2u + 2.0*du;
}
}
__syncthreads();
}
__syncthreads();
for (int i=0; i<4; i++)
if ((4*be+i)*BS + tid < 4*Ne)
gradLapl[offset + i*BS +tid] += sGradLapl[0][i*BS+tid];
__syncthreads();
}
}
void
one_body_grad_lapl_PBC(float C[], float *R[], int e1_first, int e1_last,
int e2_first, int e2_last,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[],
float gradLapl[], int row_stride, int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
one_body_grad_lapl_PBC_kernel<float,BS><<<dimGrid,dimBlock>>>
(C, R, e1_first, e1_last, e2_first, e2_last, spline_coefs, numCoefs,
rMax, lattice, latticeInv, gradLapl, row_stride);
}
void
one_body_grad_lapl_PBC(double C[], double *R[], int e1_first, int e1_last,
int e2_first, int e2_last,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[],
double gradLapl[], int row_stride, int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
one_body_grad_lapl_PBC_kernel<double,BS><<<dimGrid,dimBlock>>>
(C, R, e1_first, e1_last, e2_first, e2_last, spline_coefs, numCoefs,
rMax, lattice, latticeInv, gradLapl, row_stride);
}
template<int BS>
__global__ void
one_body_NLratio_PBC_kernel(NLjobGPU<float> *jobs, float *C, int first, int last,
float *spline_coefs, int numCoefs, float rMax,
float *lattice, float *latticeInv)
{
const int MAX_RATIOS = 18;
int tid = threadIdx.x;
__shared__ NLjobGPU<float> myJob;
__shared__ float myRnew[MAX_RATIOS][3], myRold[3];
if (tid == 0)
myJob = jobs[blockIdx.x];
__syncthreads();
if (tid < 3 )
myRold[tid] = myJob.R[3*myJob.Elec+tid];
for (int i=0; i<3; i++)
if (i*BS + tid < 3*myJob.NumQuadPoints)
myRnew[0][i*BS+tid] = myJob.QuadPoints[i*BS+tid];
__syncthreads();
float dr = rMax/(float)(numCoefs-3);
float drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
__shared__ float coefs[MAX_COEFS];
__shared__ float c[BS][3];
/*
__shared__ float L[3][3], Linv[3][3];
*/
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
/*
if (tid < 9) {
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__syncthreads();
int index=0;
__shared__ float images[27][3];
if (tid < 3)
for (float i=-1.0; i<=1.001; i+=1.0)
for (float j=-1.0; j<=1.001; j+=1.0)
for (float k=-1.0; k<=1.001; k+=1.0) {
images[index][tid] =
i*L[0][tid] + j*L[1][tid] + k*L[2][tid];
index++;
}
__syncthreads();
__shared__ float A[4][4];
if (tid < 16)
A[(tid>>2)][tid&3] = AcudaSpline[tid];
*/
__syncthreads();
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
__shared__ float shared_sum[MAX_RATIOS][BS+1];
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
shared_sum[iq][tid] = (float)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*N)
c[0][n] = C[3*first + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
float dx, dy, dz;
dx = myRold[0] - c[tid][0];
dy = myRold[1] - c[tid][1];
dz = myRold[2] - c[tid][2];
float dist = CMC_min_dist_only(dx, dy, dz/*, L, Linv, images*/);
float uOld = CMC_eval_1d_spline (dist, rMax, drInv/*, A*/, coefs);
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
{
dx = myRnew[iq][0] - c[tid][0];
dy = myRnew[iq][1] - c[tid][1];
dz = myRnew[iq][2] - c[tid][2];
dist = CMC_min_dist_only(dx, dy, dz/*, L, Linv, images*/);
if (ptcl1 < (N+first))
shared_sum[iq][tid] += CMC_eval_1d_spline (dist, rMax, drInv/*, A*/, coefs) - uOld;
}
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
for (int iq=0; iq < myJob.NumQuadPoints; iq++)
shared_sum[iq][tid] += shared_sum[iq][tid+s];
__syncthreads();
}
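// Scale each quadrature point's nonlocal ratio by exp(-delta_u), where delta_u is the reduced sum for that point.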
if (tid < myJob.NumQuadPoints)
myJob.Ratios[tid] *= exp(-shared_sum[tid][0]);
}
template<int BS>
__global__ void
one_body_NLratio_PBC_kernel_fast(NLjobGPU<float> *jobs, float *C, int first, int last,
float *spline_coefs, int numCoefs, float rMax,
float *lattice, float *latticeInv)
{
const int MAX_RATIOS = 18;
int tid = threadIdx.x;
__shared__ NLjobGPU<float> myJob;
__shared__ float myRnew[MAX_RATIOS][3], myRold[3];
if (tid == 0)
myJob = jobs[blockIdx.x];
__syncthreads();
if (tid < 3 )
myRold[tid] = myJob.R[3*myJob.Elec+tid];
for (int i=0; i<3; i++)
if (i*BS + tid < 3*myJob.NumQuadPoints)
myRnew[0][i*BS+tid] = myJob.QuadPoints[i*BS+tid];
__syncthreads();
float dr = rMax/(float)(numCoefs-3);
float drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
__shared__ float coefs[MAX_COEFS];
__shared__ float c[BS][3];
__shared__ float L[3][3], Linv[3][3];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ float A[4][4];
if (tid < 16)
A[(tid>>2)][tid&3] = AcudaSpline[tid];
__syncthreads();
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
__shared__ float shared_sum[MAX_RATIOS][BS+1];
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
shared_sum[iq][tid] = (float)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*N)
c[0][n] = C[3*first + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
float dx, dy, dz;
dx = myRold[0] - c[tid][0];
dy = myRold[1] - c[tid][1];
dz = myRold[2] - c[tid][2];
float dist = min_dist_fast(dx, dy, dz, L, Linv);
float uOld = eval_1d_spline (dist, rMax, drInv, A, coefs);
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
{
dx = myRnew[iq][0] - c[tid][0];
dy = myRnew[iq][1] - c[tid][1];
dz = myRnew[iq][2] - c[tid][2];
dist = min_dist_fast(dx, dy, dz, L, Linv);
if (ptcl1 < (N+first))
shared_sum[iq][tid] += eval_1d_spline (dist, rMax, drInv, A, coefs) - uOld;
}
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
for (int iq=0; iq < myJob.NumQuadPoints; iq++)
shared_sum[iq][tid] += shared_sum[iq][tid+s];
__syncthreads();
}
if (tid < myJob.NumQuadPoints)
myJob.Ratios[tid] *= exp(-shared_sum[tid][0]);
}
template<int BS>
__global__ void
one_body_NLratio_PBC_kernel(NLjobGPU<double> *jobs, double *C, int first, int last,
double *spline_coefs, int numCoefs, double rMax,
double *lattice, double *latticeInv)
{
const int MAX_RATIOS = 18;
int tid = threadIdx.x;
__shared__ NLjobGPU<double> myJob;
__shared__ double myRnew[MAX_RATIOS][3], myRold[3];
if (tid == 0)
myJob = jobs[blockIdx.x];
__syncthreads();
if (tid < 3 )
myRold[tid] = myJob.R[3*myJob.Elec+tid];
for (int i=0; i<3; i++)
if (i*BS + tid < 3*myJob.NumQuadPoints)
myRnew[0][i*BS+tid] = myJob.QuadPoints[i*BS+tid];
__syncthreads();
double dr = rMax/(double)(numCoefs-3);
double drInv = 1.0/dr;
__shared__ double coefs[MAX_COEFS];
__shared__ double c[BS][3];
__shared__ double L[3][3], Linv[3][3];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ double images[27][3];
int index=0;
if (tid < 3)
for (float i=-1.0; i<=1.001; i+=1.0)
for (float j=-1.0; j<=1.001; j+=1.0)
for (float k=-1.0; k<=1.001; k+=1.0)
{
images[index][tid] =
i*L[0][tid] + j*L[1][tid] + k*L[2][tid];
index++;
}
__syncthreads();
__shared__ double A[4][4];
if (tid < 16)
A[(tid>>2)][tid&3] = AcudaSpline[tid];
__syncthreads();
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
__shared__ double shared_sum[MAX_RATIOS][BS+1];
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
shared_sum[iq][tid] = (double)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
{
int n = i*BS + tid;
if ((3*b+i)*BS + tid < 3*N)
c[0][n] = C[3*first + (3*b+i)*BS + tid];
}
__syncthreads();
int ptcl1 = first+b*BS + tid;
double dx, dy, dz;
dx = myRold[0] - c[tid][0];
dy = myRold[1] - c[tid][1];
dz = myRold[2] - c[tid][2];
double dist = min_dist(dx, dy, dz, L, Linv, images);
double uOld = eval_1d_spline (dist, rMax, drInv, A, coefs);
for (int iq=0; iq<myJob.NumQuadPoints; iq++)
{
dx = myRnew[iq][0] - c[tid][0];
dy = myRnew[iq][1] - c[tid][1];
dz = myRnew[iq][2] - c[tid][2];
dist = min_dist(dx, dy, dz, L, Linv, images);
if (ptcl1 != myJob.Elec && (ptcl1 < (N+first)))
shared_sum[iq][tid] += eval_1d_spline (dist, rMax, drInv, A, coefs) - uOld;
}
__syncthreads();
}
for (int s=(BS>>1); s>0; s>>=1)
{
if (tid < s)
for (int iq=0; iq < myJob.NumQuadPoints; iq++)
shared_sum[iq][tid] += shared_sum[iq][tid+s];
__syncthreads();
}
if (tid < myJob.NumQuadPoints)
myJob.Ratios[tid] *= exp(-shared_sum[tid][0]);
}
void
one_body_NLratios_PBC(NLjobGPU<float> jobs[], float C[], int first, int last,
float spline_coefs[], int numCoefs, float rMax,
float lattice[], float latticeInv[], float sim_cell_radius,
int numjobs)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
CMC_PROFILING_BEGIN();
while (numjobs > 65535)
{
dim3 dimGrid(65535);
if (rMax <= sim_cell_radius)
{
// fprintf (stderr, "Using fast J1 NL kernel.\n");
one_body_NLratio_PBC_kernel_fast<BS><<<dimGrid,dimBlock>>>
(jobs, C, first, last, spline_coefs, numCoefs, rMax,
lattice, latticeInv);
}
else
{
// fprintf (stderr, "Using slow J1 NL kernel.\n");
one_body_NLratio_PBC_kernel<BS><<<dimGrid,dimBlock>>>
(jobs, C, first, last, spline_coefs, numCoefs, rMax,
lattice, latticeInv);
}
numjobs -= 65535;
jobs += 65535;
}
dim3 dimGrid(numjobs);
if (rMax <= sim_cell_radius)
{
// fprintf (stderr, "Using fast J1 NL kernel.\n");
one_body_NLratio_PBC_kernel_fast<BS><<<dimGrid,dimBlock>>>
(jobs, C, first, last, spline_coefs, numCoefs, rMax,
lattice, latticeInv);
}
else
{
// fprintf (stderr, "Using slow J1 NL kernel.\n");
one_body_NLratio_PBC_kernel<BS><<<dimGrid,dimBlock>>>
(jobs, C, first, last, spline_coefs, numCoefs, rMax,
lattice, latticeInv);
}
CMC_PROFILING_END();
}
void
one_body_NLratios_PBC(NLjobGPU<double> jobs[], double C[], int first, int last,
double spline_coefs[], int numCoefs, double rMax,
double lattice[], double latticeInv[], int numjobs)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
// The kernel indexes jobs by blockIdx.x only, so launch in chunks of at most 65535 blocks.
while (numjobs > 65535)
{
one_body_NLratio_PBC_kernel<BS><<<dim3(65535),dimBlock>>>
(jobs, C, first, last, spline_coefs, numCoefs, rMax, lattice, latticeInv);
numjobs -= 65535;
jobs += 65535;
}
dim3 dimGrid(numjobs);
one_body_NLratio_PBC_kernel<BS><<<dimGrid,dimBlock>>>
(jobs, C, first, last, spline_coefs, numCoefs, rMax,
lattice, latticeInv);
}
template<typename T, int BS>
__global__ void
one_body_grad_PBC_kernel(T **R, int iat, T *C, int first, int last,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T *latticeInv, bool zeroOut, T* grad)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR, r[3];
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
if (tid < 3)
r[tid] = myR[3*iat+tid];
__shared__ T coefs[MAX_COEFS];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
__shared__ T c[BS][3];
/*
__shared__ T L[3][3], Linv[3][3];
if (tid < 9) {
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[12][4];
if (tid < 16) {
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
__syncthreads();
int index=0;
__shared__ T images[27][3];
if (tid < 3)
for (T i=-1.0; i<=1.001; i+=1.0)
for (T j=-1.0; j<=1.001; j+=1.0)
for (T k=-1.0; k<=1.001; k+=1.0) {
images[index][tid] =
i*L[0][tid] + j*L[1][tid] + k*L[2][tid];
index++;
}
*/
__syncthreads();
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
__shared__ T sGrad[BS][3];
sGrad[tid][0] = sGrad[tid][1] = sGrad[tid][2] = (T)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b+i)*BS + tid < 3*N)
c[0][i*BS + tid] = C[3*first + (3*b+i)*BS + tid];
__syncthreads();
int ptcl1 = first+b*BS + tid;
T dx, dy, dz, u, du, d2u;
dx = r[0] - c[tid][0];
dy = r[1] - c[tid][1];
dz = r[2] - c[tid][2];
T dist = CMC_min_dist(dx, dy, dz/*, L, Linv, images*/);
CMC_eval_1d_spline_vgl (dist, rMax, drInv/*, A*/, coefs, u, du, d2u);
if (ptcl1 < (N+first))
{
du /= dist;
sGrad[tid][0] += du * dx;
sGrad[tid][1] += du * dy;
sGrad[tid][2] += du * dz;
}
__syncthreads();
}
// Do reduction across threads in block
for (int s=BS>>1; s>0; s>>=1)
{
if (tid < s)
{
sGrad[tid][0] += sGrad[tid+s][0];
sGrad[tid][1] += sGrad[tid+s][1];
sGrad[tid][2] += sGrad[tid+s][2];
}
__syncthreads();
}
if (tid < 3)
{
if (zeroOut)
grad[3*blockIdx.x + tid] = sGrad[0][tid];
else
grad[3*blockIdx.x + tid] += sGrad[0][tid];
}
}
template<typename T, int BS>
__global__ void
one_body_grad_PBC_kernel_fast(T **R, int iat, T *C, int first, int last,
T *spline_coefs, int numCoefs, T rMax,
T *lattice, T *latticeInv, bool zeroOut, T *grad)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR, r[3];
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
if (tid < 3)
r[tid] = myR[3*iat+tid];
__shared__ T coefs[MAX_COEFS];
if (tid < numCoefs)
coefs[tid] = spline_coefs[tid];
__shared__ T c[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[12][4];
if (tid < 16)
{
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
__syncthreads();
int N = last - first + 1;
int NB = N/BS + ((N % BS) ? 1 : 0);
__shared__ T sGrad[BS][3];
sGrad[tid][0] = sGrad[tid][1] = sGrad[tid][2] = (T)0.0;
for (int b=0; b < NB; b++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*b+i)*BS + tid < 3*N)
c[0][i*BS + tid] = C[3*first + (3*b+i)*BS + tid];
__syncthreads();
int ptcl1 = first+b*BS + tid;
T dx, dy, dz, u, du, d2u;
dx = r[0] - c[tid][0];
dy = r[1] - c[tid][1];
dz = r[2] - c[tid][2];
T dist = min_dist_fast(dx, dy, dz, L, Linv);
eval_1d_spline_vgl (dist, rMax, drInv, A, coefs, u, du, d2u);
if (ptcl1 < (N+first))
{
du /= dist;
sGrad[tid][0] += du * dx;
sGrad[tid][1] += du * dy;
sGrad[tid][2] += du * dz;
}
__syncthreads();
}
// Do reduction across threads in block
for (int s=BS>>1; s>0; s>>=1)
{
if (tid < s)
{
sGrad[tid][0] += sGrad[tid+s][0];
sGrad[tid][1] += sGrad[tid+s][1];
sGrad[tid][2] += sGrad[tid+s][2];
}
__syncthreads();
}
if (tid < 3)
{
if (zeroOut)
grad[3*blockIdx.x + tid] = sGrad[0][tid];
else
grad[3*blockIdx.x + tid] += sGrad[0][tid];
}
}
void
one_body_gradient_PBC (float *Rlist[], int iat, float C[], int first, int last,
float spline_coefs[], int num_coefs, float rMax,
float lattice[], float latticeInv[], float sim_cell_radius,
bool zeroSum, float grad[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
CMC_PROFILING_BEGIN();
// if (sim_cell_radius >= rMax)
// one_body_grad_kernel_fast<float,BS><<<dimGrid,dimBlock>>>
// (Rlist, iat, C, first, last, spline_coefs, num_coefs, rMax,
// L, Linv, zeroSum, grad);
// else
one_body_grad_PBC_kernel<float,BS><<<dimGrid,dimBlock, 0, gpu::kernelStream>>>
(Rlist, iat, C, first, last, spline_coefs, num_coefs, rMax,
lattice, latticeInv, zeroSum, grad);
CMC_PROFILING_END();
}
void
one_body_gradient_PBC (double *Rlist[], int iat, double C[], int first, int last,
double spline_coefs[], int num_coefs, double rMax,
double L[], double Linv[], bool zeroSum,
double grad[], int numWalkers)
{
if (!AisInitializedPBC)
cuda_spline_init_PBC();
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
one_body_grad_PBC_kernel<double,BS><<<dimGrid,dimBlock, 0, gpu::kernelStream>>>
(Rlist, iat, C, first, last, spline_coefs, num_coefs, rMax,
L, Linv, zeroSum, grad);
}
template<typename T, int BS>
__global__ void
one_body_derivs_PBC_kernel(T* C, T **R, T **gradLogPsi,
int cfirst, int clast,
int efirst, int elast,
int numCoefs, T rMax,
T *lattice, T *latticeInv,
T **derivs)
{
T dr = rMax/(T)(numCoefs-3);
T drInv = 1.0/dr;
__syncthreads();
// Safety for rounding error
rMax *= 0.999999f;
int tid = threadIdx.x;
__shared__ T *myR, *myGrad, *myDerivs;
if (tid == 0)
{
myR = R[blockIdx.x];
myGrad = gradLogPsi[blockIdx.x];
myDerivs = derivs[blockIdx.x];
}
__shared__ T sderivs[MAX_COEFS][2];
__shared__ T r[BS][3], c[BS][3];
__shared__ T L[3][3], Linv[3][3];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__shared__ T A[12][4];
if (tid < 16)
{
A[0+(tid>>2)][tid&3] = AcudaSpline[tid+0];
A[4+(tid>>2)][tid&3] = AcudaSpline[tid+16];
A[8+(tid>>2)][tid&3] = AcudaSpline[tid+32];
}
__syncthreads();
sderivs[tid][0] = T();
sderivs[tid][1] = T();
int Nc = clast - cfirst + 1;
int Ne = elast - efirst + 1;
int NBc = (Nc+BS-1)/BS;
int NBe = (Ne+BS-1)/BS;
__shared__ T sGrad[BS][3];
for (int be=0; be < NBe; be++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*be+i)*BS + tid < 3*Ne)
{
int outoff = i*BS+tid;
int inoff = outoff + 3*efirst + 3*be*BS;
r[0][outoff] = myR[inoff];
sGrad[0][outoff] = myGrad[inoff];
}
__syncthreads();
int eptcl = efirst+be*BS + tid;
for (int bc=0; bc < NBc; bc++)
{
// Load block of positions from global memory
for (int i=0; i<3; i++)
if ((3*bc+i)*BS + tid < 3*Nc)
c[0][i*BS + tid] = C[3*cfirst + (3*bc+i)*BS + tid];
__syncthreads();
// Now, loop over particles
int end = min(BS, Nc-bc*BS);
for (int j=0; j<end; j++)
{
T dx, dy, dz;
dx = c[j][0] - r[tid][0];
dy = c[j][1] - r[tid][1];
dz = c[j][2] - r[tid][2];
T dist = min_dist(dx, dy, dz, L, Linv);
T distInv = 1.0f/dist;
T s = dist * drInv;
T sf = floorf (s);
int index = (int)sf;
T t = s - sf;
T t2 = t*t;
T t3 = t*t2;
T v0 = (A[0][0]*t3 + A[0][1]*t2 + A[0][2]*t + A[0][3]);
T v1 = (A[1][0]*t3 + A[1][1]*t2 + A[1][2]*t + A[1][3]);
T v2 = (A[2][0]*t3 + A[2][1]*t2 + A[2][2]*t + A[2][3]);
T v3 = (A[3][0]*t3 + A[3][1]*t2 + A[3][2]*t + A[3][3]);
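// v0..v3 are the four cubic-spline basis values for this distance bin; accumulating them gives du/d(coefficient) for the four active coefficients.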
for (int id=0; id<BS; id++)
if (tid == id && eptcl <= elast && (dist < rMax))
{
sderivs[index+0][0] += v0;
sderivs[index+1][0] += v1;
sderivs[index+2][0] += v2;
sderivs[index+3][0] += v3;
}
T prefact = (dx*sGrad[tid][0] + dy*sGrad[tid][1] + dz*sGrad[tid][2])*distInv;
T du0 = drInv * (A[4][0]*t3 + A[4][1]*t2 + A[4][2]*t + A[4][3]);
T du1 = drInv * (A[5][0]*t3 + A[5][1]*t2 + A[5][2]*t + A[5][3]);
T du2 = drInv * (A[6][0]*t3 + A[6][1]*t2 + A[6][2]*t + A[6][3]);
T du3 = drInv * (A[7][0]*t3 + A[7][1]*t2 + A[7][2]*t + A[7][3]);
// This is the dot (gradu, grad_log_psi) term.
v0 = 2.0f* prefact * du0;
v1 = 2.0f* prefact * du1;
v2 = 2.0f* prefact * du2;
v3 = 2.0f* prefact * du3;
// This is the lapl u term
v0 -= drInv*drInv*(A[ 8][0]*t3 + A[ 8][1]*t2 + A[ 8][2]*t + A[ 8][3]) + 2.0f*du0*distInv;
v1 -= drInv*drInv*(A[ 9][0]*t3 + A[ 9][1]*t2 + A[ 9][2]*t + A[ 9][3]) + 2.0f*du1*distInv;
v2 -= drInv*drInv*(A[10][0]*t3 + A[10][1]*t2 + A[10][2]*t + A[10][3]) + 2.0f*du2*distInv;
v3 -= drInv*drInv*(A[11][0]*t3 + A[11][1]*t2 + A[11][2]*t + A[11][3]) + 2.0f*du3*distInv;
for (int id=0; id<BS; id++)
if (tid == id && eptcl <= elast && (dist < rMax))
{
sderivs[index+0][1] += v0;
sderivs[index+1][1] += v1;
sderivs[index+2][1] += v2;
sderivs[index+3][1] += v3;
}
}
__syncthreads();
}
}
sderivs[tid][1] *= 0.5f;
__syncthreads(); // ensure the scaled values are visible before the flattened reads below
if (tid < 2*numCoefs)
myDerivs[tid] = -sderivs[0][tid];
if (tid+BS < 2*numCoefs)
myDerivs[tid+BS] = -sderivs[0][tid+BS];
}
void
one_body_derivs_PBC(float C[], float *R[], float *gradLogPsi[],
int cfirst, int clast,
int efirst, int elast,
int numCoefs, float rMax,
float lattice[], float latticeInv[], float sim_cell_radius,
float *derivs[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
if (sim_cell_radius >= rMax)
one_body_derivs_PBC_kernel<float,BS><<<dimGrid,dimBlock>>>
(C, R, gradLogPsi, cfirst, clast, efirst, elast, numCoefs,
rMax, lattice, latticeInv, derivs);
else
one_body_derivs_PBC_kernel<float,BS><<<dimGrid,dimBlock>>>
(C, R, gradLogPsi, cfirst, clast, efirst, elast, numCoefs,
rMax, lattice, latticeInv, derivs);
}
void
one_body_derivs_PBC(double C[], double *R[], double *gradLogPsi[],
int cfirst, int clast,
int efirst, int elast,
int numCoefs, double rMax,
double lattice[], double latticeInv[], double sim_cell_radius,
double *derivs[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
if (sim_cell_radius >= rMax)
one_body_derivs_PBC_kernel<double,BS><<<dimGrid,dimBlock>>>
(C, R, gradLogPsi, cfirst, clast, efirst, elast, numCoefs,
rMax, lattice, latticeInv, derivs);
else
one_body_derivs_PBC_kernel<double,BS><<<dimGrid,dimBlock>>>
(C, R, gradLogPsi, cfirst, clast, efirst, elast, numCoefs,
rMax, lattice, latticeInv, derivs);
}
void testPBC()
{
dim3 dimBlock(32);
dim3 dimGrid(1000);
float *R[1000];
float L[9], Linv[9];
float spline_coefs[10];
float dr = 0.1;
float sum[1000];
two_body_sum_PBC_kernel<float,32><<<dimGrid,dimBlock>>>(R, 0, 100, 0, 100, spline_coefs, 10, dr,
L, Linv, sum);
}
|
834f2332109901f7370d5e2b1da6f80fa9d46f75.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "test_launch_cuda_native.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *scalar = NULL;
hipMalloc(&scalar, XSIZE*YSIZE);
float *vector = NULL;
hipMalloc(&vector, XSIZE*YSIZE);
int sxy = 1;
int sx = 1;
int sy = 1;
int sz = 1;
int stride = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
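// Round the matrix size up to a multiple of the block dimensions so the launch grid covers it completely.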
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
test_launch_cuda_native), dim3(gridBlock),dim3(threadBlock), 0, 0, scalar,vector,sxy,sx,sy,sz,stride);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
test_launch_cuda_native), dim3(gridBlock),dim3(threadBlock), 0, 0, scalar,vector,sxy,sx,sy,sz,stride);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
test_launch_cuda_native), dim3(gridBlock),dim3(threadBlock), 0, 0, scalar,vector,sxy,sx,sy,sz,stride);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 834f2332109901f7370d5e2b1da6f80fa9d46f75.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "test_launch_cuda_native.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *scalar = NULL;
cudaMalloc(&scalar, XSIZE*YSIZE);
float *vector = NULL;
cudaMalloc(&vector, XSIZE*YSIZE);
int sxy = 1;
int sx = 1;
int sy = 1;
int sz = 1;
int stride = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
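// Round the matrix size up to a multiple of the block dimensions so the launch grid covers it completely.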
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
test_launch_cuda_native<<<gridBlock,threadBlock>>>(scalar,vector,sxy,sx,sy,sz,stride);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
test_launch_cuda_native<<<gridBlock,threadBlock>>>(scalar,vector,sxy,sx,sy,sz,stride);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
test_launch_cuda_native<<<gridBlock,threadBlock>>>(scalar,vector,sxy,sx,sy,sz,stride);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
140274704ee3bc812ae75bbdbb794ac18d8279ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device.hpp"
#include "texture_binder.hpp"
#include<iostream>
using namespace kfusion::device;
using namespace std;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume initialization
namespace kfusion
{
namespace device
{
__global__ void clear_volume_kernel(TsdfVolume tsdf)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < tsdf.dims.x && y < tsdf.dims.y)
{
ushort2 *beg = tsdf.beg(x, y);
ushort2 *end = beg + tsdf.dims.x * tsdf.dims.y * tsdf.dims.z;
for(ushort2* pos = beg; pos != end; pos = tsdf.zstep(pos))
*pos = pack_tsdf (0.f, 0);
}
}
}
}
void kfusion::device::clear_volume(TsdfVolume volume)
{
dim3 block (32, 8);
dim3 grid (1, 1, 1);
grid.x = divUp (volume.dims.x, block.x);
grid.y = divUp (volume.dims.y, block.y);
hipLaunchKernelGGL(( clear_volume_kernel), dim3(grid), dim3(block), 0, 0, volume);
cudaSafeCall ( hipGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume integration
namespace kfusion
{
namespace device
{
texture<float, 2> dists_tex(0, hipFilterModePoint, hipAddressModeBorder, cudaCreateChannelDescHalf());
struct TsdfIntegrator
{
Aff3f vol2cam;
Projector proj;
int2 dists_size;
float tranc_dist_inv;
__kf_device__
void operator()(TsdfVolume& volume) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= volume.dims.x || y >= volume.dims.y)
return;
//float3 zstep = vol2cam.R * make_float3(0.f, 0.f, volume.voxel_size.z);
float3 zstep = make_float3(vol2cam.R.data[0].z, vol2cam.R.data[1].z, vol2cam.R.data[2].z) * volume.voxel_size.z;
float3 vx = make_float3(x * volume.voxel_size.x, y * volume.voxel_size.y, 0);
float3 vc = vol2cam * vx; //tranform from volume coo frame to camera one
TsdfVolume::elem_type* vptr = volume.beg(x, y);
for(int i = 0; i < volume.dims.z; ++i, vc += zstep, vptr = volume.zstep(vptr))
{
float2 coo = proj(vc);
//#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
// this is actually a workaround for Kepler: it doesn't return 0.f for texture
// fetches at out-of-border coordinates even in cudaAddressModeBorder mode
if (coo.x < 0 || coo.y < 0 || coo.x >= dists_size.x || coo.y >= dists_size.y)
continue;
//#endif
float Dp = tex2D(dists_tex, coo.x, coo.y);
if(Dp == 0 || vc.z <= 0)
continue;
float sdf = Dp - __fsqrt_rn(dot(vc, vc)); //Dp - norm(v)
if (sdf >= -volume.trunc_dist)
{
float tsdf = fmin(1.f, sdf * tranc_dist_inv);
//read and unpack
int weight_prev;
float tsdf_prev = unpack_tsdf (gmem::LdCs(vptr), weight_prev);
float tsdf_new = __fdividef(__fmaf_rn(tsdf_prev, weight_prev, tsdf), weight_prev + 1); // running average: the new sample enters with weight 1
int weight_new = min (weight_prev + 1, volume.max_weight); //max_weight=64
//pack and write
gmem::StCs(pack_tsdf (tsdf_new, weight_new), vptr);
}
} // for(;;)
}
};
__global__ void integrate_kernel( const TsdfIntegrator integrator, TsdfVolume volume) { integrator(volume); };
}
}
//void kfusion::device::integrate(const PtrStepSz<ushort>& dists, TsdfVolume& volume, const Aff3f& aff, const Projector& proj)
void kfusion::device::integrate(const PtrStepSz<float>& dists, TsdfVolume& volume, const Aff3f& aff, const Projector& proj)
{
TsdfIntegrator ti;
ti.dists_size = make_int2(dists.cols, dists.rows);
ti.vol2cam = aff;
ti.proj = proj;
ti.tranc_dist_inv = 1.f/volume.trunc_dist; //trunc_dist=0.04
//cout<<ti.tranc_dist_inv<<endl;
dists_tex.filterMode = hipFilterModePoint;
dists_tex.addressMode[0] = hipAddressModeBorder;
dists_tex.addressMode[1] = hipAddressModeBorder;
dists_tex.addressMode[2] = hipAddressModeBorder;
TextureBinder binder(dists, dists_tex, cudaCreateChannelDescHalf()); (void)binder; // bind the texture; TextureBinder is a helper class defined in texture_binder.hpp
dim3 block(32, 8);
dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y));
hipLaunchKernelGGL(( integrate_kernel), dim3(grid), dim3(block), 0, 0, ti, volume);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall ( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume ray casting
namespace kfusion
{
namespace device
{
__kf_device__ void intersect(float3 ray_org, float3 ray_dir, /*float3 box_min,*/ float3 box_max, float &tnear, float &tfar)
{
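// Slab test: the latest per-axis entry time becomes tnear and the earliest per-axis exit time becomes tfar.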
const float3 box_min = make_float3(0.f, 0.f, 0.f);
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.f/ray_dir.x, 1.f/ray_dir.y, 1.f/ray_dir.z);
float3 tbot = invR * (box_min - ray_org);
float3 ttop = invR * (box_max - ray_org);
// re-order intersections to find smallest and largest on each axis
float3 tmin = make_float3(fminf(ttop.x, tbot.x), fminf(ttop.y, tbot.y), fminf(ttop.z, tbot.z));
float3 tmax = make_float3(fmaxf(ttop.x, tbot.x), fmaxf(ttop.y, tbot.y), fmaxf(ttop.z, tbot.z));
// find the largest tmin and the smallest tmax
tnear = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
tfar = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
}
template<typename Vol>
__kf_device__ float interpolate(const Vol& volume, const float3& p_voxels)
{
float3 cf = p_voxels;
//rounding to negative infinity
int3 g = make_int3(__float2int_rd (cf.x), __float2int_rd (cf.y), __float2int_rd (cf.z));
if (g.x < 0 || g.x >= volume.dims.x - 1 || g.y < 0 || g.y >= volume.dims.y - 1 || g.z < 0 || g.z >= volume.dims.z - 1)
return numeric_limits<float>::quiet_NaN();
float a = cf.x - g.x;
float b = cf.y - g.y;
float c = cf.z - g.z;
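// Trilinear interpolation: blend the eight neighboring voxels with weights built from the fractional offsets (a, b, c).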
float tsdf = 0.f;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 0)) * (1 - a) * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 1)) * (1 - a) * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 0)) * (1 - a) * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 1)) * (1 - a) * b * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 0)) * a * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 1)) * a * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 0)) * a * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 1)) * a * b * c;
return tsdf;
}
struct TsdfRaycaster
{
TsdfVolume volume;
Aff3f aff;
Mat3f Rinv;
Vec3f volume_size;
Reprojector reproj;
float time_step;
float3 gradient_delta;
float3 voxel_size_inv;
TsdfRaycaster(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& _reproj);
__kf_device__
float fetch_tsdf(const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return unpack_tsdf(*volume(x, y, z));
}
__kf_device__
void operator()(PtrStepSz<ushort> depth, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
depth(y, x) = 0;
normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
// We subtract the voxel size to minimize checks later.
// Note: the origin of the volume coordinate system is placed
// in the center of voxel (0,0,0), not in the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
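// March along the ray in time_step increments until the TSDF changes sign from positive to negative (a surface crossing).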
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0);
depth(y, x) = static_cast<ushort>(vertex.z * 1000);
}
break;
}
} /* for (;;) */
}
__kf_device__
void operator()(PtrStepSz<Point> points, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= points.cols || y >= points.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
// We subtract the voxel size to minimize checks later.
// Note: the origin of the volume coordinate system is placed
// in the center of voxel (0,0,0), not in the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
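// Sign change found: refine the hit point by linear interpolation of the trilinearly sampled TSDF values at curr and next.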
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0.f);
points(y, x) = make_float4(vertex.x, vertex.y, vertex.z, 0.f);
}
break;
}
} /* for (;;) */
}
__kf_device__
float3 compute_normal(const float3& p) const
{
float3 n;
float Fx1 = interpolate(volume, make_float3(p.x + gradient_delta.x, p.y, p.z) * voxel_size_inv);
float Fx2 = interpolate(volume, make_float3(p.x - gradient_delta.x, p.y, p.z) * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
float Fy1 = interpolate(volume, make_float3(p.x, p.y + gradient_delta.y, p.z) * voxel_size_inv);
float Fy2 = interpolate(volume, make_float3(p.x, p.y - gradient_delta.y, p.z) * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
float Fz1 = interpolate(volume, make_float3(p.x, p.y, p.z + gradient_delta.z) * voxel_size_inv);
float Fz2 = interpolate(volume, make_float3(p.x, p.y, p.z - gradient_delta.z) * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
return normalized (n);
}
};
inline TsdfRaycaster::TsdfRaycaster(const TsdfVolume& _volume, const Aff3f& _aff, const Mat3f& _Rinv, const Reprojector& _reproj)
: volume(_volume), aff(_aff), Rinv(_Rinv), reproj(_reproj) {}
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<ushort> depth, PtrStep<Normal> normals)
{ raycaster(depth, normals); };
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<Point> points, PtrStep<Normal> normals)
{ raycaster(points, normals); };
}
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Depth& depth, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y));
hipLaunchKernelGGL(( raycast_kernel), dim3(grid), dim3(block), 0, 0, rc, (PtrStepSz<ushort>)depth, normals);
cudaSafeCall (hipGetLastError ());
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Points& points, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y));
hipLaunchKernelGGL(( raycast_kernel), dim3(grid), dim3(block), 0, 0, rc, (PtrStepSz<Point>)points, normals);
cudaSafeCall (hipGetLastError ());
}
////////////////////////////////////////////////////////////////////////////////////////
/// Volume cloud exctraction
namespace kfusion
{
namespace device
{
////////////////////////////////////////////////////////////////////////////////////////
///// Prefix Scan utility
enum ScanKind { exclusive, inclusive };
template<ScanKind Kind, class T>
__kf_device__ T scan_warp ( volatile T *ptr, const unsigned int idx = threadIdx.x )
{
const unsigned int lane = idx & 31; // index of thread in warp (0..31)
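// Hillis-Steele inclusive scan within a warp; relies on volatile shared memory and implicit warp-synchronous execution.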
if (lane >= 1) ptr[idx] = ptr[idx - 1] + ptr[idx];
if (lane >= 2) ptr[idx] = ptr[idx - 2] + ptr[idx];
if (lane >= 4) ptr[idx] = ptr[idx - 4] + ptr[idx];
if (lane >= 8) ptr[idx] = ptr[idx - 8] + ptr[idx];
if (lane >= 16) ptr[idx] = ptr[idx - 16] + ptr[idx];
if (Kind == inclusive)
return ptr[idx];
else
return (lane > 0) ? ptr[idx - 1] : 0;
}
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
struct FullScan6
{
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
TsdfVolume volume;
Aff3f aff;
FullScan6(const TsdfVolume& vol) : volume(vol) {}
__kf_device__ float fetch(int x, int y, int z, int& weight) const
{
return unpack_tsdf(*volume(x, y, z), weight);
}
__kf_device__ void operator () (PtrSz<Point> output) const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if __CUDA_ARCH__ < 200
__shared__ int cta_buffer[CTA_SIZE];
#endif
#if __CUDA_ARCH__ >= 120
if (__all (x >= volume.dims.x) || __all (y >= volume.dims.y))
return;
#else
if (Emulation::All(x >= volume.dims.x, cta_buffer) || Emulation::All(y >= volume.dims.y, cta_buffer))
return;
#endif
float3 V;
V.x = (x + 0.5f) * volume.voxel_size.x;
V.y = (y + 0.5f) * volume.voxel_size.y;
int ftid = Block::flattenedThreadId ();
for (int z = 0; z < volume.dims.z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < volume.dims.x && y < volume.dims.y)
{
int W;
float F = fetch(x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * volume.voxel_size.z;
//process dx
if (x + 1 < volume.dims.x)
{
int Wn;
float Fn = fetch(x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + volume.voxel_size.x;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.x = (V.x * fabs (Fn) + Vnx * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (x + 1 < volume.dims.x) */
//process dy
if (y + 1 < volume.dims.y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + volume.voxel_size.y;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.y = (V.y * fabs (Fn) + Vny * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (y + 1 < volume.dims.y) */
//process dz
//if (z + 1 < volume.dims.z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + volume.voxel_size.z;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.z = (V.z * fabs (Fn) + Vnz * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (z + 1 < volume.dims.z) */
} /* if (W != 0 && F != 1.f) */
} /* if (x < volume.dims.x && y < volume.dims.y) */
#if __CUDA_ARCH__ >= 200
/// note: count how many points this warp appended to its local arrays in the current iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#else
int tid = Block::flattenedThreadId();
cta_buffer[tid] = local_count;
int total_warp = Emulation::warp_reduce(cta_buffer, tid);
#endif
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
if (total_warp > 0)
{
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
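// Lane 0 reserves room for the whole warp with one atomicAdd; the exclusive scan gives each lane its slot within that reservation.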
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
Point *pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
{
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
*pos = make_float4(x, y, z, 0.f);
}
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < volume.dims.z - 1; ++z) */
///////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min ((int)output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
}
};
__global__ void extract_kernel(const FullScan6 fs, PtrSz<Point> output) { fs(output); }
struct ExtractNormals
{
typedef float8 float8;
TsdfVolume volume;
PtrSz<Point> points;
float3 voxel_size_inv;
float3 gradient_delta;
Aff3f aff;
Mat3f Rinv;
ExtractNormals(const TsdfVolume& vol) : volume(vol)
{
voxel_size_inv.x = 1.f/volume.voxel_size.x;
voxel_size_inv.y = 1.f/volume.voxel_size.y;
voxel_size_inv.z = 1.f/volume.voxel_size.z;
}
__kf_device__ int3 getVoxel (const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return make_int3 (x, y, z);
}
__kf_device__ void operator () (float4* output) const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = Rinv * (tr(points.data[idx]) - aff.t);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < volume.dims.x - 2 && g.y < volume.dims.y - 2 && g.z < volume.dims.z - 2)
{
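// Central differences of the interpolated TSDF along each axis give the (unnormalized) surface normal.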
float3 t;
t = point;
t.x += gradient_delta.x;
float Fx1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.x -= gradient_delta.x;
float Fx2 = interpolate(volume, t * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
t = point;
t.y += gradient_delta.y;
float Fy1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.y -= gradient_delta.y;
float Fy2 = interpolate(volume, t * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
t = point;
t.z += gradient_delta.z;
float Fz1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.z -= gradient_delta.z;
float Fz2 = interpolate(volume, t * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
n = normalized (aff.R * n);
}
output[idx] = make_float4(n.x, n.y, n.z, 0);
}
};
__global__ void extract_normals_kernel (const ExtractNormals en, float4* output) { en(output); }
}
}
size_t kfusion::device::extractCloud (const TsdfVolume& volume, const Aff3f& aff, PtrSz<Point> output)
{
typedef FullScan6 FS;
FS fs(volume);
fs.aff = aff;
dim3 block (FS::CTA_SIZE_X, FS::CTA_SIZE_Y);
dim3 grid (divUp (volume.dims.x, block.x), divUp (volume.dims.y, block.y));
hipLaunchKernelGGL(( extract_kernel), dim3(grid), dim3(block), 0, 0, fs, output);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
int size;
cudaSafeCall ( hipMemcpyFromSymbol (&size, output_count, sizeof(size)) );
return (size_t)size;
}
void kfusion::device::extractNormals (const TsdfVolume& volume, const PtrSz<Point>& points, const Aff3f& aff, const Mat3f& Rinv, float gradient_delta_factor, float4* output)
{
ExtractNormals en(volume);
en.points = points;
en.gradient_delta = volume.voxel_size * gradient_delta_factor;
en.aff = aff;
en.Rinv = Rinv;
dim3 block (256);
dim3 grid (divUp ((int)points.size, block.x));
hipLaunchKernelGGL(( extract_normals_kernel), dim3(grid), dim3(block), 0, 0, en, output);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
| 140274704ee3bc812ae75bbdbb794ac18d8279ea.cu | #include "device.hpp"
#include "texture_binder.hpp"
#include<iostream>
using namespace kfusion::device;
using namespace std;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume initialization
namespace kfusion
{
namespace device
{
__global__ void clear_volume_kernel(TsdfVolume tsdf)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < tsdf.dims.x && y < tsdf.dims.y)
{
ushort2 *beg = tsdf.beg(x, y);
ushort2 *end = beg + tsdf.dims.x * tsdf.dims.y * tsdf.dims.z;
for(ushort2* pos = beg; pos != end; pos = tsdf.zstep(pos))
*pos = pack_tsdf (0.f, 0);
}
}
}
}
void kfusion::device::clear_volume(TsdfVolume volume)
{
dim3 block (32, 8);
dim3 grid (1, 1, 1);
grid.x = divUp (volume.dims.x, block.x);
grid.y = divUp (volume.dims.y, block.y);
clear_volume_kernel<<<grid, block>>>(volume);
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume integration
namespace kfusion
{
namespace device
{
texture<float, 2> dists_tex(0, cudaFilterModePoint, cudaAddressModeBorder, cudaCreateChannelDescHalf());
struct TsdfIntegrator
{
Aff3f vol2cam;
Projector proj;
int2 dists_size;
float tranc_dist_inv;
__kf_device__
void operator()(TsdfVolume& volume) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= volume.dims.x || y >= volume.dims.y)
return;
//float3 zstep = vol2cam.R * make_float3(0.f, 0.f, volume.voxel_size.z);
float3 zstep = make_float3(vol2cam.R.data[0].z, vol2cam.R.data[1].z, vol2cam.R.data[2].z) * volume.voxel_size.z;
float3 vx = make_float3(x * volume.voxel_size.x, y * volume.voxel_size.y, 0);
float3 vc = vol2cam * vx; //tranform from volume coo frame to camera one
TsdfVolume::elem_type* vptr = volume.beg(x, y);
for(int i = 0; i < volume.dims.z; ++i, vc += zstep, vptr = volume.zstep(vptr))
{
float2 coo = proj(vc);
//#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
// this is actually a workaround for Kepler: it doesn't return 0.f for texture
// fetches at out-of-border coordinates even in cudaAddressModeBorder mode
if (coo.x < 0 || coo.y < 0 || coo.x >= dists_size.x || coo.y >= dists_size.y)
continue;
//#endif
float Dp = tex2D(dists_tex, coo.x, coo.y);
if(Dp == 0 || vc.z <= 0)
continue;
float sdf = Dp - __fsqrt_rn(dot(vc, vc)); //Dp - norm(v)
if (sdf >= -volume.trunc_dist)
{
float tsdf = fmin(1.f, sdf * tranc_dist_inv);
//read and unpack
int weight_prev;
float tsdf_prev = unpack_tsdf (gmem::LdCs(vptr), weight_prev);
float tsdf_new = __fdividef(__fmaf_rn(tsdf_prev, weight_prev, tsdf), weight_prev + 1); // running average: the new sample enters with weight 1
int weight_new = min (weight_prev + 1, volume.max_weight); //max_weight=64
//pack and write
gmem::StCs(pack_tsdf (tsdf_new, weight_new), vptr);
}
} // for(;;)
}
};
__global__ void integrate_kernel( const TsdfIntegrator integrator, TsdfVolume volume) { integrator(volume); };
}
}
//void kfusion::device::integrate(const PtrStepSz<ushort>& dists, TsdfVolume& volume, const Aff3f& aff, const Projector& proj)
void kfusion::device::integrate(const PtrStepSz<float>& dists, TsdfVolume& volume, const Aff3f& aff, const Projector& proj)
{
TsdfIntegrator ti;
ti.dists_size = make_int2(dists.cols, dists.rows);
ti.vol2cam = aff;
ti.proj = proj;
ti.tranc_dist_inv = 1.f/volume.trunc_dist; //trunc_dist=0.04
//cout<<ti.tranc_dist_inv<<endl;
dists_tex.filterMode = cudaFilterModePoint;
dists_tex.addressMode[0] = cudaAddressModeBorder;
dists_tex.addressMode[1] = cudaAddressModeBorder;
dists_tex.addressMode[2] = cudaAddressModeBorder;
TextureBinder binder(dists, dists_tex, cudaCreateChannelDescHalf()); (void)binder; // bind the texture; TextureBinder is a helper class defined in texture_binder.hpp
dim3 block(32, 8);
dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y));
integrate_kernel<<<grid, block>>>(ti, volume);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall ( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume ray casting
namespace kfusion
{
namespace device
{
__kf_device__ void intersect(float3 ray_org, float3 ray_dir, /*float3 box_min,*/ float3 box_max, float &tnear, float &tfar)
{
const float3 box_min = make_float3(0.f, 0.f, 0.f);
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.f/ray_dir.x, 1.f/ray_dir.y, 1.f/ray_dir.z);
float3 tbot = invR * (box_min - ray_org);
float3 ttop = invR * (box_max - ray_org);
// re-order intersections to find smallest and largest on each axis
float3 tmin = make_float3(fminf(ttop.x, tbot.x), fminf(ttop.y, tbot.y), fminf(ttop.z, tbot.z));
float3 tmax = make_float3(fmaxf(ttop.x, tbot.x), fmaxf(ttop.y, tbot.y), fmaxf(ttop.z, tbot.z));
// find the largest tmin and the smallest tmax
tnear = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
tfar = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
}
template<typename Vol>
__kf_device__ float interpolate(const Vol& volume, const float3& p_voxels)
{
float3 cf = p_voxels;
//rounding to negative infinity
int3 g = make_int3(__float2int_rd (cf.x), __float2int_rd (cf.y), __float2int_rd (cf.z));
if (g.x < 0 || g.x >= volume.dims.x - 1 || g.y < 0 || g.y >= volume.dims.y - 1 || g.z < 0 || g.z >= volume.dims.z - 1)
return numeric_limits<float>::quiet_NaN();
float a = cf.x - g.x;
float b = cf.y - g.y;
float c = cf.z - g.z;
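// Trilinear interpolation: blend the eight neighboring voxels with weights built from the fractional offsets (a, b, c).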
float tsdf = 0.f;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 0)) * (1 - a) * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 1)) * (1 - a) * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 0)) * (1 - a) * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 1)) * (1 - a) * b * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 0)) * a * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 1)) * a * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 0)) * a * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 1)) * a * b * c;
return tsdf;
}
struct TsdfRaycaster
{
TsdfVolume volume;
Aff3f aff;
Mat3f Rinv;
Vec3f volume_size;
Reprojector reproj;
float time_step;
float3 gradient_delta;
float3 voxel_size_inv;
TsdfRaycaster(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& _reproj);
__kf_device__
float fetch_tsdf(const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return unpack_tsdf(*volume(x, y, z));
}
__kf_device__
void operator()(PtrStepSz<ushort> depth, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
depth(y, x) = 0;
normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
// We subtract the voxel size to minimize checks later.
// Note: the origin of the volume coordinate system is placed
// in the center of voxel (0,0,0), not in the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
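// TSDF goes from positive to negative: the surface lies between curr and next; refine the crossing by linear interpolation of the two interpolated TSDF samples.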
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0);
depth(y, x) = static_cast<ushort>(vertex.z * 1000);
}
break;
}
} /* for (;;) */
}
__kf_device__
void operator()(PtrStepSz<Point> points, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= points.cols || y >= points.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
// We subtract the voxel size to minimize checks later.
// Note: the origin of the volume coordinate system is placed
// in the center of voxel (0,0,0), not in the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0.f);
points(y, x) = make_float4(vertex.x, vertex.y, vertex.z, 0.f);
}
break;
}
} /* for (;;) */
}
__kf_device__
float3 compute_normal(const float3& p) const
{
float3 n;
float Fx1 = interpolate(volume, make_float3(p.x + gradient_delta.x, p.y, p.z) * voxel_size_inv);
float Fx2 = interpolate(volume, make_float3(p.x - gradient_delta.x, p.y, p.z) * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
float Fy1 = interpolate(volume, make_float3(p.x, p.y + gradient_delta.y, p.z) * voxel_size_inv);
float Fy2 = interpolate(volume, make_float3(p.x, p.y - gradient_delta.y, p.z) * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
float Fz1 = interpolate(volume, make_float3(p.x, p.y, p.z + gradient_delta.z) * voxel_size_inv);
float Fz2 = interpolate(volume, make_float3(p.x, p.y, p.z - gradient_delta.z) * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
return normalized (n);
}
};
inline TsdfRaycaster::TsdfRaycaster(const TsdfVolume& _volume, const Aff3f& _aff, const Mat3f& _Rinv, const Reprojector& _reproj)
: volume(_volume), aff(_aff), Rinv(_Rinv), reproj(_reproj) {}
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<ushort> depth, PtrStep<Normal> normals)
{ raycaster(depth, normals); };
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<Point> points, PtrStep<Normal> normals)
{ raycaster(points, normals); };
}
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Depth& depth, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y));
raycast_kernel<<<grid, block>>>(rc, (PtrStepSz<ushort>)depth, normals);
cudaSafeCall (cudaGetLastError ());
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Points& points, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y));
raycast_kernel<<<grid, block>>>(rc, (PtrStepSz<Point>)points, normals);
cudaSafeCall (cudaGetLastError ());
}
////////////////////////////////////////////////////////////////////////////////////////
/// Volume cloud extraction
namespace kfusion
{
namespace device
{
////////////////////////////////////////////////////////////////////////////////////////
///// Prefix Scan utility
enum ScanKind { exclusive, inclusive };
template<ScanKind Kind, class T>
__kf_device__ T scan_warp ( volatile T *ptr, const unsigned int idx = threadIdx.x )
{
const unsigned int lane = idx & 31; // index of thread in warp (0..31)
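// Hillis-Steele inclusive scan within one warp: each step adds the value 2^k lanes back; the volatile pointer plus (pre-Volta) warp lockstep stands in for explicit synchronization.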
if (lane >= 1) ptr[idx] = ptr[idx - 1] + ptr[idx];
if (lane >= 2) ptr[idx] = ptr[idx - 2] + ptr[idx];
if (lane >= 4) ptr[idx] = ptr[idx - 4] + ptr[idx];
if (lane >= 8) ptr[idx] = ptr[idx - 8] + ptr[idx];
if (lane >= 16) ptr[idx] = ptr[idx - 16] + ptr[idx];
if (Kind == inclusive)
return ptr[idx];
else
return (lane > 0) ? ptr[idx - 1] : 0;
}
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
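// global_count accumulates extracted points across all blocks, output_count is the final (clamped) size read back by the host, and blocks_done lets the last block to finish reset the counters.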
struct FullScan6
{
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
TsdfVolume volume;
Aff3f aff;
FullScan6(const TsdfVolume& vol) : volume(vol) {}
__kf_device__ float fetch(int x, int y, int z, int& weight) const
{
return unpack_tsdf(*volume(x, y, z), weight);
}
__kf_device__ void operator () (PtrSz<Point> output) const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if __CUDA_ARCH__ < 200
__shared__ int cta_buffer[CTA_SIZE];
#endif
#if __CUDA_ARCH__ >= 120
if (__all (x >= volume.dims.x) || __all (y >= volume.dims.y))
return;
#else
if (Emulation::All(x >= volume.dims.x, cta_buffer) || Emulation::All(y >= volume.dims.y, cta_buffer))
return;
#endif
float3 V;
V.x = (x + 0.5f) * volume.voxel_size.x;
V.y = (y + 0.5f) * volume.voxel_size.y;
int ftid = Block::flattenedThreadId ();
for (int z = 0; z < volume.dims.z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < volume.dims.x && y < volume.dims.y)
{
int W;
float F = fetch(x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * volume.voxel_size.z;
//process dx
if (x + 1 < volume.dims.x)
{
int Wn;
float Fn = fetch(x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + volume.voxel_size.x;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.x = (V.x * fabs (Fn) + Vnx * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (x + 1 < volume.dims.x) */
//process dy
if (y + 1 < volume.dims.y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + volume.voxel_size.y;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.y = (V.y * fabs (Fn) + Vny * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (y + 1 < volume.dims.y) */
//process dz
//if (z + 1 < volume.dims.z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + volume.voxel_size.z;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.z = (V.z * fabs (Fn) + Vnz * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (z + 1 < volume.dims.z) */
} /* if (W != 0 && F != 1.f) */
} /* if (x < volume.dims.x && y < volume.dims.y) */
#if __CUDA_ARCH__ >= 200
/// note: count how many points this warp filled into its local arrays in this iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
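// Each __ballot/__popc pair counts the lanes that produced at least 1, 2 or 3 points; their sum is the number of points this warp appends.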
#else
int tid = Block::flattenedThreadId();
cta_buffer[tid] = local_count;
int total_warp = Emulation::warp_reduce(cta_buffer, tid);
#endif
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
if (total_warp > 0)
{
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
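// The exclusive scan of per-lane counts gives each lane its write offset inside this warp's slice of the staging buffers.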
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
Point *pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
{
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
*pos = make_float4(x, y, z, 0.f);
}
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < volume.dims.z - 1; ++z) */
///////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min ((int)output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
}
};
__global__ void extract_kernel(const FullScan6 fs, PtrSz<Point> output) { fs(output); }
struct ExtractNormals
{
typedef float8 float8;
TsdfVolume volume;
PtrSz<Point> points;
float3 voxel_size_inv;
float3 gradient_delta;
Aff3f aff;
Mat3f Rinv;
ExtractNormals(const TsdfVolume& vol) : volume(vol)
{
voxel_size_inv.x = 1.f/volume.voxel_size.x;
voxel_size_inv.y = 1.f/volume.voxel_size.y;
voxel_size_inv.z = 1.f/volume.voxel_size.z;
}
__kf_device__ int3 getVoxel (const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return make_int3 (x, y, z);
}
__kf_device__ void operator () (float4* output) const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = Rinv * (tr(points.data[idx]) - aff.t);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < volume.dims.x - 2 && g.y < volume.dims.y - 2 && g.z < volume.dims.z - 2)
{
float3 t;
t = point;
t.x += gradient_delta.x;
float Fx1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.x -= gradient_delta.x;
float Fx2 = interpolate(volume, t * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
t = point;
t.y += gradient_delta.y;
float Fy1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.y -= gradient_delta.y;
float Fy2 = interpolate(volume, t * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
t = point;
t.z += gradient_delta.z;
float Fz1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.z -= gradient_delta.z;
float Fz2 = interpolate(volume, t * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
n = normalized (aff.R * n);
}
output[idx] = make_float4(n.x, n.y, n.z, 0);
}
};
__global__ void extract_normals_kernel (const ExtractNormals en, float4* output) { en(output); }
}
}
size_t kfusion::device::extractCloud (const TsdfVolume& volume, const Aff3f& aff, PtrSz<Point> output)
{
typedef FullScan6 FS;
FS fs(volume);
fs.aff = aff;
dim3 block (FS::CTA_SIZE_X, FS::CTA_SIZE_Y);
dim3 grid (divUp (volume.dims.x, block.x), divUp (volume.dims.y, block.y));
extract_kernel<<<grid, block>>>(fs, output);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
int size;
cudaSafeCall ( cudaMemcpyFromSymbol (&size, output_count, sizeof(size)) );
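// output_count was written by the last block of extract_kernel and holds the number of points actually stored (clamped to the output buffer size).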
return (size_t)size;
}
void kfusion::device::extractNormals (const TsdfVolume& volume, const PtrSz<Point>& points, const Aff3f& aff, const Mat3f& Rinv, float gradient_delta_factor, float4* output)
{
ExtractNormals en(volume);
en.points = points;
en.gradient_delta = volume.voxel_size * gradient_delta_factor;
en.aff = aff;
en.Rinv = Rinv;
dim3 block (256);
dim3 grid (divUp ((int)points.size, block.x));
extract_normals_kernel<<<grid, block>>>(en, output);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
|
690b87cd2481b2d723e1f29fd77318e3efbea56d.hip | // !!! This is a file automatically generated by hipify!!!
#define __BSD_SOURCE
#include <math.h> // fabsf
#include <stdlib.h> // malloc/free
#include <stdio.h> // printf
#include <time.h> // time
#include <sys/time.h> // gettimeofday, timersub
#include <hip/hip_runtime.h> // CUDA API
#include <cutil_inline.h> // Error-checking helper functions
#define N 1024
// block dimensions
#define BLOCK_WIDTH 32
#define BLOCK_HEIGHT 16
// index of a 2D coordinate of an NxN matrix
// within the array that stores it
__host__ __device__ __inline__ uint index(uint y, uint x) {
return x + y * N;
}
// multiplication of two NxN matrices using shared memory
__global__ void mm_shared(const float * a, const float * b, float * c) {
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < N && y < N){
// shared tile where we temporarily stage part of A
__shared__ float tmp_a[BLOCK_HEIGHT][BLOCK_WIDTH];
// temporary accumulator for the result
float result = 0.0f;
// advance block by block
for (uint i = 0; i < N; i += BLOCK_WIDTH) {
// copy the whole block of A into shared memory
tmp_a[threadIdx.y][threadIdx.x] = a[index(y, i + threadIdx.x)];
// wait until every thread has copied its value
// barrier removed because the block width is one warp
//__syncthreads();
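// Warp-synchronous assumption: blockDim.x == warpSize and each warp reads only the tmp_a row it wrote itself; on architectures with independent thread scheduling (Volta+) a __syncwarp() here would be the safer choice.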
// update result with the values of A currently in shared memory
for (uint j = 0; j < BLOCK_WIDTH; ++j) {
result += tmp_a[threadIdx.y][j] * b[index(i + j, x)];
}
// wait for all threads to finish before overwriting tmp_a
__syncthreads();
}
// store the final result
c[index(y,x)] = result;
}
}
// straightforward ikj reference implementation on the CPU
// with some luck the compiler vectorizes it
static void mm_cpu(const float * a, const float * b, float * c) {
for (uint y = 0; y < N; ++y) {
for (uint x = 0; x < N; ++x) {
c[index(y,x)] = 0.0f;
}
for (uint k = 0; k < N; ++k) {
for (uint x = 0; x < N; ++x) {
c[index(y, x)] += a[index(y, k)] * b[index(k, x)];
}
}
}
}
// compare two results and list significant differences
static void check_result(const float * reference, const float * other) {
for (uint y = 0; y < N; ++y) {
for (uint x = 0; x < N; ++x) {
if (fabsf(reference[index(y, x)] - other[index(y, x)]) > 0.001f) {
printf("y:%u x:%u reference:%f result:%f\n", y, x, reference[index(y, x)], other[index(y, x)]);
}
}
}
}
int main(int argc, char *argv[]) {
// allocate host memory
size_t matrix_size = N * N * sizeof(float);
float * host_a = (float *) malloc(matrix_size);
float * host_b = (float *) malloc(matrix_size);
float * host_c = (float *) malloc(matrix_size);
float * host_c_reference = (float *) malloc(matrix_size);
// fill A and B with random numbers
srand(time(0));
for (uint y = 0; y < N; ++y) {
for (uint x = 0; x < N; ++x) {
host_a[index(y, x)] = (float) rand() / RAND_MAX;
host_b[index(y, x)] = (float) rand() / RAND_MAX;
}
}
// run on the CPU and time it
struct timeval start, finish, elapsed;
double cpusecs;
gettimeofday(&start, NULL);
mm_cpu(host_a, host_b, host_c_reference);
gettimeofday(&finish, NULL);
timersub(&finish, &start, &elapsed);
cpusecs = elapsed.tv_sec + elapsed.tv_usec / 1000000.0;
printf("CPU time: %f\n", cpusecs);
// allocate GPU memory for A, B and C
float * dev_a;
float * dev_b;
float * dev_c;
cutilSafeCall(hipMalloc((void **) &dev_a, matrix_size));
cutilSafeCall(hipMalloc((void **) &dev_b, matrix_size));
cutilSafeCall(hipMalloc((void **) &dev_c, matrix_size));
// copy A and B to the device
cutilSafeCall(hipMemcpy(dev_a, host_a, matrix_size, hipMemcpyDefault));
cutilSafeCall(hipMemcpy(dev_b, host_b, matrix_size, hipMemcpyDefault));
// configure the grid and launch the kernel
dim3 block(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 grid(N/block.x, N/block.y);
hipLaunchKernelGGL(( mm_shared), dim3(grid), dim3(block), 0, 0, dev_a, dev_b, dev_c);
// wait for it to finish
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("shared_a");
// copy data back to the host and verify the result
cutilSafeCall(hipMemcpy(host_c, dev_c, matrix_size, hipMemcpyDefault));
check_result(host_c_reference, host_c);
// free memory
free(host_a);
free(host_b);
free(host_c);
free(host_c_reference);
cutilSafeCall(hipFree(dev_a));
cutilSafeCall(hipFree(dev_b));
cutilSafeCall(hipFree(dev_c));
return 0;
}
| 690b87cd2481b2d723e1f29fd77318e3efbea56d.cu | #define __BSD_SOURCE
#include <math.h> // fabsf
#include <stdlib.h> // malloc/free
#include <stdio.h> // printf
#include <time.h> // time
#include <sys/time.h> // gettimeofday, timersub
#include <cuda.h> // CUDA API
#include <cutil_inline.h> // Error-checking helper functions
#define N 1024
// block dimensions
#define BLOCK_WIDTH 32
#define BLOCK_HEIGHT 16
// index of a 2D coordinate of an NxN matrix
// within the array that stores it
__host__ __device__ __inline__ uint index(uint y, uint x) {
return x + y * N;
}
// multiplication of two NxN matrices using shared memory
__global__ void mm_shared(const float * a, const float * b, float * c) {
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < N && y < N){
// shared tile where we temporarily stage part of A
__shared__ float tmp_a[BLOCK_HEIGHT][BLOCK_WIDTH];
// temporary accumulator for the result
float result = 0.0f;
// advance block by block
for (uint i = 0; i < N; i += BLOCK_WIDTH) {
// copy the whole block of A into shared memory
tmp_a[threadIdx.y][threadIdx.x] = a[index(y, i + threadIdx.x)];
// wait until every thread has copied its value
// barrier removed because the block width is one warp
//__syncthreads();
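// Warp-synchronous assumption: blockDim.x == warpSize and each warp reads only the tmp_a row it wrote itself; on architectures with independent thread scheduling (Volta+) a __syncwarp() here would be the safer choice.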
// update result with the values of A currently in shared memory
for (uint j = 0; j < BLOCK_WIDTH; ++j) {
result += tmp_a[threadIdx.y][j] * b[index(i + j, x)];
}
// wait for all threads to finish before overwriting tmp_a
__syncthreads();
}
// store the final result
c[index(y,x)] = result;
}
}
// straightforward ikj reference implementation on the CPU
// with some luck the compiler vectorizes it
static void mm_cpu(const float * a, const float * b, float * c) {
for (uint y = 0; y < N; ++y) {
for (uint x = 0; x < N; ++x) {
c[index(y,x)] = 0.0f;
}
for (uint k = 0; k < N; ++k) {
for (uint x = 0; x < N; ++x) {
c[index(y, x)] += a[index(y, k)] * b[index(k, x)];
}
}
}
}
// compare two results and list significant differences
static void check_result(const float * reference, const float * other) {
for (uint y = 0; y < N; ++y) {
for (uint x = 0; x < N; ++x) {
if (fabsf(reference[index(y, x)] - other[index(y, x)]) > 0.001f) {
printf("y:%u x:%u reference:%f result:%f\n", y, x, reference[index(y, x)], other[index(y, x)]);
}
}
}
}
int main(int argc, char *argv[]) {
// allocate host memory
size_t matrix_size = N * N * sizeof(float);
float * host_a = (float *) malloc(matrix_size);
float * host_b = (float *) malloc(matrix_size);
float * host_c = (float *) malloc(matrix_size);
float * host_c_reference = (float *) malloc(matrix_size);
// fill A and B with random numbers
srand(time(0));
for (uint y = 0; y < N; ++y) {
for (uint x = 0; x < N; ++x) {
host_a[index(y, x)] = (float) rand() / RAND_MAX;
host_b[index(y, x)] = (float) rand() / RAND_MAX;
}
}
// run on the CPU and time it
struct timeval start, finish, elapsed;
double cpusecs;
gettimeofday(&start, NULL);
mm_cpu(host_a, host_b, host_c_reference);
gettimeofday(&finish, NULL);
timersub(&finish, &start, &elapsed);
cpusecs = elapsed.tv_sec + elapsed.tv_usec / 1000000.0;
printf("CPU time: %f\n", cpusecs);
// allocate GPU memory for A, B and C
float * dev_a;
float * dev_b;
float * dev_c;
cutilSafeCall(cudaMalloc((void **) &dev_a, matrix_size));
cutilSafeCall(cudaMalloc((void **) &dev_b, matrix_size));
cutilSafeCall(cudaMalloc((void **) &dev_c, matrix_size));
// copy A and B to the device
cutilSafeCall(cudaMemcpy(dev_a, host_a, matrix_size, cudaMemcpyDefault));
cutilSafeCall(cudaMemcpy(dev_b, host_b, matrix_size, cudaMemcpyDefault));
// configure the grid and launch the kernel
dim3 block(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 grid(N/block.x, N/block.y);
mm_shared<<<grid, block>>>(dev_a, dev_b, dev_c);
// wait for it to finish
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg("shared_a");
// copy data back to the host and verify the result
cutilSafeCall(cudaMemcpy(host_c, dev_c, matrix_size, cudaMemcpyDefault));
check_result(host_c_reference, host_c);
// free memory
free(host_a);
free(host_b);
free(host_c);
free(host_c_reference);
cutilSafeCall(cudaFree(dev_a));
cutilSafeCall(cudaFree(dev_b));
cutilSafeCall(cudaFree(dev_c));
return 0;
}
|
247237f164aab498a6ff7203aed622d955163dcc.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@author Hartwig Anzt
@author Goran Flegar
@generated from sparse/blas/zgeisai_batched32.cu, normal z -> c, Sun Nov 20 20:20:42 2016
*/
#include "magmasparse_internal.h"
#include <hip/hip_runtime_api.h>
#define PRECISION_c
#define COMPLEX
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#if (TORCH_HIP_VERSION >= 7000) // only for CUDA >= 7.0
const int MaxBlockSize = 32;
template <int block_size>
__device__ void
magma_clowerisai_regs_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row+1 ];
magmaFloatComplex rB; // registers for trsv
magmaFloatComplex dA[ block_size ]; // registers for trisystem
magmaFloatComplex rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_C_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t ];
int alim = Arow[ t+1 ];
int l = mstart;
int idx = 0;
while( k < alim && l < mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k++;
l++;
idx++;
} else if( acol < mcol ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like
rB = ( tid == 0 ) ? MAGMA_C_ONE : MAGMA_C_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < block_size; k++)
{
rA = dA[ k ];
if (k % block_size == tid)
rB /= rA;
magmaFloatComplex top = __shfl(rB, k % block_size);
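// __shfl broadcasts the component solved by lane k to every lane of the warp so the remaining lanes can eliminate it.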
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_clowerisai_regs_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
if (N == block_size) {
magma_clowerisai_regs_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_clowerisai_regs_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
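// The recursion above peels block_size down from MaxBlockSize until it equals the runtime size N, so the kernel body is instantiated with a compile-time block size (required for the fixed-size register array dA and the #pragma unroll loops).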
/*
template <int block_size, template <int> class func>
class Switcher {
public:
static __device__ void
switch_func(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
if (N == block_size) {
func<block_size>(num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
Switcher<block_size-1,func>::switch_func(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
};
template<template <int> class func>
class Switcher<0, func> {
public:
static __device__ void
switch_func(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
};
*/
template <>
__device__ __forceinline__ void
magma_clowerisai_regs_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_clowerisai_regs_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
//Switcher<MaxBlockSize, magma_clowerisai_regs_kernel>::switch_func(
magma_clowerisai_regs_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <int block_size>
__device__ void
magma_cupperisai_regs_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row+1 ];
magmaFloatComplex rB; // registers for trsv
magmaFloatComplex dA[ block_size ]; // registers for trisystem
magmaFloatComplex rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_C_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t ];
int alim = Arow[ t+1 ];
int l = mstart;
int idx = 0;
while( k < alim && l < mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k++;
l++;
idx++;
} else if( acol < mcol ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like
rB = ( tid == block_size-1 ) ? MAGMA_C_ONE : MAGMA_C_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = block_size-1; k >-1; k--)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
magmaFloatComplex bottom = __shfl(rB, k%block_size);
if ( tid < k)
rB -= (bottom*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_cupperisai_regs_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
if (N == block_size) {
magma_cupperisai_regs_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_cupperisai_regs_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_cupperisai_regs_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_cupperisai_regs_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_cupperisai_regs_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <int block_size>
__device__ void
magma_clowerisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
magmaFloatComplex rB; // registers for trsv
magmaFloatComplex dA[ block_size ]; // registers for trisystem
magmaFloatComplex rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_C_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
l--; // check next element in the sparsity pattern
idx--; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like
rB = ( tid == 0 ) ? MAGMA_C_ONE : MAGMA_C_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < block_size; k++)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
magmaFloatComplex top = __shfl(rB, k%block_size);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_clowerisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
if (N == block_size) {
magma_clowerisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_clowerisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_clowerisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_clowerisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_clowerisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <int block_size>
__device__ void
magma_cupperisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
magmaFloatComplex rB; // registers for trsv
magmaFloatComplex dA[ block_size ]; // registers for trisystem
magmaFloatComplex rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_C_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
l--; // check next element in the sparsity pattern
idx--; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like
rB = ( tid == block_size-1 ) ? MAGMA_C_ONE : MAGMA_C_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = block_size-1; k >-1; k--)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
magmaFloatComplex bottom = __shfl(rB, k%block_size);
if ( tid < k)
rB -= (bottom*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_cupperisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
if (N == block_size) {
magma_cupperisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_cupperisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_cupperisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_cupperisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_cupperisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
#endif
/**
Purpose
-------
This routine is designed to combine all kernels into one.
Arguments
---------
@param[in]
uplotype magma_uplo_t
lower or upper triangular
@param[in]
transtype magma_trans_t
possibility for transposed matrix
@param[in]
diagtype magma_diag_t
unit diagonal or not
@param[in]
L magma_c_matrix
triangular factor for which the ISAI matrix is computed.
Col-Major CSR storage.
@param[in,out]
M magma_c_matrix*
SPAI preconditioner CSR col-major
@param[out]
sizes magma_int_t*
Number of Elements that are replaced.
@param[out]
locations magma_int_t*
Array indicating the locations.
@param[out]
trisystems magmaFloatComplex*
trisystems
@param[out]
rhs magmaFloatComplex*
right-hand sides
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
extern "C" magma_int_t
magma_cisai_generator_regs(
magma_uplo_t uplotype,
magma_trans_t transtype,
magma_diag_t diagtype,
magma_c_matrix L,
magma_c_matrix *M,
magma_index_t *sizes,
magma_index_t *locations,
magmaFloatComplex *trisystems,
magmaFloatComplex *rhs,
magma_queue_t queue )
{
magma_int_t info = 0;
#if (TORCH_HIP_VERSION >= 7000)
magma_int_t arch = magma_getdevice_arch();
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
// routine 1
// int r1bs1 = 32;
// int r1bs2 = 1;
// int r1dg1 = min( int( sqrt( float( M->num_rows ))), 65535 );
// int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535);
// int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 );
// //printf(" grid: %d x %d x %d\n", r1dg1, r1dg2, r1dg3 );
// dim3 r1block( r1bs1, r1bs2, 1 );
// dim3 r1grid( r1dg1, r1dg2, r1dg3 );
int r2bs1 = 32;
int r2bs2 = 4;
int necessary_blocks = magma_ceildiv(L.num_rows, r2bs2);
int r2dg1 = min( int( sqrt( float( necessary_blocks ))), 65535 );
int r2dg2 = min(magma_ceildiv( necessary_blocks, r2dg1 ), 65535);
int r2dg3 = magma_ceildiv( necessary_blocks, r2dg1*r2dg2 );
dim3 r2block( r2bs1, r2bs2, 1 );
dim3 r2grid( r2dg1, r2dg2, r2dg3 );
// int r2bs1 = 32;
// int r2bs2 = 1;
// int r2dg1 = min( int( sqrt( float( magma_ceildiv( M->num_rows, r2bs2 )))), 65535);
// int r2dg2 = min(magma_ceildiv( M->num_rows, r2dg1 ), 65535);
// int r2dg3 = magma_ceildiv( M->num_rows, r2dg1*r2dg2 );
// dim3 r2block( r2bs1, r2bs2, 1 );
// dim3 r2grid( r2dg1, r2dg2, r2dg3 );
if (arch >= 300) {
if (uplotype == MagmaLower) { //printf("in here lower new kernel\n");
hipLaunchKernelGGL(( magma_clowerisai_regs_inv_switch), dim3(r2grid), dim3(r2block), 0, queue->cuda_stream() ,
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
else { // printf("in here upper new kernel\n");
hipLaunchKernelGGL(( magma_cupperisai_regs_inv_switch), dim3(r2grid), dim3(r2block), 0, queue->cuda_stream() ,
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
}
else {
printf( "%% error: ISAI preconditioner requires CUDA ARCHITECTURE >= 300.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
}
#else
// CUDA < 7000
printf( "%% error: ISAI preconditioner requires CUDA >= 7.0.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
#endif
return info;
}
| 247237f164aab498a6ff7203aed622d955163dcc.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@author Hartwig Anzt
@author Goran Flegar
@generated from sparse/blas/zgeisai_batched32.cu, normal z -> c, Sun Nov 20 20:20:42 2016
*/
#include "magmasparse_internal.h"
#include <cuda_profiler_api.h>
#define PRECISION_c
#define COMPLEX
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
#include <cuda.h> // for CUDA_VERSION
#if (CUDA_VERSION >= 7000) // only for CUDA >= 7.0
const int MaxBlockSize = 32;
template <int block_size>
__device__ void
magma_clowerisai_regs_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row+1 ];
magmaFloatComplex rB; // registers for trsv
magmaFloatComplex dA[ block_size ]; // registers for trisystem
magmaFloatComplex rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_C_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t ];
int alim = Arow[ t+1 ];
int l = mstart;
int idx = 0;
while( k < alim && l < mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k++;
l++;
idx++;
} else if( acol < mcol ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like
rB = ( tid == 0 ) ? MAGMA_C_ONE : MAGMA_C_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < block_size; k++)
{
rA = dA[ k ];
if (k % block_size == tid)
rB /= rA;
magmaFloatComplex top = __shfl(rB, k % block_size);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_clowerisai_regs_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
if (N == block_size) {
magma_clowerisai_regs_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_clowerisai_regs_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
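// The recursion above peels block_size down from MaxBlockSize until it equals the runtime size N, so the kernel body is instantiated with a compile-time block size (required for the fixed-size register array dA and the #pragma unroll loops).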
/*
template <int block_size, template <int> class func>
class Switcher {
public:
static __device__ void
switch_func(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
if (N == block_size) {
func<block_size>(num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
Switcher<block_size-1,func>::switch_func(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
};
template<template <int> class func>
class Switcher<0, func> {
public:
static __device__ void
switch_func(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
};
*/
template <>
__device__ __forceinline__ void
magma_clowerisai_regs_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_clowerisai_regs_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
//Switcher<MaxBlockSize, magma_clowerisai_regs_kernel>::switch_func(
magma_clowerisai_regs_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <int block_size>
__device__ void
magma_cupperisai_regs_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row+1 ];
magmaFloatComplex rB; // registers for trsv
magmaFloatComplex dA[ block_size ]; // registers for trisystem
magmaFloatComplex rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_C_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t ];
int alim = Arow[ t+1 ];
int l = mstart;
int idx = 0;
while( k < alim && l < mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k++;
l++;
idx++;
} else if( acol < mcol ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like
rB = ( tid == block_size-1 ) ? MAGMA_C_ONE : MAGMA_C_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = block_size-1; k >-1; k--)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
magmaFloatComplex bottom = __shfl(rB, k%block_size);
if ( tid < k)
rB -= (bottom*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_cupperisai_regs_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
if (N == block_size) {
magma_cupperisai_regs_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_cupperisai_regs_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_cupperisai_regs_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_cupperisai_regs_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_cupperisai_regs_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <int block_size>
__device__ void
magma_clowerisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
magmaFloatComplex rB; // registers for trsv
magmaFloatComplex dA[ block_size ]; // registers for trisystem
magmaFloatComplex rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_C_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
l--; // check next element in the sparsity pattern
idx--; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like
rB = ( tid == 0 ) ? MAGMA_C_ONE : MAGMA_C_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < block_size; k++)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
magmaFloatComplex top = __shfl(rB, k%block_size);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_clowerisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
if (N == block_size) {
magma_clowerisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_clowerisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_clowerisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_clowerisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_clowerisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <int block_size>
__device__ void
magma_cupperisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
magmaFloatComplex rB; // registers for trsv
magmaFloatComplex dA[ block_size ]; // registers for trisystem
magmaFloatComplex rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_C_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
l--; // check next element in the sparsity pattern
idx--; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like
rB = ( tid == block_size-1 ) ? MAGMA_C_ONE : MAGMA_C_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = block_size-1; k >-1; k--)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
magmaFloatComplex bottom = __shfl(rB, k%block_size);
if ( tid < k)
rB -= (bottom*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_cupperisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
if (N == block_size) {
magma_cupperisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_cupperisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_cupperisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_cupperisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_cupperisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
#endif
/**
Purpose
-------
This routine is designed to combine all kernels into one.
Arguments
---------
@param[in]
uplotype magma_uplo_t
lower or upper triangular
@param[in]
transtype magma_trans_t
possibility for transposed matrix
@param[in]
diagtype magma_diag_t
unit diagonal or not
@param[in]
L magma_c_matrix
triangular factor for which the ISAI matrix is computed.
Col-Major CSR storage.
@param[in,out]
M magma_c_matrix*
SPAI preconditioner CSR col-major
@param[out]
sizes magma_int_t*
Number of Elements that are replaced.
@param[out]
locations magma_int_t*
Array indicating the locations.
@param[out]
trisystems magmaFloatComplex*
trisystems
@param[out]
rhs magmaFloatComplex*
right-hand sides
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
extern "C" magma_int_t
magma_cisai_generator_regs(
magma_uplo_t uplotype,
magma_trans_t transtype,
magma_diag_t diagtype,
magma_c_matrix L,
magma_c_matrix *M,
magma_index_t *sizes,
magma_index_t *locations,
magmaFloatComplex *trisystems,
magmaFloatComplex *rhs,
magma_queue_t queue )
{
magma_int_t info = 0;
#if (CUDA_VERSION >= 7000)
magma_int_t arch = magma_getdevice_arch();
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
// routine 1
// int r1bs1 = 32;
// int r1bs2 = 1;
// int r1dg1 = min( int( sqrt( float( M->num_rows ))), 65535 );
// int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535);
// int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 );
// //printf(" grid: %d x %d x %d\n", r1dg1, r1dg2, r1dg3 );
// dim3 r1block( r1bs1, r1bs2, 1 );
// dim3 r1grid( r1dg1, r1dg2, r1dg3 );
int r2bs1 = 32;
int r2bs2 = 4;
int necessary_blocks = magma_ceildiv(L.num_rows, r2bs2);
int r2dg1 = min( int( sqrt( float( necessary_blocks ))), 65535 );
int r2dg2 = min(magma_ceildiv( necessary_blocks, r2dg1 ), 65535);
int r2dg3 = magma_ceildiv( necessary_blocks, r2dg1*r2dg2 );
dim3 r2block( r2bs1, r2bs2, 1 );
dim3 r2grid( r2dg1, r2dg2, r2dg3 );
// int r2bs1 = 32;
// int r2bs2 = 1;
// int r2dg1 = min( int( sqrt( float( magma_ceildiv( M->num_rows, r2bs2 )))), 65535);
// int r2dg2 = min(magma_ceildiv( M->num_rows, r2dg1 ), 65535);
// int r2dg3 = magma_ceildiv( M->num_rows, r2dg1*r2dg2 );
// dim3 r2block( r2bs1, r2bs2, 1 );
// dim3 r2grid( r2dg1, r2dg2, r2dg3 );
if (arch >= 300) {
if (uplotype == MagmaLower) { //printf("in here lower new kernel\n");
magma_clowerisai_regs_inv_switch<<< r2grid, r2block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
else { // printf("in here upper new kernel\n");
magma_cupperisai_regs_inv_switch<<< r2grid, r2block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
}
else {
printf( "%% error: ISAI preconditioner requires CUDA ARCHITECTURE >= 300.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
}
#else
// CUDA < 7000
printf( "%% error: ISAI preconditioner requires CUDA >= 7.0.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
#endif
return info;
}
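// A small host-side sketch (hypothetical helper, not part of the MAGMA API)
// of how the launch configuration built above relates to matrix rows: each
// thread block covers r2bs2 = blockDim.y rows, and the switch kernels recover
// their row index from the block and thread coordinates as shown here.
static inline int
example_isai_row_index( int griddim_x, int blockdim_y,
                        int blockidx_x, int blockidx_y, int threadidx_y )
{
    return griddim_x * blockidx_y * blockdim_y
         + blockidx_x * blockdim_y
         + threadidx_y;
}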
|
b684af2bdcb55b5ebba0cee5f8cf9f9017da9a07.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "../common/CudaSafeCall.h"
#include "LRDglobalVariables.cuh"
#include "typedefSparse.h"
#include "sparsePrototypes.cuh"
#include "typedefLRD.h"
#include "parseInput.h"
#include "LRDhostPrototypes.h"
#include "LRDdevicePrototypes.cuh"
real LRD_RestVoltage = LRD_RestVoltage_0;
__device__ real LRD_cm = LRD_cm_0;
__device__ real LRD_Gna = LRD_Gna_0;
__device__ real LRD_Gtca = LRD_Gtca_0;
__device__ real LRD_Gkp = LRD_Gkp_0;
__device__ real LRD_Gitodv = LRD_Gitodv_0;
__device__ real LRD_Gcab = LRD_Gcab_0;
__device__ real LRD_Gnab = LRD_Gnab_0;
__device__ real LRD_ito = LRD_ito_0;
__device__ real LRD_ikna = LRD_ikna_0;
__device__ real LRD_ikatp = LRD_ikatp_0;
__device__ real LRD_insna = LRD_insna_0;
__device__ real LRD_insk = LRD_insk_0;
__device__ real LRD_cleft = LRD_cleft_0;
void LRD_init(char** res) {
rword resources[] = {
{ "LRD_IV", 1007 },
{ "LRD_Node", 1100 },
{ "LRD_Nodetype", 1100 },
{ "LRD_Patch", 1007 },
{ "LRD_Type", 1100 },
{ "LRD_Vr", 1008 },
{ "LRD_Vrest", 1008 },
{ "LRD_Cm", 1009 },
{ "LRD_Gna", 1112 },
{ "LRD_Gtca", 1113 },
{ "LRD_Gkp", 1114 },
{ "LRD_Gitodv", 1115 },
{ "LRD_Gcab", 1116 },
{ "LRD_Gnab", 1117 },
{ "LRD_ito", 1118 },
{ "LRD_ikna", 1119 },
{ "LRD_ikatp", 1120 },
{ "LRD_insna", 1121 },
{ "LRD_insk", 1122 },
{ "LRD_cleft", 1123 },
{ NULL, 0 }
};
int i, j, c, r;
int cmd;
real temp;
i = 0;
while (res[i] != NULL) {
cmd = FindCommand(resources, res[i]);
switch (cmd) {
case 1007:
/*iv = GetRealArray(res[i]);
p = (real*)(&LRD_RestPatch);
c = GetNumValues(res[i]);
if (c > LRD_PatchSize) {
c = LRD_PatchSize;
}
for (j = 0; j<c; j++) {
p[j] = iv[j];
}*/
break;
case 1008:
LRD_RestVoltage = GetRealValue(res[i]);
break;
case 1009:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRD_cm, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1100:
//LRD_NodeType = GetByteValue(res[i]);
break;
case 1112:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRD_Gna, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1113:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRD_Gtca, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1114:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRD_Gkp, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1115:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRD_Gitodv, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1116:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRD_Gcab, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1117:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRD_Gnab, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1118:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRD_ito, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1119:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRD_ikna, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1120:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRD_ikatp, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1121:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRD_insna, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1122:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRD_insk, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1123:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(LRD_cleft, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
}
i++;
}
}
void LRD_gateinit(int memSize, size_t* pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF) {
hipHostMalloc((void**)&(gate_h->vm), memSize, 0);
hipHostMalloc((void**)&(gate_h->m), memSize, 0);
hipHostMalloc((void**)&(gate_h->h), memSize, 0);
hipHostMalloc((void**)&(gate_h->j), memSize, 0);
hipHostMalloc((void**)&(gate_h->d), memSize, 0);
hipHostMalloc((void**)&(gate_h->f), memSize, 0);
hipHostMalloc((void**)&(gate_h->b), memSize, 0);
hipHostMalloc((void**)&(gate_h->g), memSize, 0);
hipHostMalloc((void**)&(gate_h->xr), memSize, 0);
hipHostMalloc((void**)&(gate_h->xs1), memSize, 0);
hipHostMalloc((void**)&(gate_h->xs2), memSize, 0);
hipHostMalloc((void**)&(gate_h->zdv), memSize, 0);
hipHostMalloc((void**)&(gate_h->ydv), memSize, 0);
hipHostMalloc((void**)&(gate_h->nai), memSize, 0);
hipHostMalloc((void**)&(gate_h->ki), memSize, 0);
hipHostMalloc((void**)&(gate_h->nsr), memSize, 0);
hipHostMalloc((void**)&(gate_h->nao), memSize, 0);
hipHostMalloc((void**)&(gate_h->ko), memSize, 0);
hipHostMalloc((void**)&(gate_h->cao), memSize, 0);
hipHostMalloc((void**)&(gate_h->cai), memSize, 0);
hipHostMalloc((void**)&(gate_h->jsr), memSize, 0);
hipHostMalloc((void**)&(gate_h->caiont), memSize, 0);
hipHostMalloc((void**)&(gate_h->BOOL), memSize, 0);
hipHostMalloc((void**)&(gate_h->tcicr), memSize, 0);
hipHostMalloc((void**)&(gate_h->tjsrol), memSize, 0);
hipHostMalloc((void**)&(gate_h->dcaiont), memSize, 0);
// Allocate device memory arrays
CudaSafeCall(hipMallocPitch((void **)&gate_dev->vm, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->m, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->h, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->j, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->d, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->f, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->b, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->g, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->xr, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->xs1, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->xs2, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->zdv, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->ydv, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->nai, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->ki, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->nsr, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->nao, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->ko, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->cao, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->cai, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->jsr, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->caiont, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->BOOL, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->tcicr, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->tjsrol, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->dcaiont, pitch,
memSize, 1));
// Allocate device forward memory arrays
CudaSafeCall(hipMallocPitch((void **)&gate_devF->vm, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->m, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->h, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->j, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->d, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->f, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->b, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->g, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->xr, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->xs1, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->xs2, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->zdv, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->ydv, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->nai, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->ki, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->nsr, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->nao, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->ko, pitch,
memSize, 1));
	CudaSafeCall(hipMallocPitch((void **)&gate_devF->cao, pitch,
		memSize, 1));
	CudaSafeCall(hipMallocPitch((void **)&gate_devF->cai, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->jsr, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->caiont, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->BOOL, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->tcicr, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->tjsrol, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->dcaiont, pitch,
memSize, 1));
puts("\nFinished allocating device arrays\n");
int totpoints = (int)memSize / sizeof(real);
for (int idx = 0; idx < totpoints; idx++) {
gate_h->vm[idx] = LRD_RestVoltage;
gate_h->m[idx] = 0.0008;
gate_h->h[idx] = 0.993771;
gate_h->j[idx] = 0.995727;
gate_h->d[idx] = 3.210618e-06;
gate_h->f[idx] = 0.999837;
gate_h->b[idx] = 0.000970231;
gate_h->g[idx] = 0.994305;
gate_h->xr[idx] = 0.000124042;
gate_h->xs1[idx] = 0.00445683;
gate_h->xs2[idx] = 0.00445683;
gate_h->zdv[idx] = 0.0120892;
gate_h->ydv[idx] = 0.999978;
gate_h->nai[idx] = 9.0;
gate_h->ki[idx] = 141.2;
gate_h->nsr[idx] = 1.838;
gate_h->nao[idx] = 140;
gate_h->ko[idx] = 4.5;
gate_h->cao[idx] = 1.8;
gate_h->cai[idx] = 0.00006;
gate_h->jsr[idx] = 1.838;
gate_h->caiont[idx] = 0;
gate_h->BOOL[idx] = 0;
gate_h->tcicr[idx] = -25;
gate_h->tjsrol[idx] = -25;
gate_h->dcaiont[idx] = 0;
}
CudaSafeCall(hipMemcpy2D((void *)gate_dev->vm, *pitch, (void *)gate_h->vm,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->m, *pitch, (void *)gate_h->m,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->h, *pitch, (void *)gate_h->h,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->j, *pitch, (void *)gate_h->j,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->d, *pitch, (void *)gate_h->d,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->f, *pitch, (void *)gate_h->f,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->b, *pitch, (void *)gate_h->b,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->g, *pitch, (void *)gate_h->g,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->xr, *pitch, (void *)gate_h->xr,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->xs1, *pitch, (void *)gate_h->xs1,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->xs2, *pitch, (void *)gate_h->xs2,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->zdv, *pitch, (void *)gate_h->zdv,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->ydv, *pitch, (void *)gate_h->ydv,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->nai, *pitch, (void *)gate_h->nai,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->ki, *pitch, (void *)gate_h->ki,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->nsr, *pitch, (void *)gate_h->nsr,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->nao, *pitch, (void *)gate_h->nao,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->ko, *pitch, (void *)gate_h->ko,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->cao, *pitch, (void *)gate_h->cao,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->cai, *pitch, (void *)gate_h->cai,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->jsr, *pitch, (void *)gate_h->jsr,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->caiont, *pitch, (void *)gate_h->caiont,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->BOOL, *pitch, (void *)gate_h->BOOL,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->tcicr, *pitch, (void *)gate_h->tcicr,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->tjsrol, *pitch, (void *)gate_h->tjsrol,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->dcaiont, *pitch, (void *)gate_h->dcaiont,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->vm, *pitch, (void *)gate_h->vm,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->m, *pitch, (void *)gate_h->m,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->h, *pitch, (void *)gate_h->h,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->j, *pitch, (void *)gate_h->j,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->d, *pitch, (void *)gate_h->d,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->f, *pitch, (void *)gate_h->f,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->b, *pitch, (void *)gate_h->b,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->g, *pitch, (void *)gate_h->g,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->xr, *pitch, (void *)gate_h->xr,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->xs1, *pitch, (void *)gate_h->xs1,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->xs2, *pitch, (void *)gate_h->xs2,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->zdv, *pitch, (void *)gate_h->zdv,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->ydv, *pitch, (void *)gate_h->ydv,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->nai, *pitch, (void *)gate_h->nai,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->ki, *pitch, (void *)gate_h->ki,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->nsr, *pitch, (void *)gate_h->nsr,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->nao, *pitch, (void *)gate_h->nao,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->ko, *pitch, (void *)gate_h->ko,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->cao, *pitch, (void *)gate_h->cao,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->cai, *pitch, (void *)gate_h->cai,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->jsr, *pitch, (void *)gate_h->jsr,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->caiont, *pitch, (void *)gate_h->caiont,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->BOOL, *pitch, (void *)gate_h->BOOL,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->tcicr, *pitch, (void *)gate_h->tcicr,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->tjsrol, *pitch, (void *)gate_h->tjsrol,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->dcaiont, *pitch, (void *)gate_h->dcaiont,
memSize, memSize, 1, hipMemcpyHostToDevice));
real** qpH = (real**)malloc(sizeof(real *)*gate_h->qpl);
int i = 0;
qpH[i++] = gate_devF->m;
qpH[i++] = gate_devF->h;
qpH[i++] = gate_devF->j;
qpH[i++] = gate_devF->d;
qpH[i++] = gate_devF->f;
qpH[i++] = gate_devF->b;
qpH[i++] = gate_devF->g;
qpH[i++] = gate_devF->xr;
qpH[i++] = gate_devF->xs1;
qpH[i++] = gate_devF->xs2;
qpH[i++] = gate_devF->zdv;
qpH[i++] = gate_devF->ydv;
qpH[i++] = gate_devF->nai;
qpH[i++] = gate_devF->ki;
qpH[i++] = gate_devF->nsr;
qpH[i++] = gate_devF->nao;
qpH[i++] = gate_devF->ko;
qpH[i++] = gate_devF->cao;
CudaSafeCall(hipMemcpy((void *)gate_devF->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, hipMemcpyHostToDevice));
i = 0;
qpH[i++] = gate_dev->m;
qpH[i++] = gate_dev->h;
qpH[i++] = gate_dev->j;
qpH[i++] = gate_dev->d;
qpH[i++] = gate_dev->f;
qpH[i++] = gate_dev->b;
qpH[i++] = gate_dev->g;
qpH[i++] = gate_dev->xr;
qpH[i++] = gate_dev->xs1;
qpH[i++] = gate_dev->xs2;
qpH[i++] = gate_dev->zdv;
qpH[i++] = gate_dev->ydv;
qpH[i++] = gate_dev->nai;
qpH[i++] = gate_dev->ki;
qpH[i++] = gate_dev->nsr;
qpH[i++] = gate_dev->nao;
qpH[i++] = gate_dev->ko;
qpH[i++] = gate_dev->cao;
CudaSafeCall(hipMemcpy((void *)gate_dev->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, hipMemcpyHostToDevice));
free(qpH);
CudaCheckError();
puts("\nFinished initializing device arrays\n");
}
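/* Minimal sketch of the pointer-table idiom used for gate->qp above
   (hypothetical names, not part of this model's API): an array of *device*
   pointers is assembled on the host and then copied into device memory, so a
   kernel can walk all state arrays through a single real** handle. It assumes
   the table itself was allocated beforehand, e.g. with
   hipMalloc(&table_dev, count*sizeof(real*)). */
static void example_fill_pointer_table( real **table_dev, real **entries_host, int count )
{
	CudaSafeCall(hipMemcpy((void *)table_dev, (void *)entries_host,
		count * sizeof(real *), hipMemcpyHostToDevice));
}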
void LRD_sync(int memSize, size_t* pitch, gateType* gate_h, gateType* gate_dev) {
CudaSafeCall(hipMemcpy2D((void *)gate_h->vm, *pitch, (void *)gate_dev->vm,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->m, *pitch, (void *)gate_dev->m,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->h, *pitch, (void *)gate_dev->h,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->j, *pitch, (void *)gate_dev->j,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->d, *pitch, (void *)gate_dev->d,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->f, *pitch, (void *)gate_dev->f,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->b, *pitch, (void *)gate_dev->b,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->g, *pitch, (void *)gate_dev->g,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->xr, *pitch, (void *)gate_dev->xr,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->xs1, *pitch, (void *)gate_dev->xs1,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->xs2, *pitch, (void *)gate_dev->xs2,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->zdv, *pitch, (void *)gate_dev->zdv,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->ydv, *pitch, (void *)gate_dev->ydv,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->nai, *pitch, (void *)gate_dev->nai,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->ki, *pitch, (void *)gate_dev->ki,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->nsr, *pitch, (void *)gate_dev->nsr,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->nao, *pitch, (void *)gate_dev->nao,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->ko, *pitch, (void *)gate_dev->ko,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->cao, *pitch, (void *)gate_dev->cao,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->cai, *pitch, (void *)gate_dev->cai,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->jsr, *pitch, (void *)gate_dev->jsr,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->caiont, *pitch, (void *)gate_dev->caiont,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->BOOL, *pitch, (void *)gate_dev->BOOL,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->tcicr, *pitch, (void *)gate_dev->tcicr,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->tjsrol, *pitch, (void *)gate_dev->tjsrol,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->dcaiont, *pitch, (void *)gate_dev->dcaiont,
memSize, memSize, 1, hipMemcpyDeviceToHost));
}
void LRD_exit(int memSize, size_t pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF, sparse* MatrixINT, cudasparse* cudaMatrixINT){
// Free gate host and device memory
hipHostFree(gate_h->vm);
hipHostFree(gate_h->m);
hipHostFree(gate_h->h);
hipHostFree(gate_h->j);
hipHostFree(gate_h->d);
hipHostFree(gate_h->f);
hipHostFree(gate_h->b);
hipHostFree(gate_h->g);
hipHostFree(gate_h->xr);
hipHostFree(gate_h->xs1);
hipHostFree(gate_h->xs2);
hipHostFree(gate_h->zdv);
hipHostFree(gate_h->ydv);
hipHostFree(gate_h->nai);
hipHostFree(gate_h->ki);
hipHostFree(gate_h->nsr);
hipHostFree(gate_h->nao);
hipHostFree(gate_h->ko);
hipHostFree(gate_h->cao);
hipHostFree(gate_h->cai);
hipHostFree(gate_h->jsr);
hipHostFree(gate_h->caiont);
hipHostFree(gate_h->BOOL);
hipHostFree(gate_h->tcicr);
hipHostFree(gate_h->tjsrol);
hipHostFree(gate_h->dcaiont);
hipHostFree(gate_h->qp);
hipFree(gate_dev->vm);
hipFree(gate_dev->m);
hipFree(gate_dev->h);
hipFree(gate_dev->j);
hipFree(gate_dev->d);
hipFree(gate_dev->f);
hipFree(gate_dev->b);
hipFree(gate_dev->g);
hipFree(gate_dev->xr);
hipFree(gate_dev->xs1);
hipFree(gate_dev->xs2);
hipFree(gate_dev->zdv);
hipFree(gate_dev->ydv);
hipFree(gate_dev->nai);
hipFree(gate_dev->ki);
hipFree(gate_dev->nsr);
hipFree(gate_dev->nao);
hipFree(gate_dev->ko);
hipFree(gate_dev->cao);
hipFree(gate_dev->cai);
hipFree(gate_dev->jsr);
hipFree(gate_dev->caiont);
hipFree(gate_dev->BOOL);
hipFree(gate_dev->tcicr);
hipFree(gate_dev->tjsrol);
hipFree(gate_dev->dcaiont);
hipFree(gate_dev->qp);
hipFree(gate_devF->vm);
hipFree(gate_devF->m);
hipFree(gate_devF->h);
hipFree(gate_devF->j);
hipFree(gate_devF->d);
hipFree(gate_devF->f);
hipFree(gate_devF->b);
hipFree(gate_devF->g);
hipFree(gate_devF->xr);
hipFree(gate_devF->xs1);
hipFree(gate_devF->xs2);
hipFree(gate_devF->zdv);
hipFree(gate_devF->ydv);
hipFree(gate_devF->nai);
hipFree(gate_devF->ki);
hipFree(gate_devF->nsr);
hipFree(gate_devF->nao);
hipFree(gate_devF->ko);
hipFree(gate_devF->cao);
hipFree(gate_devF->cai);
hipFree(gate_devF->jsr);
hipFree(gate_devF->caiont);
hipFree(gate_devF->BOOL);
hipFree(gate_devF->tcicr);
hipFree(gate_devF->tjsrol);
hipFree(gate_devF->dcaiont);
hipFree(gate_devF->qp);
hipFree(cudaMatrixINT->type);
hipFree(cudaMatrixINT->rows);
hipFree(cudaMatrixINT->maxnz);
hipFree(cudaMatrixINT->csep);
hipFree(cudaMatrixINT->jcoef);
hipFree(cudaMatrixINT->coef);
}
void __device__ GetFDev_LRD(int i2d, int pitch, real beta, real Cm, real t, real dt, int totpoints, real rx, gateType g_dev, gateType g_devF) {
/*------------------------------------------------------------------------
* return if outside domain
*------------------------------------------------------------------------
*/
if (i2d >= totpoints) {
return;
}
/* declare variables */
real vm,BOOL,tcicr,tjsrol,csqn;
real LRD_RTF;
real LRD_Gkr, LRD_Gks,LRD_Gki,LRD_Gkatp;
real LRD_Ena,LRD_Etca,LRD_Ekr,LRD_Eks,LRD_Eki,LRD_Ekp;
real LRD_Ekna,LRD_Ekatp,LRD_Ekdv,LRD_Ecan,LRD_Enan;
real m,h,j,am,bm,ah,bh,aj,bj,Ina;
real d,f,dss,taud,fss,tauf,Ibarca,Ibarna,Ibark;
real fca,Ilca,Ilcana,Ilcak,Ilcatot;
real b,g,bss,taub,gss,taug,Itca;
real xr,r,xrss,tauxr,Ikr;
real xs1,xs2,xs1ss,xs2ss,tauxs1,tauxs2,Iks;
real aki,bki,kin,Ikti;
real kp,Ikp;
real Inaca;
real sigma,fnak,Inak;
real Ipca;
real Icab;
real Inab;
real pona,pov,Ikna;
real patp,gkbaratp,Ikatp;
real Ibarnsna,Ibarnsk,Insna,Insk;
real rvdv,Ito;
real azdv,bzdv,tauzdv,zdvss,zdv;
real aydv,bydv,tauydv,ydvss,ydv;
real naiont,kiont,caiont,Itotal;
/*ions*/
real nao,ko,cao;
real dnao,dko,dcao;
real nai,ki;
real dnai,dki;
real itr;
real nsr,kleak,ileak,iup,dnsr;
/* JSR CICR */
real dcaiont,caiontold;
real magrel,on,off,irelcicr;
real greljsrol,ireljsrol;
real trpn,cmdn;
real jsr,bjsr,cjsr,djsr;
/* cai update here */
real cai,catotal,bmyo,cmyo,dmyo,gpig,dcai;
real vcell,ageo,acap,vmyo,vnsr,vjsr,vcleft;
LRD_RTF = LRD_R*LRD_temp/LRD_frdy;
vm = g_dev.vm[i2d];
m = g_dev.m[i2d];
h = g_dev.h[i2d];
j = g_dev.j[i2d];
d = g_dev.d[i2d];
f = g_dev.f[i2d];
b = g_dev.b[i2d];
g = g_dev.g[i2d];
xr = g_dev.xr[i2d];
xs1 = g_dev.xs1[i2d];
xs2 = g_dev.xs2[i2d];
zdv = g_dev.zdv[i2d];
ydv = g_dev.ydv[i2d];
nai = g_dev.nai[i2d];
ki = g_dev.ki[i2d];
nsr = g_dev.nsr[i2d];
nao = g_dev.nao[i2d];
ko = g_dev.ko[i2d];
cao = g_dev.cao[i2d];
cai = g_dev.cai[i2d];
jsr = g_dev.jsr[i2d];
caiont = g_dev.caiont[i2d];
BOOL = g_dev.BOOL[i2d];
tcicr = g_dev.tcicr[i2d];
tjsrol = g_dev.tjsrol[i2d];
dcaiont = g_dev.dcaiont[i2d];
/*------------------------------------------------------------------------
* setting local variables
*------------------------------------------------------------------------
*/
real fv = g_devF.vm[i2d];
/* Declare varying G's and E's */
LRD_Gkr = 0.02614*sqrt(ko/5.4);
LRD_Gks = 0.433*(1+0.6/(1+pow((0.000038/cai),1.4)));
LRD_Gki = 0.75*(sqrt(ko/5.4));
LRD_Gkatp = 0.000195/nicholsarea;
LRD_Ena = (LRD_RTF)*log(nao/nai);
LRD_Etca = 0.5*(LRD_RTF)*log(cao/cai);
LRD_Ekr = (LRD_RTF)*log(ko/ki);
LRD_Eks = (LRD_RTF)*log((ko+prnak*nao)/(ki+prnak*nai));
LRD_Eki = (LRD_RTF)*log(ko/ki);
LRD_Ekp = LRD_Eki;
LRD_Ekna = LRD_Ekr;
LRD_Ekatp = LRD_Ekr;
LRD_Ekdv = LRD_Ekr;
LRD_Ecan = LRD_Etca;
LRD_Enan = LRD_Ena;
/* Na current [15] */
am = 0.32*(vm+47.13)/(1-exp(-0.1*(vm+47.13)));
bm = 0.08*exp(-vm/11);
if (vm < -40) {
ah = 0.135*exp((80+vm)/-6.8);
bh = 3.56*exp(0.079*vm)+310000*exp(0.35*vm);
aj = (-127140*exp(0.2444*vm)-0.00003474*exp(-0.04391*vm))*((vm+37.78)/(1+exp(0.311*(vm+79.23))));
bj = (0.1212*exp(-0.01052*vm))/(1+exp(-0.1378*(vm+40.14)));
} else {
ah = 0;
bh = 1/(0.13*(1+exp((vm+10.66)/-11.1)));
aj = 0;
bj = (0.3*exp(-0.0000002535*vm))/(1+exp(-0.1*(vm+32)));
}
Ina=LRD_Gna*(m*m*m*h*j)*(vm-LRD_Ena);
/* L-type Calcium current [14,15] */
dss = 1/(1+exp(-(vm+10)/6.24));
taud = dss*(1-exp(-(vm+10)/6.24))/(0.035*(vm+10));
fss = (1/(1+exp((vm+32)/8)))+(0.6/(1+exp((50-vm)/20)));
tauf = 1/(0.0197*exp(-0.0337*0.0337*(vm+10)*(vm+10))+0.02);
Ibarca = pca*zca*zca*((vm*LRD_frdy)/(LRD_RTF))*((gacai*cai*exp((zca*vm)/(LRD_RTF))-gacao*cao)/(exp((zca*vm)/(LRD_RTF))-1));
Ibarna = pna*zna*zna*((vm*LRD_frdy)/(LRD_RTF))*((ganai*nai*exp((zna*vm)/(LRD_RTF))-ganao*nao)/(exp((zna*vm)/(LRD_RTF))-1));
Ibark = pk*zk*zk*((vm*LRD_frdy)/(LRD_RTF))*((gaki*ki*exp((zk*vm)/(LRD_RTF))-gako*ko)/(exp((zk*vm)/(LRD_RTF))-1));
fca = 1/(1+cai/kmca);
Ilca = d*f*fca*Ibarca;
Ilcana = d*f*fca*Ibarna;
Ilcak = d*f*fca*Ibark;
Ilcatot = Ilca+Ilcana+Ilcak;
/* T-type Calcium current [13] */
bss = 1/(1+exp(-(vm+14)/10.8));
taub = 3.7+6.1/(1+exp((vm+25)/4.5));
gss = 1/(1+exp((vm+60)/5.6));
if (vm<=0) {
taug = -0.875*vm+12;
} else {
taug = 12;
}
Itca = LRD_Gtca*b*b*g*(vm-LRD_Etca);
/* K current - Rapid [13] */
xrss = 1/(1+exp(-(vm+21.5)/7.5));
tauxr = 1/(0.00138*(vm+14.2)/(1-exp(-0.123*(vm+14.2)))+0.00061*(vm+38.9)/(exp(0.145*(vm+38.9))-1));
r = 1/(1+exp((vm+9)/22.4));
Ikr = LRD_Gkr*xr*r*(vm-LRD_Ekr);
/* K current - Slow [10,13] */
xs1ss = 1/(1+exp(-(vm-1.5)/16.7));
xs2ss = xs1ss;
tauxs1 = 1/(0.0000719*(vm+30)/(1-exp(-0.148*(vm+30)))+0.000131*(vm+30)/(exp(0.0687*(vm+30))-1));
tauxs2 = 4*tauxs1;
Iks = LRD_Gks*xs1*xs2*(vm-LRD_Eks);
/* K current - Time independent [15] */
aki = 1.02/(1+exp(0.2385*(vm-LRD_Eki-59.215)));
bki = (0.49124*exp(0.08032*(vm-LRD_Eki+5.476))+exp(0.06175*(vm-LRD_Eki-594.31)))/(1+exp(-0.5143*(vm-LRD_Eki+4.753)));
kin = aki/(aki+bki);
Ikti = LRD_Gki*kin*(vm-LRD_Eki);
/* K current - Plateau [15] */
kp = 1/(1+exp((7.488-vm)/5.98));
Ikp = LRD_Gkp*kp*(vm-LRD_Ekp);
/* Na-Ca exchanger [6,14,15] */
Inaca = c1*exp((gammas-1)*vm/(LRD_RTF))*((exp(vm/(LRD_RTF))*nai*nai*nai*cao-nao*nao*nao*cai)/(1+c2*exp((gammas-1)*vm/(LRD_RTF))*(exp(vm/(LRD_RTF))*nai*nai*nai*cao+nao*nao*nao*cai)));
/* Na-K pump [15] */
sigma = (exp(nao/67.3)-1)/7;
fnak = 1/(1+0.1245*exp((-0.1*vm)/(LRD_RTF))+0.0365*sigma*exp((-vm)/(LRD_RTF)));
Inak = Ibarnak*fnak*(1/(1+kmnai*kmnai/(nai*nai)))*(ko/(ko+kmko));
/* Sarcolemmal Ca pump [15] */
Ipca = (Ibarpca*cai)/(kmpca+cai);
/* Ca background current [15] */
Icab = LRD_Gcab*(vm-LRD_Ecan);
/* Na background current [15] */
Inab = LRD_Gnab*(vm-LRD_Enan);
/* Na activated K current [6] */
pona = 0.85/(1+pow((kdkna/nai),2.8));
pov = 0.8-(0.65/(1+exp((vm+125)/15)));
Ikna = LRD_ikna*LRD_Gkna*pona*pov*(vm-LRD_Ekna);
/* ATP sensitive K current [11] */
patp = 1/(1+(pow((atpi/katp),hatp)));
gkbaratp = LRD_Gkatp*patp*(pow((ko/4),natp));
Ikatp = LRD_ikatp*gkbaratp*(vm-LRD_Ekatp);
/* Non-specific Ca-activated current [14,15] */
Ibarnsna = pnsca*zna*zna*((vm*LRD_frdy)/(LRD_RTF))*((ganai*nai*exp((zna*vm)/(LRD_RTF))-ganao*nao)/(exp((zna*vm)/(LRD_RTF))-1));
Ibarnsk = pnsca*zk*zk*((vm*LRD_frdy)/(LRD_RTF))*((gaki*ki*exp((zk*vm)/(LRD_RTF))-gako*ko)/(exp((zk*vm)/(LRD_RTF))-1));
Insna = LRD_insna*Ibarnsna/(1+kmnsca*kmnsca*kmnsca/(cai*cai*cai));
Insk = LRD_insk*Ibarnsk/(1+kmnsca*kmnsca*kmnsca/(cai*cai*cai));
/* Transient outward current */
rvdv = exp(vm/100);
azdv = (10*exp((vm-40)/25))/(1+exp((vm-40)/25));
bzdv = (10*exp(-(vm+90)/25))/(1+exp(-(vm+90)/25));
tauzdv = 1/(azdv+bzdv);
zdvss = azdv/(azdv+bzdv);
aydv = 0.015/(1+exp((vm+60)/5));
bydv = (0.1*exp((vm+25)/5))/(1+exp((vm+25)/5));
tauydv = 1/(aydv+bydv);
ydvss = aydv/(aydv+bydv);
Ito = LRD_ito*LRD_Gitodv*zdv*zdv*zdv*ydv*rvdv*(vm-LRD_Ekdv);
/* Summing currents (inactive currents are set to zero with activation variables) */
naiont = Ina+Inab+Ilcana+3*Inak+3*Inaca+Insna;
kiont = Ikr+Iks+Ikti+Ikp+Ilcak+-2*Inak+Insk+Ito+Ikna+Ikatp;
caiont = Ilca+Icab+Ipca-2*Inaca+Itca;
Itotal = LRD_cm*(naiont+kiont+caiont); /* uA/cm2 */
if (((t-tcicr)>80) && (vm<-30)) {
BOOL = 0;
g_dev.BOOL[i2d] = BOOL;
}
/* Put voltage update here */
fv += -Itotal;
g_devF.vm[i2d] = fv;
/* change in cleft concentration */
dnao = LRD_cleft*((nabm-nao)/taudiff+naiont*acap*LRD_cm/(vcleft*LRD_frdy));
dko = LRD_cleft*((kbm-ko)/taudiff+kiont*acap*LRD_cm/(vcleft*LRD_frdy));
dcao = LRD_cleft*((cabm-cao)/taudiff+caiont*acap*LRD_cm/(vcleft*LRD_frdy*2));
/* change in nai and ki concentration */
dnai = -LRD_cm*(naiont*acap)/(vmyo*zna*LRD_frdy); /* dnai/dt */
dki = -LRD_cm*(kiont*acap)/(vmyo*zk*LRD_frdy); /* dki/dt */
/* change in itr [14] */
itr = (nsr-jsr)/tautr;
/* change in nsr [14] */
kleak = iupbar/nsrbar;
ileak = kleak*nsr;
iup = iupbar*cai/(cai+kmup);
dnsr = (iup-ileak-itr*vjsr/vnsr); /* dnsr/dt */
/* Calcium-induced-calcium-release (CICR) criteia [6] */
if ((vm>-35) && (((caiont-caiontold)/dt)<dcaiont) && (BOOL==0)){
BOOL = 1;
tcicr = t;
g_dev.BOOL[i2d] = BOOL;
g_dev.tcicr[i2d] = tcicr; /* changes reference time */
}
on = 1/(1+exp((-(t-tcicr)+4)/.5));
off = 1-on;
magrel = 1/(1+exp(((Ilca+Icab+Ipca-2*Inaca+Itca)+5)/0.9));
irelcicr = gmaxrel*on*off*magrel*(jsr-cai);
/* JSR Calciium overload [13] */
greljsrol = grelbarjsrol*(1-exp(-(t-tjsrol)/tauon))*exp(-(t-tjsrol)/tauoff);
ireljsrol = greljsrol*(jsr-cai);
csqn = csqnbar*(jsr/(jsr+kmcsqn));
djsr = dt*(itr-irelcicr-ireljsrol);
bjsr = csqnbar-csqn-djsr-jsr+kmcsqn;
cjsr = kmcsqn*(csqn+djsr+jsr);
jsr =(sqrt(bjsr*bjsr+4*cjsr)-bjsr)/2;
/* Calcium buffers in myoplasm [15] */
trpn = trpnbar*(cai/(cai+kmtrpn));
cmdn = cmdnbar*(cai/(cai+kmcmdn));
/* change in cai concentration [13] */
dcai = -dt*(((LRD_cm*caiont*acap)/(vmyo*zca*LRD_frdy))+((iup-ileak)*vnsr/vmyo)-(irelcicr*vjsr/vmyo)-(ireljsrol*vjsr/vmyo));
catotal = trpn+cmdn+dcai+cai;
bmyo = cmdnbar+trpnbar-catotal+kmtrpn+kmcmdn;
cmyo = (kmcmdn*kmtrpn)-(catotal*(kmtrpn+kmcmdn))+(trpnbar*kmcmdn)+(cmdnbar*kmtrpn);
dmyo = -kmtrpn*kmcmdn*catotal;
gpig = sqrt(bmyo*bmyo-3*cmyo);
cai = ((2*gpig/3)*cos(acos((9*bmyo*cmyo-2*bmyo*bmyo*bmyo-27*dmyo)/(2*pow((bmyo*bmyo-3*cmyo),1.5)))/3)-(bmyo/3));
/* Calcium overload criteria [15] */
if((csqn>=csqnth) && ((t-tjsrol)>50)){
printf("Spontaneous Release occured at time %lf at node %i\n",t,i);
tjsrol = t;
g_dev.tjsrol[i2d] = tjsrol; /* changes reference time */
}
g_devF.m[i2d] = am*(1.0-m) - bm*m;
g_devF.h[i2d] = ah*(1.0-h) - bh*h;
g_devF.j[i2d] = aj*(1.0-j) - bj*j;
g_devF.d[i2d] = (dss/taud)*(1-d)-(1-dss)*(d/taud);
g_devF.f[i2d] = (fss/tauf)*(1-f)-(1-fss)*(f/tauf);
g_devF.b[i2d] = (bss/taub)*(1-b)-(1-bss)*(b/taub);
g_devF.g[i2d] = (gss/taug)*(1-g)-(1-gss)*(g/taug);
g_devF.xr[i2d] = (xrss/tauxr)*(1-xr)-(1-xrss)*(xr/tauxr);
g_devF.xs1[i2d] = (xs1ss/tauxs1)*(1-xs1)-(1-xs1ss)*(xs1/tauxs1);
g_devF.xs2[i2d] = (xs2ss/tauxs2)*(1-xs2)-(1-xs2ss)*(xs2/tauxs2);
g_devF.zdv[i2d] = (zdvss/tauzdv)*(1-zdv)-(1-zdvss)*(zdv/tauzdv);
g_devF.ydv[i2d] = (ydvss/tauydv)*(1-ydv)-(1-ydvss)*(ydv/tauydv);
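	/* Note: each gate derivative above has the Hodgkin-Huxley relaxation form
	   (yss/tau)*(1-y) - (1-yss)*(y/tau), which reduces algebraically to
	   (yss - y)/tau, i.e. first-order relaxation of the gate toward its
	   steady-state value yss with time constant tau. */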
g_devF.nai[i2d] = dnai;
g_devF.ki[i2d] = dki;
g_devF.nsr[i2d] = dnsr;
g_devF.nao[i2d] = dnao;
g_devF.ko[i2d] = dko;
g_devF.cao[i2d] = dcao;
/* assign Temp variables to memory */
g_devF.caiont[i2d] = caiont;
g_devF.cai[i2d] = cai;
g_devF.jsr[i2d] = jsr;
g_devF.dcaiont[i2d] = (caiont-caiontold)/dt;
} | b684af2bdcb55b5ebba0cee5f8cf9f9017da9a07.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "../common/CudaSafeCall.h"
#include "LRDglobalVariables.cuh"
#include "typedefSparse.h"
#include "sparsePrototypes.cuh"
#include "typedefLRD.h"
#include "parseInput.h"
#include "LRDhostPrototypes.h"
#include "LRDdevicePrototypes.cuh"
real LRD_RestVoltage = LRD_RestVoltage_0;
__device__ real LRD_cm = LRD_cm_0;
__device__ real LRD_Gna = LRD_Gna_0;
__device__ real LRD_Gtca = LRD_Gtca_0;
__device__ real LRD_Gkp = LRD_Gkp_0;
__device__ real LRD_Gitodv = LRD_Gitodv_0;
__device__ real LRD_Gcab = LRD_Gcab_0;
__device__ real LRD_Gnab = LRD_Gnab_0;
__device__ real LRD_ito = LRD_ito_0;
__device__ real LRD_ikna = LRD_ikna_0;
__device__ real LRD_ikatp = LRD_ikatp_0;
__device__ real LRD_insna = LRD_insna_0;
__device__ real LRD_insk = LRD_insk_0;
__device__ real LRD_cleft = LRD_cleft_0;
void LRD_init(char** res) {
rword resources[] = {
{ "LRD_IV", 1007 },
{ "LRD_Node", 1100 },
{ "LRD_Nodetype", 1100 },
{ "LRD_Patch", 1007 },
{ "LRD_Type", 1100 },
{ "LRD_Vr", 1008 },
{ "LRD_Vrest", 1008 },
{ "LRD_Cm", 1009 },
{ "LRD_Gna", 1112 },
{ "LRD_Gtca", 1113 },
{ "LRD_Gkp", 1114 },
{ "LRD_Gitodv", 1115 },
{ "LRD_Gcab", 1116 },
{ "LRD_Gnab", 1117 },
{ "LRD_ito", 1118 },
{ "LRD_ikna", 1119 },
{ "LRD_ikatp", 1120 },
{ "LRD_insna", 1121 },
{ "LRD_insk", 1122 },
{ "LRD_cleft", 1123 },
{ NULL, 0 }
};
int i, j, c, r;
int cmd;
real temp;
i = 0;
while (res[i] != NULL) {
cmd = FindCommand(resources, res[i]);
switch (cmd) {
case 1007:
/*iv = GetRealArray(res[i]);
p = (real*)(&LRD_RestPatch);
c = GetNumValues(res[i]);
if (c > LRD_PatchSize) {
c = LRD_PatchSize;
}
for (j = 0; j<c; j++) {
p[j] = iv[j];
}*/
break;
case 1008:
LRD_RestVoltage = GetRealValue(res[i]);
break;
case 1009:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRD_cm, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1100:
//LRD_NodeType = GetByteValue(res[i]);
break;
case 1112:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRD_Gna, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1113:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRD_Gtca, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1114:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRD_Gkp, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1115:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRD_Gitodv, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1116:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRD_Gcab, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1117:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRD_Gnab, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1118:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRD_ito, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1119:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRD_ikna, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1120:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRD_ikatp, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1121:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRD_insna, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1122:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRD_insk, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
case 1123:
temp = GetRealValue(res[i]);
cudaMemcpyToSymbol(LRD_cleft, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
break;
}
i++;
}
}
void LRD_gateinit(int memSize, size_t* pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF) {
cudaHostAlloc((void**)&(gate_h->vm), memSize, 0);
cudaHostAlloc((void**)&(gate_h->m), memSize, 0);
cudaHostAlloc((void**)&(gate_h->h), memSize, 0);
cudaHostAlloc((void**)&(gate_h->j), memSize, 0);
cudaHostAlloc((void**)&(gate_h->d), memSize, 0);
cudaHostAlloc((void**)&(gate_h->f), memSize, 0);
cudaHostAlloc((void**)&(gate_h->b), memSize, 0);
cudaHostAlloc((void**)&(gate_h->g), memSize, 0);
cudaHostAlloc((void**)&(gate_h->xr), memSize, 0);
cudaHostAlloc((void**)&(gate_h->xs1), memSize, 0);
cudaHostAlloc((void**)&(gate_h->xs2), memSize, 0);
cudaHostAlloc((void**)&(gate_h->zdv), memSize, 0);
cudaHostAlloc((void**)&(gate_h->ydv), memSize, 0);
cudaHostAlloc((void**)&(gate_h->nai), memSize, 0);
cudaHostAlloc((void**)&(gate_h->ki), memSize, 0);
cudaHostAlloc((void**)&(gate_h->nsr), memSize, 0);
cudaHostAlloc((void**)&(gate_h->nao), memSize, 0);
cudaHostAlloc((void**)&(gate_h->ko), memSize, 0);
cudaHostAlloc((void**)&(gate_h->cao), memSize, 0);
cudaHostAlloc((void**)&(gate_h->cai), memSize, 0);
cudaHostAlloc((void**)&(gate_h->jsr), memSize, 0);
cudaHostAlloc((void**)&(gate_h->caiont), memSize, 0);
cudaHostAlloc((void**)&(gate_h->BOOL), memSize, 0);
cudaHostAlloc((void**)&(gate_h->tcicr), memSize, 0);
cudaHostAlloc((void**)&(gate_h->tjsrol), memSize, 0);
cudaHostAlloc((void**)&(gate_h->dcaiont), memSize, 0);
// Allocate device memory arrays
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->vm, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->m, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->h, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->j, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->d, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->f, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->b, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->g, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->xr, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->xs1, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->xs2, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->zdv, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->ydv, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->nai, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->ki, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->nsr, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->nao, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->ko, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->cao, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->cai, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->jsr, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->caiont, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->BOOL, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->tcicr, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->tjsrol, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->dcaiont, pitch,
memSize, 1));
// Allocate device forward memory arrays
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->vm, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->m, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->h, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->j, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->d, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->f, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->b, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->g, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->xr, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->xs1, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->xs2, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->zdv, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->ydv, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->nai, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->ki, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->nsr, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->nao, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->ko, pitch,
memSize, 1));
	CudaSafeCall(cudaMallocPitch((void **)&gate_devF->cao, pitch,
		memSize, 1));
	CudaSafeCall(cudaMallocPitch((void **)&gate_devF->cai, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->jsr, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->caiont, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->BOOL, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->tcicr, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->tjsrol, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->dcaiont, pitch,
memSize, 1));
puts("\nFinished allocating device arrays\n");
int totpoints = (int)memSize / sizeof(real);
for (int idx = 0; idx < totpoints; idx++) {
gate_h->vm[idx] = LRD_RestVoltage;
gate_h->m[idx] = 0.0008;
gate_h->h[idx] = 0.993771;
gate_h->j[idx] = 0.995727;
gate_h->d[idx] = 3.210618e-06;
gate_h->f[idx] = 0.999837;
gate_h->b[idx] = 0.000970231;
gate_h->g[idx] = 0.994305;
gate_h->xr[idx] = 0.000124042;
gate_h->xs1[idx] = 0.00445683;
gate_h->xs2[idx] = 0.00445683;
gate_h->zdv[idx] = 0.0120892;
gate_h->ydv[idx] = 0.999978;
gate_h->nai[idx] = 9.0;
gate_h->ki[idx] = 141.2;
gate_h->nsr[idx] = 1.838;
gate_h->nao[idx] = 140;
gate_h->ko[idx] = 4.5;
gate_h->cao[idx] = 1.8;
gate_h->cai[idx] = 0.00006;
gate_h->jsr[idx] = 1.838;
gate_h->caiont[idx] = 0;
gate_h->BOOL[idx] = 0;
gate_h->tcicr[idx] = -25;
gate_h->tjsrol[idx] = -25;
gate_h->dcaiont[idx] = 0;
}
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->vm, *pitch, (void *)gate_h->vm,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->m, *pitch, (void *)gate_h->m,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->h, *pitch, (void *)gate_h->h,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->j, *pitch, (void *)gate_h->j,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->d, *pitch, (void *)gate_h->d,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->f, *pitch, (void *)gate_h->f,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->b, *pitch, (void *)gate_h->b,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->g, *pitch, (void *)gate_h->g,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->xr, *pitch, (void *)gate_h->xr,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->xs1, *pitch, (void *)gate_h->xs1,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->xs2, *pitch, (void *)gate_h->xs2,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->zdv, *pitch, (void *)gate_h->zdv,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->ydv, *pitch, (void *)gate_h->ydv,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->nai, *pitch, (void *)gate_h->nai,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->ki, *pitch, (void *)gate_h->ki,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->nsr, *pitch, (void *)gate_h->nsr,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->nao, *pitch, (void *)gate_h->nao,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->ko, *pitch, (void *)gate_h->ko,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->cao, *pitch, (void *)gate_h->cao,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->cai, *pitch, (void *)gate_h->cai,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->jsr, *pitch, (void *)gate_h->jsr,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->caiont, *pitch, (void *)gate_h->caiont,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->BOOL, *pitch, (void *)gate_h->BOOL,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->tcicr, *pitch, (void *)gate_h->tcicr,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->tjsrol, *pitch, (void *)gate_h->tjsrol,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->dcaiont, *pitch, (void *)gate_h->dcaiont,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->vm, *pitch, (void *)gate_h->vm,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->m, *pitch, (void *)gate_h->m,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->h, *pitch, (void *)gate_h->h,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->j, *pitch, (void *)gate_h->j,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->d, *pitch, (void *)gate_h->d,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->f, *pitch, (void *)gate_h->f,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->b, *pitch, (void *)gate_h->b,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->g, *pitch, (void *)gate_h->g,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->xr, *pitch, (void *)gate_h->xr,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->xs1, *pitch, (void *)gate_h->xs1,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->xs2, *pitch, (void *)gate_h->xs2,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->zdv, *pitch, (void *)gate_h->zdv,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->ydv, *pitch, (void *)gate_h->ydv,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->nai, *pitch, (void *)gate_h->nai,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->ki, *pitch, (void *)gate_h->ki,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->nsr, *pitch, (void *)gate_h->nsr,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->nao, *pitch, (void *)gate_h->nao,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->ko, *pitch, (void *)gate_h->ko,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->cao, *pitch, (void *)gate_h->cao,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->cai, *pitch, (void *)gate_h->cai,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->jsr, *pitch, (void *)gate_h->jsr,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->caiont, *pitch, (void *)gate_h->caiont,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->BOOL, *pitch, (void *)gate_h->BOOL,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->tcicr, *pitch, (void *)gate_h->tcicr,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->tjsrol, *pitch, (void *)gate_h->tjsrol,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->dcaiont, *pitch, (void *)gate_h->dcaiont,
memSize, memSize, 1, cudaMemcpyHostToDevice));
real** qpH = (real**)malloc(sizeof(real *)*gate_h->qpl);
int i = 0;
qpH[i++] = gate_devF->m;
qpH[i++] = gate_devF->h;
qpH[i++] = gate_devF->j;
qpH[i++] = gate_devF->d;
qpH[i++] = gate_devF->f;
qpH[i++] = gate_devF->b;
qpH[i++] = gate_devF->g;
qpH[i++] = gate_devF->xr;
qpH[i++] = gate_devF->xs1;
qpH[i++] = gate_devF->xs2;
qpH[i++] = gate_devF->zdv;
qpH[i++] = gate_devF->ydv;
qpH[i++] = gate_devF->nai;
qpH[i++] = gate_devF->ki;
qpH[i++] = gate_devF->nsr;
qpH[i++] = gate_devF->nao;
qpH[i++] = gate_devF->ko;
qpH[i++] = gate_devF->cao;
CudaSafeCall(cudaMemcpy((void *)gate_devF->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, cudaMemcpyHostToDevice));
i = 0;
qpH[i++] = gate_dev->m;
qpH[i++] = gate_dev->h;
qpH[i++] = gate_dev->j;
qpH[i++] = gate_dev->d;
qpH[i++] = gate_dev->f;
qpH[i++] = gate_dev->b;
qpH[i++] = gate_dev->g;
qpH[i++] = gate_dev->xr;
qpH[i++] = gate_dev->xs1;
qpH[i++] = gate_dev->xs2;
qpH[i++] = gate_dev->zdv;
qpH[i++] = gate_dev->ydv;
qpH[i++] = gate_dev->nai;
qpH[i++] = gate_dev->ki;
qpH[i++] = gate_dev->nsr;
qpH[i++] = gate_dev->nao;
qpH[i++] = gate_dev->ko;
qpH[i++] = gate_dev->cao;
CudaSafeCall(cudaMemcpy((void *)gate_dev->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, cudaMemcpyHostToDevice));
free(qpH);
CudaCheckError();
puts("\nFinished initializing device arrays\n");
}
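/* Hypothetical sketch (not part of this file) of how the qp pointer tables
   built above are typically consumed: a generic explicit-Euler update kernel
   that advances every state array by dt using the matching derivative array.
   The integration scheme and all names here are assumptions for illustration
   only. */
__global__ void example_euler_update( real **y, real **dy, int narrays, int npoints, real dt )
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= npoints) return;
	for (int a = 0; a < narrays; a++) {
		y[a][idx] += dt * dy[a][idx]; /* y_{n+1} = y_n + dt * f(y_n) */
	}
}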
void LRD_sync(int memSize, size_t* pitch, gateType* gate_h, gateType* gate_dev) {
CudaSafeCall(cudaMemcpy2D((void *)gate_h->vm, *pitch, (void *)gate_dev->vm,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->m, *pitch, (void *)gate_dev->m,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->h, *pitch, (void *)gate_dev->h,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->j, *pitch, (void *)gate_dev->j,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->d, *pitch, (void *)gate_dev->d,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->f, *pitch, (void *)gate_dev->f,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->b, *pitch, (void *)gate_dev->b,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->g, *pitch, (void *)gate_dev->g,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->xr, *pitch, (void *)gate_dev->xr,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->xs1, *pitch, (void *)gate_dev->xs1,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->xs2, *pitch, (void *)gate_dev->xs2,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->zdv, *pitch, (void *)gate_dev->zdv,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->ydv, *pitch, (void *)gate_dev->ydv,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->nai, *pitch, (void *)gate_dev->nai,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->ki, *pitch, (void *)gate_dev->ki,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->nsr, *pitch, (void *)gate_dev->nsr,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->nao, *pitch, (void *)gate_dev->nao,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->ko, *pitch, (void *)gate_dev->ko,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->cao, *pitch, (void *)gate_dev->cao,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->cai, *pitch, (void *)gate_dev->cai,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->jsr, *pitch, (void *)gate_dev->jsr,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->caiont, *pitch, (void *)gate_dev->caiont,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->BOOL, *pitch, (void *)gate_dev->BOOL,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->tcicr, *pitch, (void *)gate_dev->tcicr,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->tjsrol, *pitch, (void *)gate_dev->tjsrol,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy2D((void *)gate_h->dcaiont, *pitch, (void *)gate_dev->dcaiont,
memSize, memSize, 1, cudaMemcpyDeviceToHost));
}
void LRD_exit(int memSize, size_t pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF, sparse* MatrixINT, cudasparse* cudaMatrixINT){
// Free gate host and device memory
cudaFreeHost(gate_h->vm);
cudaFreeHost(gate_h->m);
cudaFreeHost(gate_h->h);
cudaFreeHost(gate_h->j);
cudaFreeHost(gate_h->d);
cudaFreeHost(gate_h->f);
cudaFreeHost(gate_h->b);
cudaFreeHost(gate_h->g);
cudaFreeHost(gate_h->xr);
cudaFreeHost(gate_h->xs1);
cudaFreeHost(gate_h->xs2);
cudaFreeHost(gate_h->zdv);
cudaFreeHost(gate_h->ydv);
cudaFreeHost(gate_h->nai);
cudaFreeHost(gate_h->ki);
cudaFreeHost(gate_h->nsr);
cudaFreeHost(gate_h->nao);
cudaFreeHost(gate_h->ko);
cudaFreeHost(gate_h->cao);
cudaFreeHost(gate_h->cai);
cudaFreeHost(gate_h->jsr);
cudaFreeHost(gate_h->caiont);
cudaFreeHost(gate_h->BOOL);
cudaFreeHost(gate_h->tcicr);
cudaFreeHost(gate_h->tjsrol);
cudaFreeHost(gate_h->dcaiont);
cudaFreeHost(gate_h->qp);
cudaFree(gate_dev->vm);
cudaFree(gate_dev->m);
cudaFree(gate_dev->h);
cudaFree(gate_dev->j);
cudaFree(gate_dev->d);
cudaFree(gate_dev->f);
cudaFree(gate_dev->b);
cudaFree(gate_dev->g);
cudaFree(gate_dev->xr);
cudaFree(gate_dev->xs1);
cudaFree(gate_dev->xs2);
cudaFree(gate_dev->zdv);
cudaFree(gate_dev->ydv);
cudaFree(gate_dev->nai);
cudaFree(gate_dev->ki);
cudaFree(gate_dev->nsr);
cudaFree(gate_dev->nao);
cudaFree(gate_dev->ko);
cudaFree(gate_dev->cao);
cudaFree(gate_dev->cai);
cudaFree(gate_dev->jsr);
cudaFree(gate_dev->caiont);
cudaFree(gate_dev->BOOL);
cudaFree(gate_dev->tcicr);
cudaFree(gate_dev->tjsrol);
cudaFree(gate_dev->dcaiont);
cudaFree(gate_dev->qp);
cudaFree(gate_devF->vm);
cudaFree(gate_devF->m);
cudaFree(gate_devF->h);
cudaFree(gate_devF->j);
cudaFree(gate_devF->d);
cudaFree(gate_devF->f);
cudaFree(gate_devF->b);
cudaFree(gate_devF->g);
cudaFree(gate_devF->xr);
cudaFree(gate_devF->xs1);
cudaFree(gate_devF->xs2);
cudaFree(gate_devF->zdv);
cudaFree(gate_devF->ydv);
cudaFree(gate_devF->nai);
cudaFree(gate_devF->ki);
cudaFree(gate_devF->nsr);
cudaFree(gate_devF->nao);
cudaFree(gate_devF->ko);
cudaFree(gate_devF->cao);
cudaFree(gate_devF->cai);
cudaFree(gate_devF->jsr);
cudaFree(gate_devF->caiont);
cudaFree(gate_devF->BOOL);
cudaFree(gate_devF->tcicr);
cudaFree(gate_devF->tjsrol);
cudaFree(gate_devF->dcaiont);
cudaFree(gate_devF->qp);
cudaFree(cudaMatrixINT->type);
cudaFree(cudaMatrixINT->rows);
cudaFree(cudaMatrixINT->maxnz);
cudaFree(cudaMatrixINT->csep);
cudaFree(cudaMatrixINT->jcoef);
cudaFree(cudaMatrixINT->coef);
}
void __device__ GetFDev_LRD(int i2d, int pitch, real beta, real Cm, real t, real dt, int totpoints, real rx, gateType g_dev, gateType g_devF) {
/*------------------------------------------------------------------------
* return if outside domain
*------------------------------------------------------------------------
*/
if (i2d >= totpoints) {
return;
}
/* declare variables */
real vm,BOOL,tcicr,tjsrol,csqn;
real LRD_RTF;
real LRD_Gkr, LRD_Gks,LRD_Gki,LRD_Gkatp;
real LRD_Ena,LRD_Etca,LRD_Ekr,LRD_Eks,LRD_Eki,LRD_Ekp;
real LRD_Ekna,LRD_Ekatp,LRD_Ekdv,LRD_Ecan,LRD_Enan;
real m,h,j,am,bm,ah,bh,aj,bj,Ina;
real d,f,dss,taud,fss,tauf,Ibarca,Ibarna,Ibark;
real fca,Ilca,Ilcana,Ilcak,Ilcatot;
real b,g,bss,taub,gss,taug,Itca;
real xr,r,xrss,tauxr,Ikr;
real xs1,xs2,xs1ss,xs2ss,tauxs1,tauxs2,Iks;
real aki,bki,kin,Ikti;
real kp,Ikp;
real Inaca;
real sigma,fnak,Inak;
real Ipca;
real Icab;
real Inab;
real pona,pov,Ikna;
real patp,gkbaratp,Ikatp;
real Ibarnsna,Ibarnsk,Insna,Insk;
real rvdv,Ito;
real azdv,bzdv,tauzdv,zdvss,zdv;
real aydv,bydv,tauydv,ydvss,ydv;
real naiont,kiont,caiont,Itotal;
/*ions*/
real nao,ko,cao;
real dnao,dko,dcao;
real nai,ki;
real dnai,dki;
real itr;
real nsr,kleak,ileak,iup,dnsr;
/* JSR CICR */
real dcaiont,caiontold;
real magrel,on,off,irelcicr;
real greljsrol,ireljsrol;
real trpn,cmdn;
real jsr,bjsr,cjsr,djsr;
/* cai update here */
real cai,catotal,bmyo,cmyo,dmyo,gpig,dcai;
real vcell,ageo,acap,vmyo,vnsr,vjsr,vcleft;
LRD_RTF = LRD_R*LRD_temp/LRD_frdy;
vm = g_dev.vm[i2d];
m = g_dev.m[i2d];
h = g_dev.h[i2d];
j = g_dev.j[i2d];
d = g_dev.d[i2d];
f = g_dev.f[i2d];
b = g_dev.b[i2d];
g = g_dev.g[i2d];
xr = g_dev.xr[i2d];
xs1 = g_dev.xs1[i2d];
xs2 = g_dev.xs2[i2d];
zdv = g_dev.zdv[i2d];
ydv = g_dev.ydv[i2d];
nai = g_dev.nai[i2d];
ki = g_dev.ki[i2d];
nsr = g_dev.nsr[i2d];
nao = g_dev.nao[i2d];
ko = g_dev.ko[i2d];
cao = g_dev.cao[i2d];
cai = g_dev.cai[i2d];
jsr = g_dev.jsr[i2d];
caiont = g_dev.caiont[i2d];
BOOL = g_dev.BOOL[i2d];
tcicr = g_dev.tcicr[i2d];
tjsrol = g_dev.tjsrol[i2d];
dcaiont = g_dev.dcaiont[i2d];
/*------------------------------------------------------------------------
* setting local variables
*------------------------------------------------------------------------
*/
real fv = g_devF.vm[i2d];
/* Declare varying G's and E's */
LRD_Gkr = 0.02614*sqrt(ko/5.4);
LRD_Gks = 0.433*(1+0.6/(1+pow((0.000038/cai),1.4)));
LRD_Gki = 0.75*(sqrt(ko/5.4));
LRD_Gkatp = 0.000195/nicholsarea;
LRD_Ena = (LRD_RTF)*log(nao/nai);
LRD_Etca = 0.5*(LRD_RTF)*log(cao/cai);
LRD_Ekr = (LRD_RTF)*log(ko/ki);
LRD_Eks = (LRD_RTF)*log((ko+prnak*nao)/(ki+prnak*nai));
LRD_Eki = (LRD_RTF)*log(ko/ki);
LRD_Ekp = LRD_Eki;
LRD_Ekna = LRD_Ekr;
LRD_Ekatp = LRD_Ekr;
LRD_Ekdv = LRD_Ekr;
LRD_Ecan = LRD_Etca;
LRD_Enan = LRD_Ena;
/* Na current [15] */
am = 0.32*(vm+47.13)/(1-exp(-0.1*(vm+47.13)));
bm = 0.08*exp(-vm/11);
if (vm < -40) {
ah = 0.135*exp((80+vm)/-6.8);
bh = 3.56*exp(0.079*vm)+310000*exp(0.35*vm);
aj = (-127140*exp(0.2444*vm)-0.00003474*exp(-0.04391*vm))*((vm+37.78)/(1+exp(0.311*(vm+79.23))));
bj = (0.1212*exp(-0.01052*vm))/(1+exp(-0.1378*(vm+40.14)));
} else {
ah = 0;
bh = 1/(0.13*(1+exp((vm+10.66)/-11.1)));
aj = 0;
bj = (0.3*exp(-0.0000002535*vm))/(1+exp(-0.1*(vm+32)));
}
Ina=LRD_Gna*(m*m*m*h*j)*(vm-LRD_Ena);
/* L-type Calcium current [14,15] */
dss = 1/(1+exp(-(vm+10)/6.24));
taud = dss*(1-exp(-(vm+10)/6.24))/(0.035*(vm+10));
fss = (1/(1+exp((vm+32)/8)))+(0.6/(1+exp((50-vm)/20)));
tauf = 1/(0.0197*exp(-0.0337*0.0337*(vm+10)*(vm+10))+0.02);
Ibarca = pca*zca*zca*((vm*LRD_frdy)/(LRD_RTF))*((gacai*cai*exp((zca*vm)/(LRD_RTF))-gacao*cao)/(exp((zca*vm)/(LRD_RTF))-1));
Ibarna = pna*zna*zna*((vm*LRD_frdy)/(LRD_RTF))*((ganai*nai*exp((zna*vm)/(LRD_RTF))-ganao*nao)/(exp((zna*vm)/(LRD_RTF))-1));
Ibark = pk*zk*zk*((vm*LRD_frdy)/(LRD_RTF))*((gaki*ki*exp((zk*vm)/(LRD_RTF))-gako*ko)/(exp((zk*vm)/(LRD_RTF))-1));
fca = 1/(1+cai/kmca);
Ilca = d*f*fca*Ibarca;
Ilcana = d*f*fca*Ibarna;
Ilcak = d*f*fca*Ibark;
Ilcatot = Ilca+Ilcana+Ilcak;
/* T-type Calcium current [13] */
bss = 1/(1+exp(-(vm+14)/10.8));
taub = 3.7+6.1/(1+exp((vm+25)/4.5));
gss = 1/(1+exp((vm+60)/5.6));
if (vm<=0) {
taug = -0.875*vm+12;
} else {
taug = 12;
}
Itca = LRD_Gtca*b*b*g*(vm-LRD_Etca);
/* K current - Rapid [13] */
xrss = 1/(1+exp(-(vm+21.5)/7.5));
tauxr = 1/(0.00138*(vm+14.2)/(1-exp(-0.123*(vm+14.2)))+0.00061*(vm+38.9)/(exp(0.145*(vm+38.9))-1));
r = 1/(1+exp((vm+9)/22.4));
Ikr = LRD_Gkr*xr*r*(vm-LRD_Ekr);
/* K current - Slow [10,13] */
xs1ss = 1/(1+exp(-(vm-1.5)/16.7));
xs2ss = xs1ss;
tauxs1 = 1/(0.0000719*(vm+30)/(1-exp(-0.148*(vm+30)))+0.000131*(vm+30)/(exp(0.0687*(vm+30))-1));
tauxs2 = 4*tauxs1;
Iks = LRD_Gks*xs1*xs2*(vm-LRD_Eks);
/* K current - Time independent [15] */
aki = 1.02/(1+exp(0.2385*(vm-LRD_Eki-59.215)));
bki = (0.49124*exp(0.08032*(vm-LRD_Eki+5.476))+exp(0.06175*(vm-LRD_Eki-594.31)))/(1+exp(-0.5143*(vm-LRD_Eki+4.753)));
kin = aki/(aki+bki);
Ikti = LRD_Gki*kin*(vm-LRD_Eki);
/* K current - Plateau [15] */
kp = 1/(1+exp((7.488-vm)/5.98));
Ikp = LRD_Gkp*kp*(vm-LRD_Ekp);
/* Na-Ca exchanger [6,14,15] */
Inaca = c1*exp((gammas-1)*vm/(LRD_RTF))*((exp(vm/(LRD_RTF))*nai*nai*nai*cao-nao*nao*nao*cai)/(1+c2*exp((gammas-1)*vm/(LRD_RTF))*(exp(vm/(LRD_RTF))*nai*nai*nai*cao+nao*nao*nao*cai)));
/* Na-K pump [15] */
sigma = (exp(nao/67.3)-1)/7;
fnak = 1/(1+0.1245*exp((-0.1*vm)/(LRD_RTF))+0.0365*sigma*exp((-vm)/(LRD_RTF)));
Inak = Ibarnak*fnak*(1/(1+kmnai*kmnai/(nai*nai)))*(ko/(ko+kmko));
/* Sarcolemmal Ca pump [15] */
Ipca = (Ibarpca*cai)/(kmpca+cai);
/* Ca background current [15] */
Icab = LRD_Gcab*(vm-LRD_Ecan);
/* Na background current [15] */
Inab = LRD_Gnab*(vm-LRD_Enan);
/* Na activated K current [6] */
pona = 0.85/(1+pow((kdkna/nai),2.8));
pov = 0.8-(0.65/(1+exp((vm+125)/15)));
Ikna = LRD_ikna*LRD_Gkna*pona*pov*(vm-LRD_Ekna);
/* ATP sensitive K current [11] */
patp = 1/(1+(pow((atpi/katp),hatp)));
gkbaratp = LRD_Gkatp*patp*(pow((ko/4),natp));
Ikatp = LRD_ikatp*gkbaratp*(vm-LRD_Ekatp);
/* Non-specific Ca-activated current [14,15] */
Ibarnsna = pnsca*zna*zna*((vm*LRD_frdy)/(LRD_RTF))*((ganai*nai*exp((zna*vm)/(LRD_RTF))-ganao*nao)/(exp((zna*vm)/(LRD_RTF))-1));
Ibarnsk = pnsca*zk*zk*((vm*LRD_frdy)/(LRD_RTF))*((gaki*ki*exp((zk*vm)/(LRD_RTF))-gako*ko)/(exp((zk*vm)/(LRD_RTF))-1));
Insna = LRD_insna*Ibarnsna/(1+kmnsca*kmnsca*kmnsca/(cai*cai*cai));
Insk = LRD_insk*Ibarnsk/(1+kmnsca*kmnsca*kmnsca/(cai*cai*cai));
/* Transient outward current */
rvdv = exp(vm/100);
azdv = (10*exp((vm-40)/25))/(1+exp((vm-40)/25));
bzdv = (10*exp(-(vm+90)/25))/(1+exp(-(vm+90)/25));
tauzdv = 1/(azdv+bzdv);
zdvss = azdv/(azdv+bzdv);
aydv = 0.015/(1+exp((vm+60)/5));
bydv = (0.1*exp((vm+25)/5))/(1+exp((vm+25)/5));
tauydv = 1/(aydv+bydv);
ydvss = aydv/(aydv+bydv);
Ito = LRD_ito*LRD_Gitodv*zdv*zdv*zdv*ydv*rvdv*(vm-LRD_Ekdv);
/* Summing currents (inactive currents are set to zero with activation variables) */
naiont = Ina+Inab+Ilcana+3*Inak+3*Inaca+Insna;
kiont = Ikr+Iks+Ikti+Ikp+Ilcak-2*Inak+Insk+Ito+Ikna+Ikatp;
caiont = Ilca+Icab+Ipca-2*Inaca+Itca;
Itotal = LRD_cm*(naiont+kiont+caiont); /* uA/cm2 */
if (((t-tcicr)>80) && (vm<-30)) {
BOOL = 0;
g_dev.BOOL[i2d] = BOOL;
}
/* Put voltage update here */
fv += -Itotal;
g_devF.vm[i2d] = fv;
/* change in cleft concentration */
dnao = LRD_cleft*((nabm-nao)/taudiff+naiont*acap*LRD_cm/(vcleft*LRD_frdy));
dko = LRD_cleft*((kbm-ko)/taudiff+kiont*acap*LRD_cm/(vcleft*LRD_frdy));
dcao = LRD_cleft*((cabm-cao)/taudiff+caiont*acap*LRD_cm/(vcleft*LRD_frdy*2));
/* change in nai and ki concentration */
dnai = -LRD_cm*(naiont*acap)/(vmyo*zna*LRD_frdy); /* dnai/dt */
dki = -LRD_cm*(kiont*acap)/(vmyo*zk*LRD_frdy); /* dki/dt */
/* change in itr [14] */
itr = (nsr-jsr)/tautr;
/* change in nsr [14] */
kleak = iupbar/nsrbar;
ileak = kleak*nsr;
iup = iupbar*cai/(cai+kmup);
dnsr = (iup-ileak-itr*vjsr/vnsr); /* dnsr/dt */
/* Calcium-induced-calcium-release (CICR) criteria [6] */
if ((vm>-35) && (((caiont-caiontold)/dt)<dcaiont) && (BOOL==0)){
BOOL = 1;
tcicr = t;
g_dev.BOOL[i2d] = BOOL;
g_dev.tcicr[i2d] = tcicr; /* changes reference time */
}
on = 1/(1+exp((-(t-tcicr)+4)/.5));
off = 1-on;
magrel = 1/(1+exp(((Ilca+Icab+Ipca-2*Inaca+Itca)+5)/0.9));
irelcicr = gmaxrel*on*off*magrel*(jsr-cai);
/* JSR Calcium overload [13] */
greljsrol = grelbarjsrol*(1-exp(-(t-tjsrol)/tauon))*exp(-(t-tjsrol)/tauoff);
ireljsrol = greljsrol*(jsr-cai);
csqn = csqnbar*(jsr/(jsr+kmcsqn));
djsr = dt*(itr-irelcicr-ireljsrol);
bjsr = csqnbar-csqn-djsr-jsr+kmcsqn;
cjsr = kmcsqn*(csqn+djsr+jsr);
jsr =(sqrt(bjsr*bjsr+4*cjsr)-bjsr)/2;
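/* jsr above is the positive root of jsr^2 + bjsr*jsr - cjsr = 0, i.e. the
free junctional-SR calcium remaining after calsequestrin (csqn) buffering. */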
/* Calcium buffers in myoplasm [15] */
trpn = trpnbar*(cai/(cai+kmtrpn));
cmdn = cmdnbar*(cai/(cai+kmcmdn));
/* change in cai concentration [13] */
dcai = -dt*(((LRD_cm*caiont*acap)/(vmyo*zca*LRD_frdy))+((iup-ileak)*vnsr/vmyo)-(irelcicr*vjsr/vmyo)-(ireljsrol*vjsr/vmyo));
catotal = trpn+cmdn+dcai+cai;
bmyo = cmdnbar+trpnbar-catotal+kmtrpn+kmcmdn;
cmyo = (kmcmdn*kmtrpn)-(catotal*(kmtrpn+kmcmdn))+(trpnbar*kmcmdn)+(cmdnbar*kmtrpn);
dmyo = -kmtrpn*kmcmdn*catotal;
gpig = sqrt(bmyo*bmyo-3*cmyo);
cai = ((2*gpig/3)*cos(acos((9*bmyo*cmyo-2*bmyo*bmyo*bmyo-27*dmyo)/(2*pow((bmyo*bmyo-3*cmyo),1.5)))/3)-(bmyo/3));
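/* cai above is the real root of the buffering cubic x^3 + bmyo*x^2 + cmyo*x + dmyo = 0,
solved with the trigonometric form of Cardano's method; it gives the free
myoplasmic calcium after troponin (trpn) and calmodulin (cmdn) buffering. */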
/* Calcium overload criteria [15] */
if((csqn>=csqnth) && ((t-tjsrol)>50)){
printf("Spontaneous Release occured at time %lf at node %i\n",t,i);
tjsrol = t;
g_dev.tjsrol[i2d] = tjsrol; /* changes reference time */
}
g_devF.m[i2d] = am*(1.0-m) - bm*m;
g_devF.h[i2d] = ah*(1.0-h) - bh*h;
g_devF.j[i2d] = aj*(1.0-j) - bj*j;
g_devF.d[i2d] = (dss/taud)*(1-d)-(1-dss)*(d/taud);
g_devF.f[i2d] = (fss/tauf)*(1-f)-(1-fss)*(f/tauf);
g_devF.b[i2d] = (bss/taub)*(1-b)-(1-bss)*(b/taub);
g_devF.g[i2d] = (gss/taug)*(1-g)-(1-gss)*(g/taug);
g_devF.xr[i2d] = (xrss/tauxr)*(1-xr)-(1-xrss)*(xr/tauxr);
g_devF.xs1[i2d] = (xs1ss/tauxs1)*(1-xs1)-(1-xs1ss)*(xs1/tauxs1);
g_devF.xs2[i2d] = (xs2ss/tauxs2)*(1-xs2)-(1-xs2ss)*(xs2/tauxs2);
g_devF.zdv[i2d] = (zdvss/tauzdv)*(1-zdv)-(1-zdvss)*(zdv/tauzdv);
g_devF.ydv[i2d] = (ydvss/tauydv)*(1-ydv)-(1-ydvss)*(ydv/tauydv);
g_devF.nai[i2d] = dnai;
g_devF.ki[i2d] = dki;
g_devF.nsr[i2d] = dnsr;
g_devF.nao[i2d] = dnao;
g_devF.ko[i2d] = dko;
g_devF.cao[i2d] = dcao;
/* assign Temp variables to memory */
g_devF.caiont[i2d] = caiont;
g_devF.cai[i2d] = cai;
g_devF.jsr[i2d] = jsr;
g_devF.dcaiont[i2d] = (caiont-caiontold)/dt;
} |
f0253927861febabb9c93d9ae66a1061b00ff1fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef DEDISPERSE_KERNEL_H_
#define DEDISPERSE_KERNEL_H_
#include <assert.h>
#include <iostream>
#include "stdio.h"
#include "DedispersionParameters.h"
// Stores temporary shift values
//__device__ __constant__ float dm_shifts[8192];
//__device__ __constant__ int i_nsamp, i_maxshift, i_nchans;
//__device__ __shared__ float f_line[ARRAYSIZE];
//{{{ global_for_time_dedisperse_loop
__global__ void cache_dedisperse_loop(float *outbuff, float *buff, float mstartdm,
float mdmstep, const float* dm_shifts,
const int i_nsamp, const int i_maxshift,
const int i_nchans )
{
int shift;
float local_kernel_t[NUMREG];
int t = blockIdx.x * NUMREG * DIVINT + threadIdx.x;
// Initialise the time accumulators
for(int i = 0; i < NUMREG; i++) local_kernel_t[i] = 0.0f;
float shift_temp = mstartdm + ((blockIdx.y * DIVINDM + threadIdx.y) * mdmstep);
// Loop over the frequency channels.
for(int c = 0; c < i_nchans; c++) {
// Calculate the initial shift for this given frequency
// channel (c) at the current dispersion measure (dm)
// ** dm is constant for this thread!!**
shift = (c * i_nsamp + t) + __float2int_rz (dm_shifts[c] * shift_temp);
#pragma unroll
for(int i = 0; i < NUMREG; i++) {
local_kernel_t[i] += buff[shift + (i * DIVINT) ];
//local_kernel_t[i] += __ldg(&buff[shift + (i * DIVINT) ]);
}
}
// Write the accumulators to the output array.
#pragma unroll
for(int i = 0; i < NUMREG; i++) {
outbuff[((blockIdx.y * DIVINDM) + threadIdx.y)* (i_nsamp-i_maxshift) + (i * DIVINT) + (NUMREG * DIVINT * blockIdx.x) + threadIdx.x] = local_kernel_t[i];
}
}
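// Layout assumed by the indexing above: buff holds i_nchans * i_nsamp input
// samples in channel-major order, and outbuff holds (i_nsamp - i_maxshift)
// de-dispersed samples per DM trial; each thread accumulates NUMREG output
// samples spaced DIVINT apart for the single DM trial indexed by
// blockIdx.y * DIVINDM + threadIdx.y.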
/// C Wrapper for brute-force algo
extern "C" void cacheDedisperseLoop( float *outbuff, long outbufSize, float *buff, float mstartdm,
float mdmstep, int tdms, int numSamples,
const float* dmShift,
const int maxshift,
const int i_nchans ) {
hipMemset(outbuff, 0, outbufSize );
int divisions_in_t = DIVINT;
int divisions_in_dm = DIVINDM - (tdms%DIVINDM); // ensure divides exactly into
// our dm parameter space
int num_reg = NUMREG;
int num_blocks_t = (numSamples - maxshift)/(divisions_in_t * num_reg);
int num_blocks_dm = tdms/divisions_in_dm;
/*
std::cout << "\nnumSamples\t" << numSamples << std::endl;
std::cout << "\ndivisions_in_t\t" << divisions_in_t << std::endl;
std::cout << "\ndivisions_in_dm\t" << divisions_in_dm << std::endl;
std::cout << "\nnum_reg\t" << num_reg << std::endl;
std::cout << "\nnum_blocks_t\t" << num_blocks_t << std::endl;
std::cout << "\nnum_blocks_dm\t" << num_blocks_dm << std::endl;
std::cout << "\ntdms\t" << tdms << std::endl;
std::cout << "mdmstep\t" << mdmstep << std::endl;
std::cout << "mstartdm\t" << mstartdm << std::endl;
std::cout << "buff\t" << buff << std::endl;
std::cout << "outbuff\t" << outbuff << std::endl;
*/
dim3 threads_per_block(divisions_in_t, divisions_in_dm);
dim3 num_blocks(num_blocks_t,num_blocks_dm);
hipLaunchKernelGGL(( cache_dedisperse_loop), dim3(num_blocks), dim3(threads_per_block) , 0, 0, outbuff, buff,
mstartdm, mdmstep, dmShift, numSamples, maxshift, i_nchans );
}
#endif
| f0253927861febabb9c93d9ae66a1061b00ff1fc.cu | #ifndef DEDISPERSE_KERNEL_H_
#define DEDISPERSE_KERNEL_H_
#include <assert.h>
#include <iostream>
#include "stdio.h"
#include "DedispersionParameters.h"
// Stores temporary shift values
//__device__ __constant__ float dm_shifts[8192];
//__device__ __constant__ int i_nsamp, i_maxshift, i_nchans;
//__device__ __shared__ float f_line[ARRAYSIZE];
//{{{ global_for_time_dedisperse_loop
__global__ void cache_dedisperse_loop(float *outbuff, float *buff, float mstartdm,
float mdmstep, const float* dm_shifts,
const int i_nsamp, const int i_maxshift,
const int i_nchans )
{
int shift;
float local_kernel_t[NUMREG];
int t = blockIdx.x * NUMREG * DIVINT + threadIdx.x;
// Initialise the time accumulators
for(int i = 0; i < NUMREG; i++) local_kernel_t[i] = 0.0f;
float shift_temp = mstartdm + ((blockIdx.y * DIVINDM + threadIdx.y) * mdmstep);
// Loop over the frequency channels.
for(int c = 0; c < i_nchans; c++) {
// Calculate the initial shift for this given frequency
// channel (c) at the current dispersion measure (dm)
// ** dm is constant for this thread!!**
shift = (c * i_nsamp + t) + __float2int_rz (dm_shifts[c] * shift_temp);
#pragma unroll
for(int i = 0; i < NUMREG; i++) {
local_kernel_t[i] += buff[shift + (i * DIVINT) ];
//local_kernel_t[i] += __ldg(&buff[shift + (i * DIVINT) ]);
}
}
// Write the accumulators to the output array.
#pragma unroll
for(int i = 0; i < NUMREG; i++) {
outbuff[((blockIdx.y * DIVINDM) + threadIdx.y)* (i_nsamp-i_maxshift) + (i * DIVINT) + (NUMREG * DIVINT * blockIdx.x) + threadIdx.x] = local_kernel_t[i];
}
}
/// C Wrapper for brute-force algo
extern "C" void cacheDedisperseLoop( float *outbuff, long outbufSize, float *buff, float mstartdm,
float mdmstep, int tdms, int numSamples,
const float* dmShift,
const int maxshift,
const int i_nchans ) {
cudaMemset(outbuff, 0, outbufSize );
int divisions_in_t = DIVINT;
int divisions_in_dm = DIVINDM - (tdms%DIVINDM); // ensure divides exactly into
// our dm parameter space
int num_reg = NUMREG;
int num_blocks_t = (numSamples - maxshift)/(divisions_in_t * num_reg);
int num_blocks_dm = tdms/divisions_in_dm;
/*
std::cout << "\nnumSamples\t" << numSamples << std::endl;
std::cout << "\ndivisions_in_t\t" << divisions_in_t << std::endl;
std::cout << "\ndivisions_in_dm\t" << divisions_in_dm << std::endl;
std::cout << "\nnum_reg\t" << num_reg << std::endl;
std::cout << "\nnum_blocks_t\t" << num_blocks_t << std::endl;
std::cout << "\nnum_blocks_dm\t" << num_blocks_dm << std::endl;
std::cout << "\ntdms\t" << tdms << std::endl;
std::cout << "mdmstep\t" << mdmstep << std::endl;
std::cout << "mstartdm\t" << mstartdm << std::endl;
std::cout << "buff\t" << buff << std::endl;
std::cout << "outbuff\t" << outbuff << std::endl;
*/
dim3 threads_per_block(divisions_in_t, divisions_in_dm);
dim3 num_blocks(num_blocks_t,num_blocks_dm);
cache_dedisperse_loop<<< num_blocks, threads_per_block >>>( outbuff, buff,
mstartdm, mdmstep, dmShift, numSamples, maxshift, i_nchans );
}
#endif
|
9aba147809b201cb0c77dfa3b65e4c0e3411f5f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
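// Per spatial position the forward loss is -log(max(p[label], FLT_MIN)), where
// p is the softmax probability of the ground-truth class; positions whose label
// equals ignore_label_ contribute zero loss and a zero valid count.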
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
| 9aba147809b201cb0c77dfa3b65e4c0e3411f5f6.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
|
775aaf340b4109cd9e11eea4ffd5b55f146c686d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zjacobisetup.cu normal z -> d, Wed Sep 17 15:08:43 2014
@author Hartwig Anzt
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
__global__ void
dvjacobisetup_gpu( int num_rows,
double *b,
double *d,
double *c,
double *x){
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
c[row] = b[row] / d[row];
x[row] = c[row];
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
b magma_d_vector
RHS b
@param
d magma_d_vector
vector with diagonal entries
@param
c magma_d_vector*
c = D^(-1) * b
@param
x magma_d_vector*
iteration vector
@ingroup magmasparse_dgegpuk
********************************************************************/
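// Concretely, dvjacobisetup_gpu computes c[row] = b[row] / d[row], i.e.
// c = D^(-1) * b, and initializes the iterate with x^(0) = c.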
extern "C" magma_int_t
magma_djacobisetup_vector_gpu( int num_rows,
double *b,
double *d,
double *c,
double *x ){
dim3 grid( (num_rows+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( dvjacobisetup_gpu), dim3(grid), dim3(BLOCK_SIZE), 0 , 0, num_rows, b, d, c, x );
return MAGMA_SUCCESS;
}
__global__ void
djacobidiagscal_kernel( int num_rows,
double *b,
double *d,
double *c){
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
c[row] = b[row] * d[row];
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
b magma_d_vector
RHS b
@param
d magma_d_vector
vector with diagonal entries
@param
c magma_d_vector*
c = D^(-1) * b
@ingroup magmasparse_d
********************************************************************/
extern "C" magma_int_t
magma_djacobi_diagscal( int num_rows,
double *b,
double *d,
double *c){
dim3 grid( (num_rows+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( djacobidiagscal_kernel), dim3(grid), dim3(BLOCK_SIZE), 0 , 0, num_rows, b, d, c );
return MAGMA_SUCCESS;
}
| 775aaf340b4109cd9e11eea4ffd5b55f146c686d.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zjacobisetup.cu normal z -> d, Wed Sep 17 15:08:43 2014
@author Hartwig Anzt
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
__global__ void
dvjacobisetup_gpu( int num_rows,
double *b,
double *d,
double *c,
double *x){
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
c[row] = b[row] / d[row];
x[row] = c[row];
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
b magma_d_vector
RHS b
@param
d magma_d_vector
vector with diagonal entries
@param
c magma_d_vector*
c = D^(-1) * b
@param
x magma_d_vector*
iteration vector
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_djacobisetup_vector_gpu( int num_rows,
double *b,
double *d,
double *c,
double *x ){
dim3 grid( (num_rows+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
dvjacobisetup_gpu<<< grid, BLOCK_SIZE, 0 >>>( num_rows, b, d, c, x );
return MAGMA_SUCCESS;
}
__global__ void
djacobidiagscal_kernel( int num_rows,
double *b,
double *d,
double *c){
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
c[row] = b[row] * d[row];
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
b magma_d_vector
RHS b
@param
d magma_d_vector
vector with diagonal entries
@param
c magma_d_vector*
c = D^(-1) * b
@ingroup magmasparse_d
********************************************************************/
extern "C" magma_int_t
magma_djacobi_diagscal( int num_rows,
double *b,
double *d,
double *c){
dim3 grid( (num_rows+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
djacobidiagscal_kernel<<< grid, BLOCK_SIZE, 0 >>>( num_rows, b, d, c );
return MAGMA_SUCCESS;
}
|
f6bebac41e5c1b299476dbd814afd2a4801a7110.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
__global__ void interpolate(float * x, float * y, float a, float * k, int n){
int i,j;
i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float ss[100], ts[100], ks[100];
if(i<n)
{
ss[i]=1;
ts[i]=1;
__syncthreads();
for(j=0;j<n;j++)
{
if(j!=i)
{
ss[i]=ss[i]*(a-x[j]);
ts[i]=ts[i]*(x[i]-x[j]);
}
}
ks[i]=(((ss[i])/(ts[i]))*y[i]);
__syncthreads();
if(i==0){
for(i=0;i<(n*n);i++){
*k += ks[i];
}
}
}
}
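// The kernel evaluates the Lagrange interpolation polynomial
//   y(a) = sum_i y[i] * prod_{j != i} (a - x[j]) / (x[i] - x[j]),
// with ss[i] and ts[i] holding the numerator and denominator products of
// term i, and thread 0 accumulating the per-term contributions ks[] into *k.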
int main()
{
float *x, *y, *d_x, *d_y;
float a, k, *d_k;
size_t size = 100 * sizeof(float);
hipEvent_t start, stop;
x = new float[100];
y = new float[100];
hipMalloc(&d_x, size);
hipMalloc(&d_y, size);
hipMalloc(&d_k, sizeof(float));
hipEventCreate(&start);
hipEventCreate(&stop);
int N,i,d=1;
printf("\n\n Enter the number of the terms of the table: ");
scanf("%d",&N);
printf("%d", N);
printf("\n\n Enter the respective values of the variables x and y: \n");
for(i=0; i<N; i++)
{
scanf ("%f",&x[i]);
scanf("%f",&y[i]);
}
printf("\n\n The table you entered is as follows :\n\n");
for(i=0; i<N; i++)
{
printf("%0.3f\t%0.3f",x[i],y[i]);
printf("\n");
}
hipMemcpy(d_x, x, size, hipMemcpyHostToDevice);
hipMemcpy(d_y, y, size, hipMemcpyHostToDevice);
//while(d==1)
//{
printf(" \n\n\n Enter the value of the x to find the respective value of y\n\n\n");
scanf("%f",&a);
printf("%f\n",a);
int threads_per_block = 32; // A 16 x 16 block threads
int number_of_blocks = N/threads_per_block + 1;
//dim3 threads_per_block(3, 3); // A 16 x 16 block threads
//dim3 number_of_blocks(N/threads_per_block.x + 1, N/threads_per_block.y + 1);
hipEventRecord(start);
hipLaunchKernelGGL(( interpolate), dim3(number_of_blocks),dim3(threads_per_block), 0, 0, d_x, d_y, a, d_k, N);
hipError_t err;
err = hipGetLastError(); // `hipGetLastError` will return the error from above.
if (err != hipSuccess)
{
printf("Error: %s\n", hipGetErrorString(err));
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
hipMemcpy(&k, d_k, sizeof(float), hipMemcpyDeviceToHost);
printf("\n\n The respective value of the variable y is: %f\n", k);
printf(" Elapsed time in milliseconds: %f\n", milliseconds);
printf("\n\n Do you want to continue?\n\n Press 1 to continue and any other key to exit");
scanf("%d",&d);
//}
delete [] x;
delete [] y;
hipFree(d_x);
hipFree(d_y);
}
| f6bebac41e5c1b299476dbd814afd2a4801a7110.cu | #include<stdio.h>
__global__ void interpolate(float * x, float * y, float a, float * k, int n){
int i,j;
i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float ss[100], ts[100], ks[100];
if(i<n)
{
ss[i]=1;
ts[i]=1;
__syncthreads();
for(j=0;j<n;j++)
{
if(j!=i)
{
ss[i]=ss[i]*(a-x[j]);
ts[i]=ts[i]*(x[i]-x[j]);
}
}
ks[i]=(((ss[i])/(ts[i]))*y[i]);
__syncthreads();
if(i==0){
for(i=0;i<(n*n);i++){
*k += ks[i];
}
}
}
}
int main()
{
float *x, *y, *d_x, *d_y;
float a, k, *d_k;
size_t size = 100 * sizeof(float);
cudaEvent_t start, stop;
x = new float[100];
y = new float[100];
cudaMalloc(&d_x, size);
cudaMalloc(&d_y, size);
cudaMalloc(&d_k, sizeof(float));
cudaEventCreate(&start);
cudaEventCreate(&stop);
int N,i,d=1;
printf("\n\n Enter the number of the terms of the table: ");
scanf("%d",&N);
printf("%d", N);
printf("\n\n Enter the respective values of the variables x and y: \n");
for(i=0; i<N; i++)
{
scanf ("%f",&x[i]);
scanf("%f",&y[i]);
}
printf("\n\n The table you entered is as follows :\n\n");
for(i=0; i<N; i++)
{
printf("%0.3f\t%0.3f",x[i],y[i]);
printf("\n");
}
cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);
//while(d==1)
//{
printf(" \n\n\n Enter the value of the x to find the respective value of y\n\n\n");
scanf("%f",&a);
printf("%f\n",a);
int threads_per_block = 32; // A 16 x 16 block threads
int number_of_blocks = N/threads_per_block + 1;
//dim3 threads_per_block(3, 3); // A 16 x 16 block threads
//dim3 number_of_blocks(N/threads_per_block.x + 1, N/threads_per_block.y + 1);
cudaEventRecord(start);
interpolate<<<number_of_blocks,threads_per_block>>>(d_x, d_y, a, d_k, N);
cudaError_t err;
err = cudaGetLastError(); // `cudaGetLastError` will return the error from above.
if (err != cudaSuccess)
{
printf("Error: %s\n", cudaGetErrorString(err));
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cudaMemcpy(&k, d_k, sizeof(float), cudaMemcpyDeviceToHost);
printf("\n\n The respective value of the variable y is: %f\n", k);
printf(" Elapsed time in milliseconds: %f\n", milliseconds);
printf("\n\n Do you want to continue?\n\n Press 1 to continue and any other key to exit");
scanf("%d",&d);
//}
delete [] x;
delete [] y;
cudaFree(d_x);
cudaFree(d_y);
}
|
917e62db601d425b7c1e4a427b1c575efa74864e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <iostream>
#include <pperm.hh>
__global__ void genperm_lex_device(int n, int prefix_len, int* counter) {
int task_idx = threadIdx.x + blockIdx.x * blockDim.x;
int perm_s = 0;
int8_t a[MAX_N];
if (idx2prefix(n, prefix_len, task_idx, a)) {
while (true) {
++perm_s;
bool Flag = false;
for (int i(n - 2); i >= 0; i--)
if (a[i] < a[i + 1]) {
int x = i + 1;
for (int j(i + 2); j < n; j++)
if (a[j] > a[i] && a[j] < a[x]) x = j;
Flag = true;
swap(a[i], a[x]);
int R = n - 1;
for (int L(i + 1); L < R;) swap(a[L++], a[R--]);
break;
}
if (!Flag) break;
}
}
#pragma unroll
for (int i = 1; i < 32; i <<= 1) {
perm_s += __shfl_sync(FULL_MASK, perm_s, (threadIdx.x + i) % 32, 32);
}
if (threadIdx.x == 0) {
counter[blockIdx.x] = 0;
}
__syncthreads();
if (threadIdx.x % 32 == 0) {
atomicAdd(counter + blockIdx.x, perm_s);
}
}
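// Each thread enumerates, in lexicographic order, every permutation that
// shares its assigned prefix: find the rightmost i with a[i] < a[i+1], swap
// a[i] with the smallest larger element to its right, reverse the suffix,
// and stop once no such i exists; warp shuffles then reduce the per-thread
// counts before the per-block atomicAdd.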
class LexGpu: public PermAlgorithm<LexGpu> {
GENERATE_CONSTRUCTOR(LexGpu)
private:
GPU_ALGO_ARGS
protected:
void setup_() override {
SETUP_GPU_ALGO()
}
public:
template <typename F>
void do_generate_(F&& callback) {
LAUNCH_GPU_ALGO(genperm_lex_device);
}
};
REGISTER_PERM_ALGORITHM("lex_gpu", LexGpu)
| 917e62db601d425b7c1e4a427b1c575efa74864e.cu | #include <algorithm>
#include <iostream>
#include <pperm.hh>
__global__ void genperm_lex_device(int n, int prefix_len, int* counter) {
int task_idx = threadIdx.x + blockIdx.x * blockDim.x;
int perm_s = 0;
int8_t a[MAX_N];
if (idx2prefix(n, prefix_len, task_idx, a)) {
while (true) {
++perm_s;
bool Flag = false;
for (int i(n - 2); i >= 0; i--)
if (a[i] < a[i + 1]) {
int x = i + 1;
for (int j(i + 2); j < n; j++)
if (a[j] > a[i] && a[j] < a[x]) x = j;
Flag = true;
swap(a[i], a[x]);
int R = n - 1;
for (int L(i + 1); L < R;) swap(a[L++], a[R--]);
break;
}
if (!Flag) break;
}
}
#pragma unroll
for (int i = 1; i < 32; i <<= 1) {
perm_s += __shfl_sync(FULL_MASK, perm_s, (threadIdx.x + i) % 32, 32);
}
if (threadIdx.x == 0) {
counter[blockIdx.x] = 0;
}
__syncthreads();
if (threadIdx.x % 32 == 0) {
atomicAdd(counter + blockIdx.x, perm_s);
}
}
class LexGpu: public PermAlgorithm<LexGpu> {
GENERATE_CONSTRUCTOR(LexGpu)
private:
GPU_ALGO_ARGS
protected:
void setup_() override {
SETUP_GPU_ALGO()
}
public:
template <typename F>
void do_generate_(F&& callback) {
LAUNCH_GPU_ALGO(genperm_lex_device);
}
};
REGISTER_PERM_ALGORITHM("lex_gpu", LexGpu)
|
b7ebf1420a9562536fd3c48c110b95ebd49f6e52.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#define BLOCK_SIZE 256
#define ELTS_PER_THREAD 4
#define DEBUG
#ifdef DEBUG
#define cudaCheckError(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr, "CUDA Error: %s at %s:%d\n",
hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#else
#define cudaCheckError(ans) ans
#endif
__global__ void d_compact(float * d_classDistrTable, size_t * d_addr, bool * d_flag, float * d_buffer, size_t data_size) {
unsigned int iGlobal = blockIdx.x * (blockDim.x << 2) + threadIdx.x;
if (iGlobal < data_size && d_flag[iGlobal] == true) {
d_buffer[d_addr[iGlobal]] = d_classDistrTable[iGlobal];
}
iGlobal += blockDim.x;
if (iGlobal < data_size && d_flag[iGlobal] == true) {
d_buffer[d_addr[iGlobal]] = d_classDistrTable[iGlobal];
}
iGlobal += blockDim.x;
if (iGlobal < data_size && d_flag[iGlobal] == true) {
d_buffer[d_addr[iGlobal]] = d_classDistrTable[iGlobal];
}
iGlobal += blockDim.x;
if (iGlobal < data_size && d_flag[iGlobal] == true) {
d_buffer[d_addr[iGlobal]] = d_classDistrTable[iGlobal];
}
}
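// Each thread scatters up to ELTS_PER_THREAD flagged elements, strided by
// blockDim.x, into d_buffer at the destination index given by d_addr. In a
// full stream compaction d_addr would come from an exclusive scan of d_flag;
// the benchmark in main() simply fills it with pseudo-random addresses.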
void compactCPU(float * h_classDistrTable, size_t * h_addr, bool * h_flag, size_t data_size) {
float * h_buffer;
h_buffer = (float *) malloc(data_size * sizeof(float));
for (size_t idx = 0; idx < data_size; ++idx) {
if (h_flag[idx] == true) {
h_buffer[h_addr[idx]] = h_classDistrTable[idx];
};
}
memcpy(h_classDistrTable, h_buffer, sizeof(float) * data_size);
free(h_buffer);
}
void compactGPU(float * h_classDistrTable, size_t * h_addr, bool * h_flag, size_t data_size) {
size_t num_blocks = max(1, (int)ceil((float)data_size /
((float)ELTS_PER_THREAD * BLOCK_SIZE)));
float * d_classDistrTable;
size_t * d_addr;
bool * d_flag;
float * d_buffer;
cudaCheckError( hipMalloc(&d_classDistrTable, sizeof(float) * data_size) );
cudaCheckError( hipMalloc(&d_addr, sizeof(size_t) * data_size) );
cudaCheckError( hipMalloc(&d_flag, sizeof(bool) * data_size) );
cudaCheckError( hipMalloc(&d_buffer, sizeof(float) * data_size) );
hipMemcpy(d_classDistrTable, h_classDistrTable, data_size * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_addr, h_addr, data_size * sizeof(size_t), hipMemcpyHostToDevice);
hipMemcpy(d_flag, h_flag, data_size * sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( d_compact), dim3(num_blocks), dim3(BLOCK_SIZE), 0, 0, d_classDistrTable, d_addr, d_flag, d_buffer, data_size);
hipMemcpy(h_classDistrTable, d_buffer, data_size * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_classDistrTable);
hipFree(d_addr);
hipFree(d_flag);
hipFree(d_buffer);
}
bool randomBool() {
return rand() % 2 == 1;
}
int main(int argc, char * argv[]) {
float * classDistrTable;
size_t * addr;
bool * flag;
size_t data_size = atoi(argv[1]);
classDistrTable = (float *) malloc(data_size * sizeof(float));
addr = (size_t *) malloc(data_size * sizeof(size_t));
flag = (bool *) malloc(data_size * sizeof(bool));
for (size_t idx = 0; idx < data_size; ++idx) {
classDistrTable[idx] = 1.0 * idx;
addr[idx] = (idx + rand()) % data_size;
flag[idx] = randomBool();
}
float milliseconds = 0;
float milliseconds_squared = 0;
float diff = 0;
int N = 100;
for (size_t idx = 0; idx < N; ++idx){
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
const clock_t begin_time = clock();
compactGPU(classDistrTable, addr, flag, data_size);
diff = float(clock() - begin_time) / 1000;
milliseconds += diff;
milliseconds_squared += diff * diff;
}
std::cout << "mean=" << milliseconds / N << " std=" << (milliseconds_squared - milliseconds * milliseconds / N) / N << std::endl;
milliseconds = 0;
milliseconds_squared = 0;
N = 100;
for (size_t idx = 0; idx < N; ++idx){
const clock_t begin_time = clock();
compactCPU(classDistrTable, addr, flag, data_size);
diff = float(clock() - begin_time) / 1000;
milliseconds += diff;
milliseconds_squared += diff * diff;
}
std::cout << "mean=" << milliseconds / N << " std=" << (milliseconds_squared - milliseconds * milliseconds / N) / N << std::endl;
return 0;
}
| b7ebf1420a9562536fd3c48c110b95ebd49f6e52.cu | #include <iostream>
#include <stdio.h>
#include <cuda.h>
#define BLOCK_SIZE 256
#define ELTS_PER_THREAD 4
#define DEBUG
#ifdef DEBUG
#define cudaCheckError(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "CUDA Error: %s at %s:%d\n",
cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#else
#define cudaCheckError(ans) ans
#endif
__global__ void d_compact(float * d_classDistrTable, size_t * d_addr, bool * d_flag, float * d_buffer, size_t data_size) {
unsigned int iGlobal = blockIdx.x * (blockDim.x << 2) + threadIdx.x;
if (iGlobal < data_size && d_flag[iGlobal] == true) {
d_buffer[d_addr[iGlobal]] = d_classDistrTable[iGlobal];
}
iGlobal += blockDim.x;
if (iGlobal < data_size && d_flag[iGlobal] == true) {
d_buffer[d_addr[iGlobal]] = d_classDistrTable[iGlobal];
}
iGlobal += blockDim.x;
if (iGlobal < data_size && d_flag[iGlobal] == true) {
d_buffer[d_addr[iGlobal]] = d_classDistrTable[iGlobal];
}
iGlobal += blockDim.x;
if (iGlobal < data_size && d_flag[iGlobal] == true) {
d_buffer[d_addr[iGlobal]] = d_classDistrTable[iGlobal];
}
}
void compactCPU(float * h_classDistrTable, size_t * h_addr, bool * h_flag, size_t data_size) {
float * h_buffer;
h_buffer = (float *) malloc(data_size * sizeof(float));
for (size_t idx = 0; idx < data_size; ++idx) {
if (h_flag[idx] == true) {
h_buffer[h_addr[idx]] = h_classDistrTable[idx];
};
}
memcpy(h_classDistrTable, h_buffer, sizeof(float) * data_size);
free(h_buffer);
}
void compactGPU(float * h_classDistrTable, size_t * h_addr, bool * h_flag, size_t data_size) {
size_t num_blocks = max(1, (int)ceil((float)data_size /
((float)ELTS_PER_THREAD * BLOCK_SIZE)));
float * d_classDistrTable;
size_t * d_addr;
bool * d_flag;
float * d_buffer;
cudaCheckError( cudaMalloc(&d_classDistrTable, sizeof(float) * data_size) );
cudaCheckError( cudaMalloc(&d_addr, sizeof(size_t) * data_size) );
cudaCheckError( cudaMalloc(&d_flag, sizeof(bool) * data_size) );
cudaCheckError( cudaMalloc(&d_buffer, sizeof(float) * data_size) );
cudaMemcpy(d_classDistrTable, h_classDistrTable, data_size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_addr, h_addr, data_size * sizeof(size_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_flag, h_flag, data_size * sizeof(bool), cudaMemcpyHostToDevice);
d_compact<<<num_blocks, BLOCK_SIZE>>>(d_classDistrTable, d_addr, d_flag, d_buffer, data_size);
cudaMemcpy(h_classDistrTable, d_buffer, data_size * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_classDistrTable);
cudaFree(d_addr);
cudaFree(d_flag);
cudaFree(d_buffer);
}
bool randomBool() {
return rand() % 2 == 1;
}
int main(int argc, char * argv[]) {
float * classDistrTable;
size_t * addr;
bool * flag;
size_t data_size = atoi(argv[1]);
classDistrTable = (float *) malloc(data_size * sizeof(float));
addr = (size_t *) malloc(data_size * sizeof(size_t));
flag = (bool *) malloc(data_size * sizeof(bool));
for (size_t idx = 0; idx < data_size; ++idx) {
classDistrTable[idx] = 1.0 * idx;
addr[idx] = (idx + rand()) % data_size;
flag[idx] = randomBool();
}
float milliseconds = 0;
float milliseconds_squared = 0;
float diff = 0;
int N = 100;
for (size_t idx = 0; idx < N; ++idx){
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
const clock_t begin_time = clock();
compactGPU(classDistrTable, addr, flag, data_size);
diff = float(clock() - begin_time) / 1000;
milliseconds += diff;
milliseconds_squared += diff * diff;
}
std::cout << "mean=" << milliseconds / N << " std=" << (milliseconds_squared - milliseconds * milliseconds / N) / N << std::endl;
milliseconds = 0;
milliseconds_squared = 0;
N = 100;
for (size_t idx = 0; idx < N; ++idx){
const clock_t begin_time = clock();
compactCPU(classDistrTable, addr, flag, data_size);
diff = float(clock() - begin_time) / 1000;
milliseconds += diff;
milliseconds_squared += diff * diff;
}
std::cout << "mean=" << milliseconds / N << " std=" << (milliseconds_squared - milliseconds * milliseconds / N) / N << std::endl;
return 0;
}
|
c693e74aee26e3fdb9aeacd2a76bc3b2c8ff3cc4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include "graphic.h"
#define TX 32
#define TY 32
__device__ float2 vectorScale(float2 vec, float size) {
float s = vec.x * vec.x + vec.y*vec.y;
s = sqrt(s);
vec.x = vec.x / s * size;
vec.y = vec.y / s * size;
return vec;
}
__device__ float2 velocity(POS pos, POS speed, int screenW, int screenH) {
float2 vel;
float2 vec, gravity;
float x, y, c;
x = pos.x - screenW / 2;
y = pos.y - screenH / 2;
gravity.x = -x; gravity.y = -y;
gravity = vectorScale(gravity, speed.y);
// Tangential direction vector, perpendicular to the radius from the origin (it extends to the x- or y-axis)
if (x == 0) {// y-axis
vec.x = speed.x;
if (y > 0)
vec.x *= -1;
vec.y = 0;
}
else if (y == 0) {// x-axis
vec.x = 0;
vec.y = speed.x;
if (x > 0)
vec.y *= -1;
}
else if (x > 0 && y > 0 || x < 0 && y < 0) {// quadrants 1 and 3. (c,0)
c = (x * x + y * y) / x;
vec.x = c - x;
vec.y = -y;
vec = vectorScale(vec, speed.x);
}
else if (x > 0 && y < 0 || x < 0 && y > 0) {// quadrants 2 and 4. (0,c)
c = (x * x + y * y) / y;
vec.x = -x;
vec.y = c - y;
vec = vectorScale(vec, speed.x);
}
//if (x < speed.x && y < speed.x && x > -speed.x && y > -speed.x) {
// gravity.x *= -100;
// gravity.y *= -100;
//}
vel.x = vec.x + gravity.x;
vel.y = vec.y + gravity.y;
return vel;
}
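// velocity() adds a tangential component (perpendicular to the radius from the
// screen centre, scaled to speed.x) to a centre-directed pull scaled to speed.y,
// so each star circles the centre while slowly drifting inwards.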
// Kernel function that runs on the GPU and is callable from the CPU.
__global__ void starKernel(POS *pos, POS* speed, int w, int h, int screenW, int screenH)
{
const int c = blockIdx.x * blockDim.x + threadIdx.x; // block index * block size + thread index within the block
const int r = blockIdx.y * blockDim.y + threadIdx.y; // block index * block size + thread index within the block
// Threads are launched as 512*512 (multiples of TX, TY), not 500*500.
// Do not compute data outside the desired range.
if ((c >= w) || (r >= h)) return;
const int i = r * w + c; // index (id) of this thread among all threads
float2 vel = velocity(pos[i],speed[i], screenW, screenH);
pos[i].x += vel.x ;
pos[i].y += vel.y ;
/*pos[i].x += vel[i].x;
pos[i].y += vel[i].y;
if (pos[i].x > screenW || pos[i].x < 0)
pos[i].x = screenW / 2;
if (pos[i].y > screenH || pos[i].y < 0)
pos[i].y = screenH / 2;*/
}
// CPU function that launches the kernel.
void kernelLauncher_star(POS *pos, POS* vel, int w, int h, int screenW, int screenH) {
// Block size: TX threads wide, TY threads tall.
const dim3 blockSize(TX, TY);
// Grid, i.e. the number of thread blocks: there are grid.x * grid.y blocks.
const dim3 gridSize = dim3((w + TX - 1) / TX, (h + TY - 1) / TY);
// Kernel launch: uses gridSize thread blocks, each of size blockSize.
// Common arguments: GPU memory pointers, width, height, reference point.
//starKernel << <gridSize, blockSize >> > (pos, vel, w, h);
starKernel KERNEL_ARGS2(gridSize, blockSize) (pos, vel, w, h, screenW , screenH);
} | c693e74aee26e3fdb9aeacd2a76bc3b2c8ff3cc4.cu | #include "kernel.h"
#include "graphic.h"
#define TX 32
#define TY 32
__device__ float2 vectorScale(float2 vec, float size) {
float s = vec.x * vec.x + vec.y*vec.y;
s = sqrt(s);
vec.x = vec.x / s * size;
vec.y = vec.y / s * size;
return vec;
}
__device__ float2 velocity(POS pos, POS speed, int screenW, int screenH) {
float2 vel;
float2 vec, gravity;
float x, y, c;
x = pos.x - screenW / 2;
y = pos.y - screenH / 2;
gravity.x = -x; gravity.y = -y;
gravity = vectorScale(gravity, speed.y);
//Tangential direction vector, perpendicular to the radius from the origin (it extends to the x- or y-axis)
if (x == 0) {//y-axis
vec.x = speed.x;
if (y > 0)
vec.x *= -1;
vec.y = 0;
}
else if (y == 0) {//x-axis
vec.x = 0;
vec.y = speed.x;
if (x > 0)
vec.y *= -1;
}
else if (x > 0 && y > 0 || x < 0 && y < 0) {//quadrants 1 and 3. (c,0)
c = (x * x + y * y) / x;
vec.x = c - x;
vec.y = -y;
vec = vectorScale(vec, speed.x);
}
else if (x > 0 && y < 0 || x < 0 && y > 0) {//quadrants 2 and 4. (0,c)
c = (x * x + y * y) / y;
vec.x = -x;
vec.y = c - y;
vec = vectorScale(vec, speed.x);
}
//if (x < speed.x && y < speed.x && x > -speed.x && y > -speed.x) {
// gravity.x *= -100;
// gravity.y *= -100;
//}
vel.x = vec.x + gravity.x;
vel.y = vec.y + gravity.y;
return vel;
}
//Kernel function that runs on the GPU and is callable from the CPU.
__global__ void starKernel(POS *pos, POS* speed, int w, int h, int screenW, int screenH)
{
const int c = blockIdx.x * blockDim.x + threadIdx.x; //block index * block size + thread index within the block
const int r = blockIdx.y * blockDim.y + threadIdx.y; //block index * block size + thread index within the block
//Threads are launched as 512*512 (multiples of TX, TY), not 500*500.
//Do not compute data outside the desired range.
if ((c >= w) || (r >= h)) return;
const int i = r * w + c; //index (id) of this thread among all threads
float2 vel = velocity(pos[i],speed[i], screenW, screenH);
pos[i].x += vel.x ;
pos[i].y += vel.y ;
/*pos[i].x += vel[i].x;
pos[i].y += vel[i].y;
if (pos[i].x > screenW || pos[i].x < 0)
pos[i].x = screenW / 2;
if (pos[i].y > screenH || pos[i].y < 0)
pos[i].y = screenH / 2;*/
}
//CPU function that launches the kernel.
void kernelLauncher_star(POS *pos, POS* vel, int w, int h, int screenW, int screenH) {
//Block size: TX threads wide, TY threads tall.
const dim3 blockSize(TX, TY);
//Grid, i.e. the number of thread blocks: there are grid.x * grid.y blocks.
const dim3 gridSize = dim3((w + TX - 1) / TX, (h + TY - 1) / TY);
//Kernel launch: uses gridSize thread blocks, each of size blockSize.
//Common arguments: GPU memory pointers, width, height, reference point.
//starKernel << <gridSize, blockSize >> > (pos, vel, w, h);
starKernel KERNEL_ARGS2(gridSize, blockSize) (pos, vel, w, h, screenW , screenH);
} |
517a5f5354bfaaa9c9439058a4532bf959f5a631.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2020 Stanford University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "circuit.h"
#include "hip/hip_runtime.h"
class GPUAccumulateCharge {
public:
typedef float LHS;
typedef float RHS;
template<bool EXCLUSIVE>
__host__ __device__ __forceinline__
static void apply(LHS &lhs, RHS &rhs)
{
#ifdef __CUDA_ARCH__
float *target = &lhs;
atomicAdd(target,rhs);
#else
assert(false);
#endif
}
template<bool EXCLUSIVE>
__host__ __device__ __forceinline__
static void fold(RHS &rhs1, RHS rhs2)
{
#ifdef __CUDA_ARCH__
float *target = &rhs1;
atomicAdd(target,rhs2);
#else
assert(false);
#endif
}
};
template<typename AT, int SEGMENTS>
struct SegmentAccessors {
public:
__host__ __device__
inline AT& operator[](unsigned index) { return accessors[index]; }
__host__ __device__
inline const AT& operator[](unsigned index) const { return accessors[index]; }
public:
AT accessors[SEGMENTS];
};
__device__ __forceinline__
float find_node_voltage(const AccessorROfloat &pvt,
const AccessorROfloat &shr,
const AccessorROfloat &ghost,
Point<1> ptr, PointerLocation loc)
{
switch (loc)
{
case PRIVATE_PTR:
return pvt[ptr];
case SHARED_PTR:
return shr[ptr];
case GHOST_PTR:
return ghost[ptr];
default:
break; // assert(false);
}
return 0.f;
}
__global__
void calc_new_currents_kernel(Point<1> first,
int num_wires,
float dt,
int steps,
const AccessorROpoint fa_in_ptr,
const AccessorROpoint fa_out_ptr,
const AccessorROloc fa_in_loc,
const AccessorROloc fa_out_loc,
const AccessorROfloat fa_inductance,
const AccessorROfloat fa_resistance,
const AccessorROfloat fa_wire_cap,
const AccessorROfloat fa_pvt_voltage,
const AccessorROfloat fa_shr_voltage,
const AccessorROfloat fa_ghost_voltage,
const SegmentAccessors<AccessorRWfloat_nobounds,WIRE_SEGMENTS> fa_currents,
const SegmentAccessors<AccessorRWfloat_nobounds,WIRE_SEGMENTS-1> fa_voltages)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
// We can do this because we know we have SOA layout and wires are dense
if (tid < num_wires)
{
const Point<1> wire_ptr = first + tid;
float recip_dt = 1.f/dt;
float temp_v[WIRE_SEGMENTS+1];
float temp_i[WIRE_SEGMENTS];
float old_i[WIRE_SEGMENTS];
float old_v[WIRE_SEGMENTS-1];
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
{
temp_i[i] = fa_currents[i][wire_ptr];
old_i[i] = temp_i[i];
}
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
{
temp_v[i+1] = fa_voltages[i][wire_ptr];
old_v[i] = temp_v[i+1];
}
Point<1> in_ptr = fa_in_ptr[wire_ptr];
PointerLocation in_loc = fa_in_loc[wire_ptr];
temp_v[0] =
find_node_voltage(fa_pvt_voltage, fa_shr_voltage, fa_ghost_voltage, in_ptr, in_loc);
Point<1> out_ptr = fa_out_ptr[wire_ptr];
PointerLocation out_loc = fa_out_loc[wire_ptr];
temp_v[WIRE_SEGMENTS] =
find_node_voltage(fa_pvt_voltage, fa_shr_voltage, fa_ghost_voltage, out_ptr, out_loc);
// Solve the RLC model iteratively
float inductance = fa_inductance[wire_ptr];
float recip_resistance = 1.f/fa_resistance[wire_ptr];
float recip_capacitance = 1.f/fa_wire_cap[wire_ptr];
for (int j = 0; j < steps; j++)
{
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
{
temp_i[i] = ((temp_v[i] - temp_v[i+1]) -
(inductance * (temp_i[i] - old_i[i]) * recip_dt)) * recip_resistance;
}
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
{
temp_v[i+1] = old_v[i] + dt * (temp_i[i] - temp_i[i+1]) * recip_capacitance;
}
}
// Write out the result
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
fa_currents[i][wire_ptr] = temp_i[i];
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
fa_voltages[i][wire_ptr] = temp_v[i+1];
}
}
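// The iteration above relaxes the per-wire RLC segment model for 'steps'
// sub-steps: each segment current follows i = (dV - L*di/dt) / R and each
// interior node voltage integrates dv/dt = (i_in - i_out) / C with step dt.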
/*static*/
__host__
void CalcNewCurrentsTask::gpu_base_impl(const CircuitPiece &piece,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
// the segment accessors don't need to pay for bounds checks because
// other wire accessors below will use the same bounds and be checked
// first
SegmentAccessors<AccessorRWfloat_nobounds,WIRE_SEGMENTS> fa_currents;
for (int i = 0; i < WIRE_SEGMENTS; i++)
fa_currents[i] = AccessorRWfloat_nobounds(regions[0], FID_CURRENT+i);
SegmentAccessors<AccessorRWfloat_nobounds,WIRE_SEGMENTS-1> fa_voltages;
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
fa_voltages[i] = AccessorRWfloat_nobounds(regions[0], FID_WIRE_VOLTAGE+i);
const AccessorROpoint fa_in_ptr(regions[1], FID_IN_PTR);
const AccessorROpoint fa_out_ptr(regions[1], FID_OUT_PTR);
const AccessorROloc fa_in_loc(regions[1], FID_IN_LOC);
const AccessorROloc fa_out_loc(regions[1], FID_OUT_LOC);
const AccessorROfloat fa_inductance(regions[1], FID_INDUCTANCE);
const AccessorROfloat fa_resistance(regions[1], FID_RESISTANCE);
const AccessorROfloat fa_wire_cap(regions[1], FID_WIRE_CAP);
const AccessorROfloat fa_pvt_voltage(regions[2], FID_NODE_VOLTAGE);
const AccessorROfloat fa_shr_voltage(regions[3], FID_NODE_VOLTAGE);
const AccessorROfloat fa_ghost_voltage(regions[4], FID_NODE_VOLTAGE);
const int threads_per_block = 256;
const int num_blocks = (piece.num_wires + (threads_per_block-1)) / threads_per_block;
hipLaunchKernelGGL(( calc_new_currents_kernel), dim3(num_blocks),dim3(threads_per_block), 0, 0, piece.first_wire,
piece.num_wires,
piece.dt,
piece.steps,
fa_in_ptr,
fa_out_ptr,
fa_in_loc,
fa_out_loc,
fa_inductance,
fa_resistance,
fa_wire_cap,
fa_pvt_voltage,
fa_shr_voltage,
fa_ghost_voltage,
fa_currents,
fa_voltages);
#endif
}
typedef ReductionAccessor<GPUAccumulateCharge,false/*exclusive*/,1,coord_t,
Realm::AffineAccessor<float,1,coord_t> > AccessorRDfloat;
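// reduce_local below folds a wire's charge contribution into whichever region
// owns the node: private nodes are updated through GPUAccumulateCharge::apply,
// which uses an atomicAdd on the device, so two wires of the same piece that
// meet at one private node remain safe, while shared and ghost nodes go
// through the reduction accessor's <<= operator so the runtime can later
// combine contributions coming from other pieces.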
__device__ __forceinline__
void reduce_local(const AccessorRWfloat &pvt,
const AccessorRDfloat &shr,
const AccessorRDfloat &ghost,
Point<1> ptr, PointerLocation loc, float value)
{
switch (loc)
{
case PRIVATE_PTR:
GPUAccumulateCharge::apply<true/*exclusive*/>(pvt[ptr], value);
break;
case SHARED_PTR:
shr[ptr] <<= value;
break;
case GHOST_PTR:
ghost[ptr] <<= value;
break;
default:
break; // assert(false); // should never make it here
}
}
__global__
void distribute_charge_kernel(Point<1> first,
const int num_wires,
float dt,
const AccessorROpoint fa_in_ptr,
const AccessorROpoint fa_out_ptr,
const AccessorROloc fa_in_loc,
const AccessorROloc fa_out_loc,
const AccessorROfloat fa_in_current,
const AccessorROfloat fa_out_current,
const AccessorRWfloat fa_pvt_charge,
const AccessorRDfloat fa_shr_charge,
const AccessorRDfloat fa_ghost_charge)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_wires)
{
const Point<1> wire_ptr = first + tid;
float in_dq = -dt * fa_in_current[wire_ptr];
float out_dq = dt * fa_out_current[wire_ptr];
Point<1> in_ptr = fa_in_ptr[wire_ptr];
PointerLocation in_loc = fa_in_loc[wire_ptr];
reduce_local(fa_pvt_charge, fa_shr_charge, fa_ghost_charge, in_ptr, in_loc, in_dq);
Point<1> out_ptr = fa_out_ptr[wire_ptr];
PointerLocation out_loc = fa_out_loc[wire_ptr];
reduce_local(fa_pvt_charge, fa_shr_charge, fa_ghost_charge, out_ptr, out_loc, out_dq);
}
}
/*static*/
__host__
void DistributeChargeTask::gpu_base_impl(const CircuitPiece &piece,
                                         const std::vector<PhysicalRegion> &regions)
{
#ifndef DISABLE_MATH
const AccessorROpoint fa_in_ptr(regions[0], FID_IN_PTR);
const AccessorROpoint fa_out_ptr(regions[0], FID_OUT_PTR);
const AccessorROloc fa_in_loc(regions[0], FID_IN_LOC);
const AccessorROloc fa_out_loc(regions[0], FID_OUT_LOC);
const AccessorROfloat fa_in_current(regions[0], FID_CURRENT);
const AccessorROfloat fa_out_current(regions[0], FID_CURRENT+WIRE_SEGMENTS-1);
const AccessorRWfloat fa_pvt_charge(regions[1], FID_CHARGE);
const AccessorRDfloat fa_shr_charge(regions[2], FID_CHARGE, REDUCE_ID);
const AccessorRDfloat fa_ghost_charge(regions[3], FID_CHARGE, REDUCE_ID);
const int threads_per_block = 256;
const int num_blocks = (piece.num_wires + (threads_per_block-1)) / threads_per_block;
hipLaunchKernelGGL(( distribute_charge_kernel), dim3(num_blocks),dim3(threads_per_block), 0, 0, piece.first_wire,
piece.num_wires,
piece.dt,
fa_in_ptr,
fa_out_ptr,
fa_in_loc,
fa_out_loc,
fa_in_current,
fa_out_current,
fa_pvt_charge,
fa_shr_charge,
fa_ghost_charge);
#endif
}
__global__
void update_voltages_kernel(Point<1> first,
const int num_nodes,
const AccessorRWfloat fa_pvt_voltage,
const AccessorRWfloat fa_shr_voltage,
const AccessorRWfloat fa_pvt_charge,
const AccessorRWfloat fa_shr_charge,
const AccessorROfloat fa_pvt_cap,
const AccessorROfloat fa_shr_cap,
const AccessorROfloat fa_pvt_leakage,
const AccessorROfloat fa_shr_leakage,
const AccessorROloc fa_ptr_loc)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_nodes)
{
const Point<1> node_ptr = first + tid;
PointerLocation node_loc = fa_ptr_loc[node_ptr];
if (node_loc == PRIVATE_PTR)
{
float voltage = fa_pvt_voltage[node_ptr];
float charge = fa_pvt_charge[node_ptr];
float capacitance = fa_pvt_cap[node_ptr];
float leakage = fa_pvt_leakage[node_ptr];
voltage += (charge / capacitance);
voltage *= (1.f - leakage);
fa_pvt_voltage[node_ptr] = voltage;
fa_pvt_charge[node_ptr] = 0.f;
}
else
{
float voltage = fa_shr_voltage[node_ptr];
float charge = fa_shr_charge[node_ptr];
float capacitance = fa_shr_cap[node_ptr];
float leakage = fa_shr_leakage[node_ptr];
voltage += (charge / capacitance);
voltage *= (1.f - leakage);
      fa_shr_voltage[node_ptr] = voltage;
      fa_shr_charge[node_ptr] = 0.f;
}
}
}
/*static*/
__host__
void UpdateVoltagesTask::gpu_base_impl(const CircuitPiece &piece,
                                       const std::vector<PhysicalRegion> &regions)
{
#ifndef DISABLE_MATH
const AccessorRWfloat fa_pvt_voltage(regions[0], FID_NODE_VOLTAGE);
const AccessorRWfloat fa_pvt_charge(regions[0], FID_CHARGE);
const AccessorRWfloat fa_shr_voltage(regions[1], FID_NODE_VOLTAGE);
const AccessorRWfloat fa_shr_charge(regions[1], FID_CHARGE);
const AccessorROfloat fa_pvt_cap(regions[2], FID_NODE_CAP);
const AccessorROfloat fa_pvt_leakage(regions[2], FID_LEAKAGE);
const AccessorROfloat fa_shr_cap(regions[3], FID_NODE_CAP);
const AccessorROfloat fa_shr_leakage(regions[3], FID_LEAKAGE);
const AccessorROloc fa_ptr_loc(regions[4], FID_LOCATOR);
const int threads_per_block = 256;
const int num_blocks = (piece.num_nodes + (threads_per_block-1)) / threads_per_block;
hipLaunchKernelGGL(( update_voltages_kernel), dim3(num_blocks),dim3(threads_per_block), 0, 0, piece.first_node,
piece.num_nodes,
fa_pvt_voltage,
fa_shr_voltage,
fa_pvt_charge,
fa_shr_charge,
fa_pvt_cap,
fa_shr_cap,
fa_pvt_leakage,
fa_shr_leakage,
fa_ptr_loc);
#endif
}
| 517a5f5354bfaaa9c9439058a4532bf959f5a631.cu | /* Copyright 2020 Stanford University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "circuit.h"
#include "cuda_runtime.h"
class GPUAccumulateCharge {
public:
typedef float LHS;
typedef float RHS;
template<bool EXCLUSIVE>
__host__ __device__ __forceinline__
static void apply(LHS &lhs, RHS &rhs)
{
#ifdef __CUDA_ARCH__
float *target = &lhs;
atomicAdd(target,rhs);
#else
assert(false);
#endif
}
template<bool EXCLUSIVE>
__host__ __device__ __forceinline__
static void fold(RHS &rhs1, RHS rhs2)
{
#ifdef __CUDA_ARCH__
float *target = &rhs1;
atomicAdd(target,rhs2);
#else
assert(false);
#endif
}
};
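// GPUAccumulateCharge is the reduction operator used to accumulate charge on
// nodes: apply() folds a contribution into a charge field and fold() combines
// two pending contributions.  Both are backed by atomicAdd on the device, so
// the EXCLUSIVE template parameter does not change the generated code here;
// the host fallback simply asserts because this reduction only runs on the GPU.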
template<typename AT, int SEGMENTS>
struct SegmentAccessors {
public:
__host__ __device__
inline AT& operator[](unsigned index) { return accessors[index]; }
__host__ __device__
inline const AT& operator[](unsigned index) const { return accessors[index]; }
public:
AT accessors[SEGMENTS];
};
__device__ __forceinline__
float find_node_voltage(const AccessorROfloat &pvt,
const AccessorROfloat &shr,
const AccessorROfloat &ghost,
Point<1> ptr, PointerLocation loc)
{
switch (loc)
{
case PRIVATE_PTR:
return pvt[ptr];
case SHARED_PTR:
return shr[ptr];
case GHOST_PTR:
return ghost[ptr];
default:
break; // assert(false);
}
return 0.f;
}
__global__
void calc_new_currents_kernel(Point<1> first,
int num_wires,
float dt,
int steps,
const AccessorROpoint fa_in_ptr,
const AccessorROpoint fa_out_ptr,
const AccessorROloc fa_in_loc,
const AccessorROloc fa_out_loc,
const AccessorROfloat fa_inductance,
const AccessorROfloat fa_resistance,
const AccessorROfloat fa_wire_cap,
const AccessorROfloat fa_pvt_voltage,
const AccessorROfloat fa_shr_voltage,
const AccessorROfloat fa_ghost_voltage,
const SegmentAccessors<AccessorRWfloat_nobounds,WIRE_SEGMENTS> fa_currents,
const SegmentAccessors<AccessorRWfloat_nobounds,WIRE_SEGMENTS-1> fa_voltages)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
// We can do this because we know we have SOA layout and wires are dense
if (tid < num_wires)
{
const Point<1> wire_ptr = first + tid;
float recip_dt = 1.f/dt;
float temp_v[WIRE_SEGMENTS+1];
float temp_i[WIRE_SEGMENTS];
float old_i[WIRE_SEGMENTS];
float old_v[WIRE_SEGMENTS-1];
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
{
temp_i[i] = fa_currents[i][wire_ptr];
old_i[i] = temp_i[i];
}
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
{
temp_v[i+1] = fa_voltages[i][wire_ptr];
old_v[i] = temp_v[i+1];
}
Point<1> in_ptr = fa_in_ptr[wire_ptr];
PointerLocation in_loc = fa_in_loc[wire_ptr];
temp_v[0] =
find_node_voltage(fa_pvt_voltage, fa_shr_voltage, fa_ghost_voltage, in_ptr, in_loc);
Point<1> out_ptr = fa_out_ptr[wire_ptr];
PointerLocation out_loc = fa_out_loc[wire_ptr];
      temp_v[WIRE_SEGMENTS] =
        find_node_voltage(fa_pvt_voltage, fa_shr_voltage, fa_ghost_voltage, out_ptr, out_loc);
// Solve the RLC model iteratively
float inductance = fa_inductance[wire_ptr];
float recip_resistance = 1.f/fa_resistance[wire_ptr];
float recip_capacitance = 1.f/fa_wire_cap[wire_ptr];
for (int j = 0; j < steps; j++)
{
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
{
temp_i[i] = ((temp_v[i] - temp_v[i+1]) -
(inductance * (temp_i[i] - old_i[i]) * recip_dt)) * recip_resistance;
}
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
{
temp_v[i+1] = old_v[i] + dt * (temp_i[i] - temp_i[i+1]) * recip_capacitance;
}
}
// Write out the result
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
fa_currents[i][wire_ptr] = temp_i[i];
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
fa_voltages[i][wire_ptr] = temp_v[i+1];
}
}
/*static*/
__host__
void CalcNewCurrentsTask::gpu_base_impl(const CircuitPiece &piece,
                                        const std::vector<PhysicalRegion> &regions)
{
#ifndef DISABLE_MATH
// the segment accessors don't need to pay for bounds checks because
// other wire accessors below will use the same bounds and be checked
// first
SegmentAccessors<AccessorRWfloat_nobounds,WIRE_SEGMENTS> fa_currents;
for (int i = 0; i < WIRE_SEGMENTS; i++)
fa_currents[i] = AccessorRWfloat_nobounds(regions[0], FID_CURRENT+i);
SegmentAccessors<AccessorRWfloat_nobounds,WIRE_SEGMENTS-1> fa_voltages;
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
fa_voltages[i] = AccessorRWfloat_nobounds(regions[0], FID_WIRE_VOLTAGE+i);
const AccessorROpoint fa_in_ptr(regions[1], FID_IN_PTR);
const AccessorROpoint fa_out_ptr(regions[1], FID_OUT_PTR);
const AccessorROloc fa_in_loc(regions[1], FID_IN_LOC);
const AccessorROloc fa_out_loc(regions[1], FID_OUT_LOC);
const AccessorROfloat fa_inductance(regions[1], FID_INDUCTANCE);
const AccessorROfloat fa_resistance(regions[1], FID_RESISTANCE);
const AccessorROfloat fa_wire_cap(regions[1], FID_WIRE_CAP);
const AccessorROfloat fa_pvt_voltage(regions[2], FID_NODE_VOLTAGE);
const AccessorROfloat fa_shr_voltage(regions[3], FID_NODE_VOLTAGE);
const AccessorROfloat fa_ghost_voltage(regions[4], FID_NODE_VOLTAGE);
const int threads_per_block = 256;
const int num_blocks = (piece.num_wires + (threads_per_block-1)) / threads_per_block;
calc_new_currents_kernel<<<num_blocks,threads_per_block>>>(piece.first_wire,
piece.num_wires,
piece.dt,
piece.steps,
fa_in_ptr,
fa_out_ptr,
fa_in_loc,
fa_out_loc,
fa_inductance,
fa_resistance,
fa_wire_cap,
fa_pvt_voltage,
fa_shr_voltage,
fa_ghost_voltage,
fa_currents,
fa_voltages);
#endif
}
typedef ReductionAccessor<GPUAccumulateCharge,false/*exclusive*/,1,coord_t,
Realm::AffineAccessor<float,1,coord_t> > AccessorRDfloat;
__device__ __forceinline__
void reduce_local(const AccessorRWfloat &pvt,
const AccessorRDfloat &shr,
const AccessorRDfloat &ghost,
Point<1> ptr, PointerLocation loc, float value)
{
switch (loc)
{
case PRIVATE_PTR:
GPUAccumulateCharge::apply<true/*exclusive*/>(pvt[ptr], value);
break;
case SHARED_PTR:
shr[ptr] <<= value;
break;
case GHOST_PTR:
ghost[ptr] <<= value;
break;
default:
break; // assert(false); // should never make it here
}
}
__global__
void distribute_charge_kernel(Point<1> first,
const int num_wires,
float dt,
const AccessorROpoint fa_in_ptr,
const AccessorROpoint fa_out_ptr,
const AccessorROloc fa_in_loc,
const AccessorROloc fa_out_loc,
const AccessorROfloat fa_in_current,
const AccessorROfloat fa_out_current,
const AccessorRWfloat fa_pvt_charge,
const AccessorRDfloat fa_shr_charge,
const AccessorRDfloat fa_ghost_charge)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_wires)
{
const Point<1> wire_ptr = first + tid;
float in_dq = -dt * fa_in_current[wire_ptr];
float out_dq = dt * fa_out_current[wire_ptr];
Point<1> in_ptr = fa_in_ptr[wire_ptr];
PointerLocation in_loc = fa_in_loc[wire_ptr];
reduce_local(fa_pvt_charge, fa_shr_charge, fa_ghost_charge, in_ptr, in_loc, in_dq);
Point<1> out_ptr = fa_out_ptr[wire_ptr];
PointerLocation out_loc = fa_out_loc[wire_ptr];
reduce_local(fa_pvt_charge, fa_shr_charge, fa_ghost_charge, out_ptr, out_loc, out_dq);
}
}
/*static*/
__host__
void DistributeChargeTask::gpu_base_impl(const CircuitPiece &piece,
                                         const std::vector<PhysicalRegion> &regions)
{
#ifndef DISABLE_MATH
const AccessorROpoint fa_in_ptr(regions[0], FID_IN_PTR);
const AccessorROpoint fa_out_ptr(regions[0], FID_OUT_PTR);
const AccessorROloc fa_in_loc(regions[0], FID_IN_LOC);
const AccessorROloc fa_out_loc(regions[0], FID_OUT_LOC);
const AccessorROfloat fa_in_current(regions[0], FID_CURRENT);
const AccessorROfloat fa_out_current(regions[0], FID_CURRENT+WIRE_SEGMENTS-1);
const AccessorRWfloat fa_pvt_charge(regions[1], FID_CHARGE);
const AccessorRDfloat fa_shr_charge(regions[2], FID_CHARGE, REDUCE_ID);
const AccessorRDfloat fa_ghost_charge(regions[3], FID_CHARGE, REDUCE_ID);
const int threads_per_block = 256;
const int num_blocks = (piece.num_wires + (threads_per_block-1)) / threads_per_block;
distribute_charge_kernel<<<num_blocks,threads_per_block>>>(piece.first_wire,
piece.num_wires,
piece.dt,
fa_in_ptr,
fa_out_ptr,
fa_in_loc,
fa_out_loc,
fa_in_current,
fa_out_current,
fa_pvt_charge,
fa_shr_charge,
fa_ghost_charge);
#endif
}
__global__
void update_voltages_kernel(Point<1> first,
const int num_nodes,
const AccessorRWfloat fa_pvt_voltage,
const AccessorRWfloat fa_shr_voltage,
const AccessorRWfloat fa_pvt_charge,
const AccessorRWfloat fa_shr_charge,
const AccessorROfloat fa_pvt_cap,
const AccessorROfloat fa_shr_cap,
const AccessorROfloat fa_pvt_leakage,
const AccessorROfloat fa_shr_leakage,
const AccessorROloc fa_ptr_loc)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_nodes)
{
const Point<1> node_ptr = first + tid;
PointerLocation node_loc = fa_ptr_loc[node_ptr];
if (node_loc == PRIVATE_PTR)
{
float voltage = fa_pvt_voltage[node_ptr];
float charge = fa_pvt_charge[node_ptr];
float capacitance = fa_pvt_cap[node_ptr];
float leakage = fa_pvt_leakage[node_ptr];
voltage += (charge / capacitance);
voltage *= (1.f - leakage);
fa_pvt_voltage[node_ptr] = voltage;
fa_pvt_charge[node_ptr] = 0.f;
}
else
{
float voltage = fa_shr_voltage[node_ptr];
float charge = fa_shr_charge[node_ptr];
float capacitance = fa_shr_cap[node_ptr];
float leakage = fa_shr_leakage[node_ptr];
voltage += (charge / capacitance);
voltage *= (1.f - leakage);
      fa_shr_voltage[node_ptr] = voltage;
      fa_shr_charge[node_ptr] = 0.f;
}
}
}
/*static*/
__host__
void UpdateVoltagesTask::gpu_base_impl(const CircuitPiece &piece,
                                       const std::vector<PhysicalRegion> &regions)
{
#ifndef DISABLE_MATH
const AccessorRWfloat fa_pvt_voltage(regions[0], FID_NODE_VOLTAGE);
const AccessorRWfloat fa_pvt_charge(regions[0], FID_CHARGE);
const AccessorRWfloat fa_shr_voltage(regions[1], FID_NODE_VOLTAGE);
const AccessorRWfloat fa_shr_charge(regions[1], FID_CHARGE);
const AccessorROfloat fa_pvt_cap(regions[2], FID_NODE_CAP);
const AccessorROfloat fa_pvt_leakage(regions[2], FID_LEAKAGE);
const AccessorROfloat fa_shr_cap(regions[3], FID_NODE_CAP);
const AccessorROfloat fa_shr_leakage(regions[3], FID_LEAKAGE);
const AccessorROloc fa_ptr_loc(regions[4], FID_LOCATOR);
const int threads_per_block = 256;
const int num_blocks = (piece.num_nodes + (threads_per_block-1)) / threads_per_block;
update_voltages_kernel<<<num_blocks,threads_per_block>>>(piece.first_node,
piece.num_nodes,
fa_pvt_voltage,
fa_shr_voltage,
fa_pvt_charge,
fa_shr_charge,
fa_pvt_cap,
fa_shr_cap,
fa_pvt_leakage,
fa_shr_leakage,
fa_ptr_loc);
#endif
}
|
6d677c807e3fd1d42f7650527c408e721ec63dae.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sigmoid_grad.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *pre_grad = NULL;
hipMalloc(&pre_grad, XSIZE*YSIZE);
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
int rows = XSIZE;
int cols = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
sigmoid_grad), dim3(gridBlock),dim3(threadBlock), 0, 0, pre_grad,output,rows,cols);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
sigmoid_grad), dim3(gridBlock),dim3(threadBlock), 0, 0, pre_grad,output,rows,cols);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
sigmoid_grad), dim3(gridBlock),dim3(threadBlock), 0, 0, pre_grad,output,rows,cols);
}
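 // Note: there is no hipDeviceSynchronize() between these launches and the
 // steady_clock read below, so the measured interval covers kernel launch
 // overhead plus however much of the asynchronous work the launch queue forces
 // to completion, not necessarily the full execution of all 1000 launches.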
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 6d677c807e3fd1d42f7650527c408e721ec63dae.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sigmoid_grad.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *pre_grad = NULL;
cudaMalloc(&pre_grad, XSIZE*YSIZE);
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
int rows = XSIZE;
int cols = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sigmoid_grad<<<gridBlock,threadBlock>>>(pre_grad,output,rows,cols);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sigmoid_grad<<<gridBlock,threadBlock>>>(pre_grad,output,rows,cols);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sigmoid_grad<<<gridBlock,threadBlock>>>(pre_grad,output,rows,cols);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
cefb92d735b3e4b9a41ebcaf4fdf4275b95af226.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/nb14/dihedral_14_cf_atom_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void Dihedral14CFAtomEnergyKernel(const int dihedral_14_numbers, const UINT_VECTOR_LJ_TYPE *uint_crd,
const VECTOR *boxlength, const int *a_14, const int *b_14,
const float *cf_scale_factor, float *ene) {
int dihedral_14_i = blockDim.x * blockIdx.x + threadIdx.x;
if (dihedral_14_i < dihedral_14_numbers) {
int atom_i = a_14[dihedral_14_i];
int atom_j = b_14[dihedral_14_i];
UINT_VECTOR_LJ_TYPE r1 = uint_crd[atom_i];
UINT_VECTOR_LJ_TYPE r2 = uint_crd[atom_j];
int int_x;
int int_y;
int int_z;
VECTOR dr;
float r_1;
float ene_lin = 0.;
int_x = r2.uint_x - r1.uint_x;
int_y = r2.uint_y - r1.uint_y;
int_z = r2.uint_z - r1.uint_z;
dr.x = boxlength[0].x * int_x;
dr.y = boxlength[0].y * int_y;
dr.z = boxlength[0].z * int_z;
r_1 = rnorm3df(dr.x, dr.y, dr.z);
ene_lin = r1.charge * r2.charge * r_1;
ene_lin *= cf_scale_factor[dihedral_14_i];
atomicAdd(&ene[atom_i], ene_lin);
}
}
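// Host-side wrapper: packs the unsigned coordinates together with each atom's
// LJ type and charge into UINT_VECTOR_LJ_TYPE via Copy_Crd_To_New_Crd_Start,
// zeroes the per-atom energy buffer with Reset_List, then launches the kernel
// above on the supplied stream and waits for it with hipStreamSynchronize.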
void Dihedral14CFAtomEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f,
const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14,
const int *b_14, const float *cf_scale_factor, float *ene, hipStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(atom_numbers) / 128);
UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ = NULL;
Cuda_Malloc_Safely(reinterpret_cast<void **>(&uint_crd_with_LJ), sizeof(UINT_VECTOR_LJ_TYPE) * atom_numbers);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
hipLaunchKernelGGL(( Copy_Crd_To_New_Crd_Start), dim3(ceilf(static_cast<float>(atom_numbers) / 32)), dim3(32), 0, stream,
atom_numbers, uint_crd, uint_crd_with_LJ, LJtype, charge);
VECTOR *boxlength = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(boxlength_f));
hipLaunchKernelGGL(( Reset_List), dim3(ceilf(static_cast<float>(3. * atom_numbers) / 128)), dim3(128), 0, stream, atom_numbers, ene, 0.);
hipLaunchKernelGGL(( Dihedral14CFAtomEnergyKernel), dim3(block_per_grid), dim3(thread_per_block), 0, stream,
dihedral_14_numbers, uint_crd_with_LJ, boxlength, a_14, b_14, cf_scale_factor, ene);
hipStreamSynchronize(stream);
return;
}
void Dihedral14CFAtomEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f,
const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14,
const int *b_14, const float *cf_scale_factor, float *ene, hipStream_t stream);
| cefb92d735b3e4b9a41ebcaf4fdf4275b95af226.cu | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/nb14/dihedral_14_cf_atom_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void Dihedral14CFAtomEnergyKernel(const int dihedral_14_numbers, const UINT_VECTOR_LJ_TYPE *uint_crd,
const VECTOR *boxlength, const int *a_14, const int *b_14,
const float *cf_scale_factor, float *ene) {
int dihedral_14_i = blockDim.x * blockIdx.x + threadIdx.x;
if (dihedral_14_i < dihedral_14_numbers) {
int atom_i = a_14[dihedral_14_i];
int atom_j = b_14[dihedral_14_i];
UINT_VECTOR_LJ_TYPE r1 = uint_crd[atom_i];
UINT_VECTOR_LJ_TYPE r2 = uint_crd[atom_j];
int int_x;
int int_y;
int int_z;
VECTOR dr;
float r_1;
float ene_lin = 0.;
int_x = r2.uint_x - r1.uint_x;
int_y = r2.uint_y - r1.uint_y;
int_z = r2.uint_z - r1.uint_z;
dr.x = boxlength[0].x * int_x;
dr.y = boxlength[0].y * int_y;
dr.z = boxlength[0].z * int_z;
r_1 = rnorm3df(dr.x, dr.y, dr.z);
ene_lin = r1.charge * r2.charge * r_1;
ene_lin *= cf_scale_factor[dihedral_14_i];
atomicAdd(&ene[atom_i], ene_lin);
}
}
void Dihedral14CFAtomEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f,
const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14,
const int *b_14, const float *cf_scale_factor, float *ene, cudaStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(atom_numbers) / 128);
UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ = NULL;
Cuda_Malloc_Safely(reinterpret_cast<void **>(&uint_crd_with_LJ), sizeof(UINT_VECTOR_LJ_TYPE) * atom_numbers);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
Copy_Crd_To_New_Crd_Start<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>(
atom_numbers, uint_crd, uint_crd_with_LJ, LJtype, charge);
VECTOR *boxlength = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(boxlength_f));
Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, ene, 0.);
Dihedral14CFAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(
dihedral_14_numbers, uint_crd_with_LJ, boxlength, a_14, b_14, cf_scale_factor, ene);
cudaStreamSynchronize(stream);
return;
}
void Dihedral14CFAtomEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f,
const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14,
const int *b_14, const float *cf_scale_factor, float *ene, cudaStream_t stream);
|
d1459fab30cc8699023b2ef8b57dfd6c15092616.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled.
///////////For K40, L2 prefetching exists. L1 prefetching does not.
///////////The data found in cache depends on the data size. So the prefetching is caused by the memcpy which goes through the L2 as well.
///////////Since 4mb (largest data size used) is far less to saturate the tlbs, if tlbs show misses using it, it means that both L1 and L2 tlbs does not prefetch.
///////////However, when data size gets smaller (when it starts to fit in L2), L2 tlbs seems to prefetch.
///////////When it gets even smaller (but not yet fit in L1), L1 tlbs seems to prefetch as well.
//typedef unsigned char byte;
void init_cpu_data(int* A, int size, int stride, int mod){
for (int i = 0; i < size; ++i){
A[i]=(i + stride) % mod;
}
}
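// The chain A[i] = (i + stride) % mod is a pointer-chasing ring: starting at
// index 0, every dependent load jumps `stride` ints (stride * 4 bytes) ahead
// and wraps after mod / stride hops.  Only the first `mod` ints are ever
// touched, regardless of how large the allocated data_size is, which lets the
// experiments in main() vary the copied allocation and the chased footprint
// independently.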
__device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
//long long int start_time = 0;//////clock
//long long int end_time = 0;//////clock
//start_time = clock64();//////clock
for (int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//long long int total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ int s_index[1024 * 4];
//__shared__ int s_index[1];
int j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
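 // The PTX below times one dependent load at a time: t2 = &A[j] is formed with
 // mul.wide/add, %clock64 is read into start_time, ld.global.u32 fetches the
 // next index, and the loaded value is stored to s_index before the second
 // %clock64 read, so the end timestamp cannot be taken until the load has
 // actually completed (see the note on ILP below).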
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t");
for (int it = 0; it < iterations; it++){
/*
asm("mul.wide.u32 t1, %3, %5;\n\t"
"add.u64 t2, t1, %4;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
*/
asm("mul.wide.u32 t1, %2, %4;\n\t"
"add.u64 t2, t1, %3;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %1, [t2];\n\t"
: "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
s_index[it] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add).
asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (int it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
__global__ void tlb_latency_test(int *A, int iterations, int *B, int *C, long long int *D, float clock_rate, int mod, int data_stride){
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
//P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, iterations, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
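// Experiment sweep: data_stride is fixed at 32 ints (128 B, one cache line)
// and mod at 2048 ints (8 KB), so every run chases the same 64-hop ring.
// main() repeats the measurement while shrinking the allocated data_size from
// 4 MB down to 8 KB; since the chase itself only ever touches the first 8 KB,
// differences between runs come from how much of the hipMemcpy'd buffer is
// still resident in L2/L1, which is what exposes the prefetching behaviour
// described in the comment at the top of this file.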
int main(int argc, char **argv)
{
printf("\n");
// set device
hipDeviceProp_t device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This samples requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == hipComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out;
checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(int) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
for(int data_stride = 32; data_stride <= 32; data_stride = data_stride + 1){/////////stride shall be L1 cache line size.
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 256 * 4;/////4mb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
	int iterations = mod / data_stride;////mod / data_stride = 2048 / 32 = 64 here ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, hipMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
checkCudaErrors(hipFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 256 * 2;/////2mb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
	int iterations = mod / data_stride;////mod / data_stride = 2048 / 32 = 64 here ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, hipMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
checkCudaErrors(hipFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
	int data_size = 1024 * 256 * 1.5;/////1.5mb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
	int iterations = mod / data_stride;////mod / data_stride = 2048 / 32 = 64 here ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, hipMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
checkCudaErrors(hipFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
	int data_size = 1024 * 256;/////1mb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
	int iterations = mod / data_stride;////mod / data_stride = 2048 / 32 = 64 here ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, hipMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
checkCudaErrors(hipFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
	int data_size = 1024 * 8;/////32kb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
	int iterations = mod / data_stride;////mod / data_stride = 2048 / 32 = 64 here ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, hipMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
checkCudaErrors(hipFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
	int data_size = 1024 * 4;/////16kb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
	int iterations = mod / data_stride;////mod / data_stride = 2048 / 32 = 64 here ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, hipMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
checkCudaErrors(hipFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
	int data_size = 1024 * 2;/////8kb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
	int iterations = mod / data_stride;////mod / data_stride = 2048 / 32 = 64 here ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, hipMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
checkCudaErrors(hipFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(hipFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
| d1459fab30cc8699023b2ef8b57dfd6c15092616.cu | #include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled.
///////////For K40, L2 prefetching exists. L1 prefetching does not.
///////////The data found in cache depends on the data size. So the prefetching is caused by the memcpy which goes through the L2 as well.
///////////Since 4mb (largest data size used) is far less to saturate the tlbs, if tlbs show misses using it, it means that both L1 and L2 tlbs does not prefetch.
///////////However, when data size gets smaller (when it starts to fit in L2), L2 tlbs seems to prefetch.
///////////When it gets even smaller (but not yet fit in L1), L1 tlbs seems to prefetch as well.
//typedef unsigned char byte;
void init_cpu_data(int* A, int size, int stride, int mod){
for (int i = 0; i < size; ++i){
A[i]=(i + stride) % mod;
}
}
__device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
//long long int start_time = 0;//////clock
//long long int end_time = 0;//////clock
//start_time = clock64();//////clock
for (int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//long long int total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ int s_index[1024 * 4];
//__shared__ int s_index[1];
int j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t");
for (int it = 0; it < iterations; it++){
/*
asm("mul.wide.u32 t1, %3, %5;\n\t"
"add.u64 t2, t1, %4;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
*/
asm("mul.wide.u32 t1, %2, %4;\n\t"
"add.u64 t2, t1, %3;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %1, [t2];\n\t"
: "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
s_index[it] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add).
asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (int it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
__global__ void tlb_latency_test(int *A, int iterations, int *B, int *C, long long int *D, float clock_rate, int mod, int data_stride){
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
//P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, iterations, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
int main(int argc, char **argv)
{
printf("\n");
// set device
cudaDeviceProp device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == cudaComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out;
checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(int) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
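//////The measurement blocks below are identical except for data_size (the size of the allocated array); mod and data_stride are held fixed, so every block performs the same mod / data_stride chained accesses with the same stride.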
for(int data_stride = 32; data_stride <= 32; data_stride = data_stride + 1){/////////stride shall be L1 cache line size.
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 256 * 4;/////4mb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////e.g. 2048 / 32 = 64 accesses; one pass is enough to see whether each access was prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, cudaMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
checkCudaErrors(cudaFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 256 * 2;/////2mb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////e.g. 2048 / 32 = 64 accesses; one pass is enough to see whether each access was prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, cudaMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
checkCudaErrors(cudaFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 384;/////1024 * 256 * 1.5 ints = 1.5 MB.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////e.g. 2048 / 32 = 64 accesses; one pass is enough to see whether each access was prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, cudaMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
checkCudaErrors(cudaFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 256;/////262144 ints = 1 MB.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////e.g. 2048 / 32 = 64 accesses; one pass is enough to see whether each access was prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, cudaMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
checkCudaErrors(cudaFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 8;/////8192 ints = 32 KB.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////e.g. 2048 / 32 = 64 accesses; one pass is enough to see whether each access was prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, cudaMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
checkCudaErrors(cudaFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 4;/////4096 ints = 16 KB.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////e.g. 2048 / 32 = 64 accesses; one pass is enough to see whether each access was prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, cudaMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
checkCudaErrors(cudaFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 2;/////2048 ints = 8 KB.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////e.g. 2048 / 32 = 64 accesses; one pass is enough to see whether each access was prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, cudaMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
checkCudaErrors(cudaFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(cudaFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
|
8eb46716b57055fa76b4d314c1fd5ac05b2b779c.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <time.h>
#include <limits>
#include "inttypes.h"
#include "cudaErrorHadling.h"
void launchSumKernel(int32_t *a, int32_t *b, int32_t *c, int32_t n);
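/*
 * launchSumKernel is defined in a separate translation unit that is not part of
 * this file. A minimal sketch of what such a wrapper could look like under HIP,
 * assuming one thread per element (the kernel name and block size below are
 * illustrative assumptions, not the actual implementation):
 *
 *   __global__ void sumKernel(const int32_t *a, const int32_t *b, int32_t *c, int32_t n){
 *       int i = blockIdx.x * blockDim.x + threadIdx.x;   // global element index
 *       if (i < n) c[i] = a[i] + b[i];                    // guarded elementwise add
 *   }
 *   void launchSumKernel(int32_t *a, int32_t *b, int32_t *c, int32_t n){
 *       int block = 256;
 *       hipLaunchKernelGGL(sumKernel, dim3((n + block - 1) / block), dim3(block), 0, 0, a, b, c, n);
 *   }
 */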
const char * const printMemorySize(size_t bytes){
char inches[] = {' ', 'K', 'M', 'G', 'T'};
double sz = bytes;
int inch = 0;
for (; sz > 512 && inch < 4; ++inch){ // inch must remain a valid index into inches[]
sz /= 1024;
}
static char ret[64];
sprintf(ret, "%.2f %cB", sz, inches[inch]);
return ret;
}
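// Example: printMemorySize(49152) returns "48.00 KB".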
float timer(){
static clock_t timer = 0;
if (!timer){
timer = clock();
return 0;
}
clock_t current = clock();
float ret = ((float)(current - timer))/CLOCKS_PER_SEC;
timer = current;
return ret;
}
bool ourRequirementsPassed(const hipDeviceProp_t & devProp){
return devProp.major >= 1;
}
int selectCUDADevice(){
int deviceCount = 0, suitableDevice = -1;
hipDeviceProp_t devProp;
hipGetDeviceCount( &deviceCount );
std::cout << "Found "<< deviceCount << " devices: \n";
for (int device = 0; device < deviceCount; ++device) {
hipGetDeviceProperties ( &devProp, device );
std::cout << "Device: " << device << std::endl;
std::cout << " Compute capability: " << devProp.major << "." << devProp.minor << std::endl;
std::cout << " Name: " << devProp.name << std::endl;
std::cout << "----------------------------------------" << std::endl;
std::cout << " Total Global Memory: " << printMemorySize(devProp.totalGlobalMem) << std::endl;
std::cout << " Shared Memory Per Block: " << printMemorySize(devProp.sharedMemPerBlock) << std::endl;
std::cout << " Total Const Memory: " << printMemorySize(devProp.totalConstMem) << std::endl;
std::cout << " L2 Cache size: " << printMemorySize(devProp.l2CacheSize) << std::endl;
std::cout << " Memory bus width: " << printMemorySize(devProp.memoryBusWidth/8) << std::endl;
std::cout << " Memory frequency: " << devProp.memoryClockRate << " kHz" << std::endl;
std::cout << "----------------------------------------" << std::endl;
std::cout << " Multiprocessors: " << devProp.multiProcessorCount << std::endl;
std::cout << " Clock rate: " << devProp.clockRate << " kHz" << std::endl;
std::cout << " Warp Size: " << devProp.warpSize << std::endl;
std::cout << " Max grid size: " << "(" << devProp.maxGridSize[0] << ", " << devProp.maxGridSize[1] << ", " << devProp.maxGridSize[2] << ")" << std::endl;
std::cout << " Max block size: " << "(" << devProp.maxThreadsDim[0] << ", " << devProp.maxThreadsDim[1] << ", " << devProp.maxThreadsDim[2] << ")" << std::endl;
std::cout << " Max threads per multiprocessor: " << devProp.maxThreadsPerMultiProcessor << std::endl;
std::cout << " Max threads per block: " << devProp.maxThreadsPerBlock << std::endl;
std::cout << " Registers per block: " << devProp.regsPerBlock << std::endl;
std::cout << "----------------------------------------" << std::endl;
std::cout << std::endl;
if(suitableDevice < 0 && ourRequirementsPassed(devProp)){
suitableDevice = device;
}
}
return suitableDevice;
}
void initializeRandomArray(int *array, int length){
static const int32_t MY_INT32_MAX = std::numeric_limits<int32_t>::max();
for(int i =0; i < length; ++i){
array[i] = rand() % MY_INT32_MAX;
}
}
int main(int argc, char *argv[]){
//------------- Variables -----------
hipEvent_t start, stop;
float timeCPU = 0.0,
timeGPU = 0.0;
int n = 1024;
int32_t *aHost = NULL, *bHost = NULL, *cHost = NULL;
int32_t *aDev = NULL , *bDev = NULL , *cDev = NULL, *answer = NULL;
//-----------------------------------
//--------- Command line -----------
if(argc != 2){
std::cout << "You may define vector size via comandline as: $<program_name> <vector_size>\n";
std::cout << "Default vector size is: " << n << std::endl;
}
else{
int tmp = atoi(argv[1]);
if (tmp > 1){
n = tmp;
}
}
//----------------------------------
//-------- Select device -----------
int device = selectCUDADevice();
if(device == -1) {
std::cout << "Can not find suitable device" << "\n";
return EXIT_FAILURE;
}
SAFE_CALL(hipSetDevice(device));
//-----------------------------------
//------- Host memory allocation ------------
int nb = n*sizeof(int);
aHost = (int32_t*)malloc(nb);
bHost = (int32_t*)malloc(nb);
cHost = (int32_t*)malloc(nb);
answer = (int32_t*)malloc(nb);
//-------------------------------------------
//-------- Initialization arrays by random values ------
srand(clock());
initializeRandomArray(aHost, n);
initializeRandomArray(bHost, n);
//------------------------------------------------------
//-------- Calculation on CPU --------------------------
timer();
for(int i = 0; i < n; ++i){
cHost[i] = aHost[i] + bHost[i];
}
timeCPU = timer();
fflush(stdout);
//------------------------------------------------------
//----- GPU memory allocation and initialization -------
SAFE_CALL( hipMalloc((void**)&aDev, nb) );
SAFE_CALL( hipMalloc((void**)&bDev, nb) ) ;
SAFE_CALL( hipMalloc((void**)&cDev, nb) ) ;
SAFE_CALL( hipMemcpy(aDev, aHost, nb, hipMemcpyHostToDevice) );
SAFE_CALL( hipMemcpy(bDev, bHost, nb, hipMemcpyHostToDevice) );
//------------------------------------------------------
//------ Calculation on GPU --------------
SAFE_CALL( hipEventCreate(&start) );
SAFE_CALL( hipEventCreate(&stop) );
SAFE_CALL( hipEventRecord(start, 0) );
launchSumKernel(aDev, bDev, cDev, n);
SAFE_CALL( hipEventRecord(stop, 0) );
SAFE_CALL( hipEventSynchronize(stop) );
SAFE_CALL( hipEventElapsedTime(&timeGPU, start, stop) );
//--------------------------------------
printf("processing time on GPU: %4.4f s\n", timeGPU/1000.0);
printf("processing time on CPU: %4.4f s\n", timeCPU);
//--------- Compare GPU and CPU results -----------------------------
SAFE_CALL( hipMemcpy(answer, cDev, nb, hipMemcpyDeviceToHost) );
for(int i = 0; i < n; ++i){
if(cHost[i] != answer[i]) {
std::cout << "Incorrect result at [" << i << "]: " << aHost[i] << " + " << bHost[i] << " = " << answer[i] << "\n";
break;
}
}
//--------------------------------------------------------------------
SAFE_CALL( hipFree(aDev) );
SAFE_CALL( hipFree(bDev) );
SAFE_CALL( hipFree(cDev) );
free(aHost);
free(bHost);
free(cHost);
free(answer);
return EXIT_SUCCESS;
}
| 8eb46716b57055fa76b4d314c1fd5ac05b2b779c.cu | #include <iostream>
#include <cuda_runtime.h>
#include <time.h>
#include <limits>
#include "inttypes.h"
#include "cudaErrorHadling.h"
void launchSumKernel(int32_t *a, int32_t *b, int32_t *c, int32_t n);
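/*
 * launchSumKernel is defined in a separate translation unit that is not part of
 * this file. A minimal sketch of what such a wrapper could look like, assuming
 * one thread per element (the kernel name and block size below are illustrative
 * assumptions, not the actual implementation):
 *
 *   __global__ void sumKernel(const int32_t *a, const int32_t *b, int32_t *c, int32_t n){
 *       int i = blockIdx.x * blockDim.x + threadIdx.x;   // global element index
 *       if (i < n) c[i] = a[i] + b[i];                    // guarded elementwise add
 *   }
 *   void launchSumKernel(int32_t *a, int32_t *b, int32_t *c, int32_t n){
 *       int block = 256;
 *       sumKernel<<<(n + block - 1) / block, block>>>(a, b, c, n);
 *   }
 */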
const char * const printMemorySize(size_t bytes){
char inches[] = {' ', 'K', 'M', 'G', 'T'};
double sz = bytes;
int inch = 0;
for (; sz > 512 && inch < 4; ++inch){ // inch must remain a valid index into inches[]
sz /= 1024;
}
static char ret[64];
sprintf(ret, "%.2f %cB", sz, inches[inch]);
return ret;
}
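// Example: printMemorySize(49152) returns "48.00 KB".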
float timer(){
static clock_t timer = 0;
if (!timer){
timer = clock();
return 0;
}
clock_t current = clock();
float ret = ((float)(current - timer))/CLOCKS_PER_SEC;
timer = current;
return ret;
}
bool ourRequirementsPassed(const cudaDeviceProp & devProp){
return devProp.major >= 1;
}
int selectCUDADevice(){
int deviceCount = 0, suitableDevice = -1;
cudaDeviceProp devProp;
cudaGetDeviceCount( &deviceCount );
std::cout << "Found "<< deviceCount << " devices: \n";
for (int device = 0; device < deviceCount; ++device) {
cudaGetDeviceProperties ( &devProp, device );
std::cout << "Device: " << device << std::endl;
std::cout << " Compute capability: " << devProp.major << "." << devProp.minor << std::endl;
std::cout << " Name: " << devProp.name << std::endl;
std::cout << "----------------------------------------" << std::endl;
std::cout << " Total Global Memory: " << printMemorySize(devProp.totalGlobalMem) << std::endl;
std::cout << " Shared Memory Per Block: " << printMemorySize(devProp.sharedMemPerBlock) << std::endl;
std::cout << " Total Const Memory: " << printMemorySize(devProp.totalConstMem) << std::endl;
std::cout << " L2 Cache size: " << printMemorySize(devProp.l2CacheSize) << std::endl;
std::cout << " Memory bus width: " << printMemorySize(devProp.memoryBusWidth/8) << std::endl;
std::cout << " Memory frequency: " << devProp.memoryClockRate << " kHz" << std::endl;
std::cout << "----------------------------------------" << std::endl;
std::cout << " Multiprocessors: " << devProp.multiProcessorCount << std::endl;
std::cout << " Clock rate: " << devProp.clockRate << " kHz" << std::endl;
std::cout << " Warp Size: " << devProp.warpSize << std::endl;
std::cout << " Max grid size: " << "(" << devProp.maxGridSize[0] << ", " << devProp.maxGridSize[1] << ", " << devProp.maxGridSize[2] << ")" << std::endl;
std::cout << " Max block size: " << "(" << devProp.maxThreadsDim[0] << ", " << devProp.maxThreadsDim[1] << ", " << devProp.maxThreadsDim[2] << ")" << std::endl;
std::cout << " Max threads per multiprocessor: " << devProp.maxThreadsPerMultiProcessor << std::endl;
std::cout << " Max threads per block: " << devProp.maxThreadsPerBlock << std::endl;
std::cout << " Registers per block: " << devProp.regsPerBlock << std::endl;
std::cout << "----------------------------------------" << std::endl;
std::cout << std::endl;
if(suitableDevice < 0 && ourRequirementsPassed(devProp)){
suitableDevice = device;
}
}
return suitableDevice;
}
void initializeRandomArray(int *array, int length){
static const int32_t MY_INT32_MAX = std::numeric_limits<int32_t>::max();
for(int i =0; i < length; ++i){
array[i] = rand() % MY_INT32_MAX;
}
}
int main(int argc, char *argv[]){
//------------- Variables -----------
cudaEvent_t start, stop;
float timeCPU = 0.0,
timeGPU = 0.0;
int n = 1024;
int32_t *aHost = NULL, *bHost = NULL, *cHost = NULL;
int32_t *aDev = NULL , *bDev = NULL , *cDev = NULL, *answer = NULL;
//-----------------------------------
//--------- Command line -----------
if(argc != 2){
std::cout << "You may define vector size via comandline as: $<program_name> <vector_size>\n";
std::cout << "Default vector size is: " << n << std::endl;
}
else{
int tmp = atoi(argv[1]);
if (tmp > 1){
n = tmp;
}
}
//----------------------------------
//-------- Select device -----------
int device = selectCUDADevice();
if(device == -1) {
std::cout << "Can not find suitable device" << "\n";
return EXIT_FAILURE;
}
SAFE_CALL(cudaSetDevice(device));
//-----------------------------------
//------- Host memory allocation ------------
int nb = n*sizeof(int);
aHost = (int32_t*)malloc(nb);
bHost = (int32_t*)malloc(nb);
cHost = (int32_t*)malloc(nb);
answer = (int32_t*)malloc(nb);
//-------------------------------------------
//-------- Initialization arrays by random values ------
srand(clock());
initializeRandomArray(aHost, n);
initializeRandomArray(bHost, n);
//------------------------------------------------------
//-------- Calculation on CPU --------------------------
timer();
for(int i = 0; i < n; ++i){
cHost[i] = aHost[i] + bHost[i];
}
timeCPU = timer();
fflush(stdout);
//------------------------------------------------------
//----- GPU memory allocation and initialization -------
SAFE_CALL( cudaMalloc((void**)&aDev, nb) );
SAFE_CALL( cudaMalloc((void**)&bDev, nb) ) ;
SAFE_CALL( cudaMalloc((void**)&cDev, nb) ) ;
SAFE_CALL( cudaMemcpy(aDev, aHost, nb, cudaMemcpyHostToDevice) );
SAFE_CALL( cudaMemcpy(bDev, bHost, nb, cudaMemcpyHostToDevice) );
//------------------------------------------------------
//------ Calculation on GPU --------------
SAFE_CALL( cudaEventCreate(&start) );
SAFE_CALL( cudaEventCreate(&stop) );
SAFE_CALL( cudaEventRecord(start, 0) );
launchSumKernel(aDev, bDev, cDev, n);
SAFE_CALL( cudaEventRecord(stop, 0) );
SAFE_CALL( cudaEventSynchronize(stop) );
SAFE_CALL( cudaEventElapsedTime(&timeGPU, start, stop) );
//--------------------------------------
printf("processing time on GPU: %4.4f s\n", timeGPU/1000.0);
printf("processing time on CPU: %4.4f s\n", timeCPU);
//--------- Compare GPU and CPU results -----------------------------
SAFE_CALL( cudaMemcpy(answer, cDev, nb, cudaMemcpyDeviceToHost) );
for(int i = 0; i < n; ++i){
if(cHost[i] != answer[i]) {
std::cout << "Incorrect result at [" << i << "]: " << aHost[i] << " + " << bHost[i] << " = " << answer[i] << "\n";
break;
}
}
//--------------------------------------------------------------------
SAFE_CALL( cudaFree(aDev) );
SAFE_CALL( cudaFree(bDev) );
SAFE_CALL( cudaFree(cDev) );
free(aHost);
free(bHost);
free(cHost);
free(answer);
return EXIT_SUCCESS;
}
|
7b545e1b3a2cb71ced83f35c4cec9f12e89d9745.hip | // !!! This is a file automatically generated by hipify!!!
/*
* University of Illinois Open Source License
* Copyright 2010 Luthey-Schulten Group,
* All rights reserved.
*
* Developed by: Luthey-Schulten Group
* University of Illinois at Urbana-Champaign
* http://www.scs.uiuc.edu/~schulten
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the Software), to deal with
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to
* do so, subject to the following conditions:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimers in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the names of the Luthey-Schulten Group, University of Illinois at
* Urbana-Champaign, nor the names of its contributors may be used to endorse or
* promote products derived from this Software without specific prior written
* permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS WITH THE SOFTWARE.
*
* Author(s): Elijah Roberts
*/
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include "lptf/Profile.h"
#include "lm/Cuda.h"
#include "lm/Math.h"
#include "TimingConstants.h"
#define LS_WORDS_PER_SITE 2
#define LS_APRON_SIZE 1
#if !defined LS_X_BLOCK_MAX_X_SIZE
#define LS_X_BLOCK_MAX_X_SIZE 256
#endif
#if !defined LS_Y_BLOCK_X_SIZE
#define LS_Y_BLOCK_X_SIZE 32
#endif
#if !defined LS_Y_BLOCK_Y_SIZE
#define LS_Y_BLOCK_Y_SIZE 4
#endif
#if !defined LS_Z_BLOCK_X_SIZE
#define LS_Z_BLOCK_X_SIZE 32
#endif
#if !defined LS_Z_BLOCK_Z_SIZE
#define LS_Z_BLOCK_Z_SIZE 4
#endif
#define LS_PACKED_SITES
#define LS_PACKED_LAST_OBJECT_MASK 0xFF000000
#define MPD_MAX_PARTICLE_OVERFLOWS 512
#define MPD_OVERFLOW_LIST_ENTRIES 1+2*MPD_MAX_PARTICLE_OVERFLOWS
#include "lm/rdme/dev/xor_random_dev.cu"
#include "lm/rdme/dev/lattice_sim_1d_dev.cu"
#include "lm/rdme/dev/byte_diffusion_1d_dev.cu"
// Allocate the profile space.
PROF_ALLOC;
#define X_SIZE 128
#define Y_SIZE 128
#define Z_SIZE 64
#define PARTICLE_COUNT 216720 // 1 mM
__global__ void x_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash, unsigned int* siteOverflowList);
__global__ void y_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash, unsigned int* siteOverflowList);
__global__ void z_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeZSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash, unsigned int* siteOverflowList);
void runTimestep(hipStream_t stream, void* inLattice, void* outLattice, void* siteOverflowList, uint64_t xseed, uint64_t yseed, uint64_t zseed) throw(lm::CUDAException);
int main(int argc, char **argv)
{
try
{
PROF_INIT;
PROF_BEGIN(PROF_MAIN_RUN);
// Allocate the cuda resources.
hipStream_t stream;
unsigned int* startLattice;
unsigned int* startLatticeCounts;
void* inLattice;
void* outLattice;
void* overflowList;
startLattice = new unsigned int[X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE];
startLatticeCounts = new unsigned int[X_SIZE*Y_SIZE*Z_SIZE];
memset(startLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int));
memset(startLatticeCounts, 0, X_SIZE*Y_SIZE*Z_SIZE*sizeof(unsigned int));
CUDA_EXCEPTION_CHECK(hipStreamCreate(&stream));
CUDA_EXCEPTION_CHECK(hipMalloc(&inLattice, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMalloc(&outLattice, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMalloc(&overflowList, MPD_OVERFLOW_LIST_ENTRIES*sizeof(unsigned int)));
// Fill in some random particles.
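// Each lattice site is LS_WORDS_PER_SITE (2) 32-bit words with four 8-bit particle
// slots per word; the second word of site r lives at offset r + X_SIZE*Y_SIZE*Z_SIZE,
// so a site holds up to 8 particles, after which additions are skipped with a warning.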
srand(2010);
for (unsigned int i=0; i<PARTICLE_COUNT; i++)
{
unsigned int r = (unsigned int)((((double)rand())/((double)RAND_MAX))*((double)X_SIZE)*((double)Y_SIZE)*((double)Z_SIZE));
if (startLatticeCounts[r] < 4)
{
((unsigned char*)&startLattice[r])[startLatticeCounts[r]] = (rand()%255)+1;
startLatticeCounts[r]++;
}
else if (startLatticeCounts[r] < 8)
{
((unsigned char*)&startLattice[r+(X_SIZE*Y_SIZE*Z_SIZE)])[startLatticeCounts[r]] = (rand()%255)+1;
startLatticeCounts[r]++;
}
else
{
printf("Warning: skipped adding particle to fully occupied site.\n");
}
}
// Start timings the kernels.
PROF_BEGIN(PROF_SUBMIT_KERNELS);
PROF_CUDA_START(stream);
// Launch the kernels.
int NUM_LAUNCHES=100;
for (int i=0; i<NUM_LAUNCHES; i++)
{
// Reset the memory.
CUDA_EXCEPTION_CHECK(hipMemcpy(inLattice, startLattice, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int), hipMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(hipMemset(outLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMemset(overflowList, 0, MPD_OVERFLOW_LIST_ENTRIES*sizeof(unsigned int)));
// Run the timestep.
PROF_CUDA_BEGIN(PROF_TIMESTEP_RUNNING,stream);
runTimestep(stream, inLattice, outLattice, overflowList, 1, 2, 3);
PROF_CUDA_END(PROF_TIMESTEP_RUNNING,stream);
}
// Wait for all of the kernels to finish.
CUDA_EXCEPTION_CHECK(hipStreamSynchronize(stream));
// Record the timings.
PROF_CUDA_FINISH(stream);
CUDA_EXCEPTION_CHECK(hipFree(overflowList));
CUDA_EXCEPTION_CHECK(hipFree(outLattice));
CUDA_EXCEPTION_CHECK(hipFree(inLattice));
delete[] startLatticeCounts;
delete[] startLattice;
CUDA_EXCEPTION_CHECK(hipStreamDestroy(stream));
PROF_END(PROF_SUBMIT_KERNELS);
printf("Profile file saved as: %s\n",PROF_MAKE_STR(PROF_OUT_FILE));
PROF_END(PROF_MAIN_RUN);
PROF_WRITE;
return 0;
}
catch (lm::CUDAException& e)
{
std::cerr << "CUDA Exception during execution: " << e.what() << std::endl;
}
catch (std::exception& e)
{
std::cerr << "Exception during execution: " << e.what() << std::endl;
}
catch (...)
{
std::cerr << "Unknown Exception during execution." << std::endl;
}
PROF_END(PROF_MAIN_RUN);
PROF_WRITE;
return -1;
}
void runTimestep(hipStream_t stream, void* inLattice, void* outLattice, void* siteOverflowList, uint64_t xseed, uint64_t yseed, uint64_t zseed)
throw(lm::CUDAException)
{
// Calculate some properties of the lattice.
const unsigned int latticeXSize = X_SIZE;
const unsigned int latticeYSize = Y_SIZE;
const unsigned int latticeZSize = Z_SIZE;
const unsigned int latticeXYSize = X_SIZE*Y_SIZE;
const unsigned int latticeXYZSize = X_SIZE*Y_SIZE*Z_SIZE;
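// Under HIP, hipLaunchKernelGGL(kernel, gridDim, blockDim, dynamicSharedBytes, stream, args...)
// replaces the CUDA <<<grid, block, shared, stream>>> launch syntax used in the .cu version.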
// Execute the kernel for the x direction.
PROF_CUDA_BEGIN(PROF_X_DIFFUSION,stream);
unsigned int gridXSize;
dim3 gridSize, threadBlockSize;
if (!calculateXLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, LS_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeXSize, latticeZSize))
throw lm::InvalidArgException("Unable to calculate correct x launch parameters, the lattice size is incompatible.");
CUDA_EXCEPTION_EXECUTE((hipLaunchKernelGGL(x_kernel, dim3(gridSize), dim3(threadBlockSize), 0, stream, (unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeXYSize, latticeXYZSize, xseed, (unsigned int*)siteOverflowList)));
PROF_CUDA_END(PROF_X_DIFFUSION,stream);
// Execute the kernel for the y direction.
PROF_CUDA_BEGIN(PROF_Y_DIFFUSION,stream);
if (!calculateYLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, LS_Y_BLOCK_X_SIZE, LS_Y_BLOCK_Y_SIZE, latticeXSize, latticeYSize, latticeZSize))
throw lm::InvalidArgException("Unable to calculate correct y launch parameters, the lattice size is incompatible.");
CUDA_EXCEPTION_EXECUTE((hipLaunchKernelGGL(y_kernel, dim3(gridSize), dim3(threadBlockSize), 0, stream, (unsigned int*)outLattice, (unsigned int*)inLattice, gridXSize, latticeXSize, latticeYSize, latticeXYSize, latticeXYZSize, yseed, (unsigned int*)siteOverflowList)));
PROF_CUDA_END(PROF_Y_DIFFUSION,stream);
// Execute the kernel for the z direction.
PROF_CUDA_BEGIN(PROF_Z_DIFFUSION,stream);
if (!calculateZLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, LS_Z_BLOCK_X_SIZE, LS_Z_BLOCK_Z_SIZE, latticeXSize, latticeYSize, latticeZSize))
throw lm::InvalidArgException("Unable to calculate correct z launch parameters, the lattice size is incompatible.");
CUDA_EXCEPTION_EXECUTE((hipLaunchKernelGGL(z_kernel, dim3(gridSize), dim3(threadBlockSize), 0, stream, (unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeZSize, latticeXYSize, latticeXYZSize, zseed, (unsigned int*)siteOverflowList)));
PROF_CUDA_END(PROF_Z_DIFFUSION,stream);
}
__global__ void __launch_bounds__(LS_X_BLOCK_MAX_X_SIZE,1) x_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash, unsigned int* siteOverflowList)
{
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeXIndex = (bx*blockDim.x) + threadIdx.x;
unsigned int latticeIndex = (bz*latticeXYSize) + (by*latticeXSize) + latticeXIndex;
unsigned int windowIndex = threadIdx.x+LS_APRON_SIZE;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_X_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyXWindowFromLattice(bx, inLattice, window, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex);
__syncthreads();
////////////////////////////////////////
// Make the choice for each particle. //
////////////////////////////////////////
__shared__ unsigned int choices[LS_X_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Make the choices.
makeXDiffusionChoices(window, choices, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex, blockDim.x, timestepHash);
__syncthreads();
//////////////////////////////////////////////////////////
// Create version of the lattice at the next time step. //
//////////////////////////////////////////////////////////
// Propagate the choices to the new lattice segment.
performPropagation(outLattice, window, choices, latticeIndex, latticeXYZSize, windowIndex-1, windowIndex, windowIndex+1, LS_X_WINDOW_SIZE, siteOverflowList);
}
/**
* Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice
* site, storing the new lattice into shared memory, and then updating the global lattice.
*/
__global__ void __launch_bounds__(LS_Y_BLOCK_X_SIZE*LS_Y_BLOCK_Y_SIZE,1) y_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash, unsigned int* siteOverflowList)
{
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeYIndex = (by*blockDim.y) + threadIdx.y;
unsigned int latticeIndex = (bz*latticeXYSize) + (latticeYIndex*latticeXSize) + (bx*blockDim.x) + threadIdx.x;
unsigned int windowYIndex = threadIdx.y+LS_APRON_SIZE;
unsigned int windowIndex = (windowYIndex*blockDim.x) + threadIdx.x;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_Y_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyYWindowFromLattice(inLattice, window, latticeIndex, latticeYIndex, latticeXSize, latticeYSize, latticeXYZSize, windowIndex, windowYIndex);
__syncthreads();
////////////////////////////////////////
// Make the choice for each particle. //
////////////////////////////////////////
__shared__ unsigned int choices[LS_Y_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Make the choices.
makeYDiffusionChoices(window, choices, latticeIndex, latticeYIndex, latticeXSize, latticeYSize, latticeXYSize, windowIndex, windowYIndex, timestepHash);
__syncthreads();
//////////////////////////////////////////////////////////
// Create version of the lattice at the next time step. //
//////////////////////////////////////////////////////////
// Propagate the choices to the new lattice segment.
performPropagation(outLattice, window, choices, latticeIndex, latticeXYZSize, windowIndex-LS_Y_BLOCK_X_SIZE, windowIndex, windowIndex+LS_Y_BLOCK_X_SIZE, LS_Y_WINDOW_SIZE, siteOverflowList);
}
/**
* Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice
* site, storing the new lattice into shared memory, and then updating the global lattice.
*/
__global__ void __launch_bounds__(LS_Z_BLOCK_X_SIZE*LS_Z_BLOCK_Z_SIZE,1) z_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeZSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash, unsigned int* siteOverflowList)
{
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeZIndex = (bz*blockDim.z) + threadIdx.z;
unsigned int latticeIndex = (latticeZIndex*latticeXYSize) + (by*latticeXSize) + (bx*blockDim.x) + threadIdx.x;
unsigned int windowZIndex = threadIdx.z+LS_APRON_SIZE;
unsigned int windowIndex = (windowZIndex*blockDim.x) + threadIdx.x;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_Z_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyZWindowFromLattice(inLattice, window, latticeIndex, latticeZIndex, latticeZSize, latticeXYSize, latticeXYZSize, windowIndex, windowZIndex);
__syncthreads();
////////////////////////////////////////
// Make the choice for each particle. //
////////////////////////////////////////
__shared__ unsigned int choices[LS_Z_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Make the choices.
makeZDiffusionChoices(window, choices, latticeIndex, latticeZIndex, latticeZSize, latticeXYSize, latticeXYZSize, windowIndex, windowZIndex, timestepHash);
__syncthreads();
//////////////////////////////////////////////////////////
// Create version of the lattice at the next time step. //
//////////////////////////////////////////////////////////
// Propagate the choices to the new lattice segment.
performPropagation(outLattice, window, choices, latticeIndex, latticeXYZSize, windowIndex-LS_Z_BLOCK_X_SIZE, windowIndex, windowIndex+LS_Z_BLOCK_X_SIZE, LS_Z_WINDOW_SIZE, siteOverflowList);
}
| 7b545e1b3a2cb71ced83f35c4cec9f12e89d9745.cu | /*
* University of Illinois Open Source License
* Copyright 2010 Luthey-Schulten Group,
* All rights reserved.
*
* Developed by: Luthey-Schulten Group
* University of Illinois at Urbana-Champaign
* http://www.scs.uiuc.edu/~schulten
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the Software), to deal with
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to
* do so, subject to the following conditions:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimers in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the names of the Luthey-Schulten Group, University of Illinois at
* Urbana-Champaign, nor the names of its contributors may be used to endorse or
* promote products derived from this Software without specific prior written
* permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS WITH THE SOFTWARE.
*
* Author(s): Elijah Roberts
*/
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <stdint.h>
#include <cuda.h>
#include "lptf/Profile.h"
#include "lm/Cuda.h"
#include "lm/Math.h"
#include "TimingConstants.h"
#define LS_WORDS_PER_SITE 2
#define LS_APRON_SIZE 1
#if !defined LS_X_BLOCK_MAX_X_SIZE
#define LS_X_BLOCK_MAX_X_SIZE 256
#endif
#if !defined LS_Y_BLOCK_X_SIZE
#define LS_Y_BLOCK_X_SIZE 32
#endif
#if !defined LS_Y_BLOCK_Y_SIZE
#define LS_Y_BLOCK_Y_SIZE 4
#endif
#if !defined LS_Z_BLOCK_X_SIZE
#define LS_Z_BLOCK_X_SIZE 32
#endif
#if !defined LS_Z_BLOCK_Z_SIZE
#define LS_Z_BLOCK_Z_SIZE 4
#endif
#define LS_PACKED_SITES
#define LS_PACKED_LAST_OBJECT_MASK 0xFF000000
#define MPD_MAX_PARTICLE_OVERFLOWS 512
#define MPD_OVERFLOW_LIST_ENTRIES 1+2*MPD_MAX_PARTICLE_OVERFLOWS
#include "lm/rdme/dev/xor_random_dev.cu"
#include "lm/rdme/dev/lattice_sim_1d_dev.cu"
#include "lm/rdme/dev/byte_diffusion_1d_dev.cu"
// Allocate the profile space.
PROF_ALLOC;
#define X_SIZE 128
#define Y_SIZE 128
#define Z_SIZE 64
#define PARTICLE_COUNT 216720 // 1 mM
__global__ void x_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash, unsigned int* siteOverflowList);
__global__ void y_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash, unsigned int* siteOverflowList);
__global__ void z_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeZSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash, unsigned int* siteOverflowList);
void runTimestep(cudaStream_t stream, void* inLattice, void* outLattice, void* siteOverflowList, uint64_t xseed, uint64_t yseed, uint64_t zseed) throw(lm::CUDAException);
int main(int argc, char **argv)
{
try
{
PROF_INIT;
PROF_BEGIN(PROF_MAIN_RUN);
// Allocate the cuda resources.
cudaStream_t stream;
unsigned int* startLattice;
unsigned int* startLatticeCounts;
void* inLattice;
void* outLattice;
void* overflowList;
startLattice = new unsigned int[X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE];
startLatticeCounts = new unsigned int[X_SIZE*Y_SIZE*Z_SIZE];
memset(startLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int));
memset(startLatticeCounts, 0, X_SIZE*Y_SIZE*Z_SIZE*sizeof(unsigned int));
CUDA_EXCEPTION_CHECK(cudaStreamCreate(&stream));
CUDA_EXCEPTION_CHECK(cudaMalloc(&inLattice, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMalloc(&outLattice, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMalloc(&overflowList, MPD_OVERFLOW_LIST_ENTRIES*sizeof(unsigned int)));
// Fill in some random particles.
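// Each lattice site is LS_WORDS_PER_SITE (2) 32-bit words with four 8-bit particle
// slots per word; the second word of site r lives at offset r + X_SIZE*Y_SIZE*Z_SIZE,
// so a site holds up to 8 particles, after which additions are skipped with a warning.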
srand(2010);
for (unsigned int i=0; i<PARTICLE_COUNT; i++)
{
unsigned int r = (unsigned int)((((double)rand())/((double)RAND_MAX))*((double)X_SIZE)*((double)Y_SIZE)*((double)Z_SIZE));
if (startLatticeCounts[r] < 4)
{
((unsigned char*)&startLattice[r])[startLatticeCounts[r]] = (rand()%255)+1;
startLatticeCounts[r]++;
}
else if (startLatticeCounts[r] < 8)
{
((unsigned char*)&startLattice[r+(X_SIZE*Y_SIZE*Z_SIZE)])[startLatticeCounts[r]] = (rand()%255)+1;
startLatticeCounts[r]++;
}
else
{
printf("Warning: skipped adding particle to fully occupied site.\n");
}
}
// Start timings the kernels.
PROF_BEGIN(PROF_SUBMIT_KERNELS);
PROF_CUDA_START(stream);
// Launch the kernels.
int NUM_LAUNCHES=100;
for (int i=0; i<NUM_LAUNCHES; i++)
{
// Reset the memory.
CUDA_EXCEPTION_CHECK(cudaMemcpy(inLattice, startLattice, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int), cudaMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(cudaMemset(outLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMemset(overflowList, 0, MPD_OVERFLOW_LIST_ENTRIES*sizeof(unsigned int)));
// Run the timestep.
PROF_CUDA_BEGIN(PROF_TIMESTEP_RUNNING,stream);
runTimestep(stream, inLattice, outLattice, overflowList, 1, 2, 3);
PROF_CUDA_END(PROF_TIMESTEP_RUNNING,stream);
}
// Wait for all of the kernels to finish.
CUDA_EXCEPTION_CHECK(cudaStreamSynchronize(stream));
// Record the timings.
PROF_CUDA_FINISH(stream);
CUDA_EXCEPTION_CHECK(cudaFree(overflowList));
CUDA_EXCEPTION_CHECK(cudaFree(outLattice));
CUDA_EXCEPTION_CHECK(cudaFree(inLattice));
delete[] startLatticeCounts;
delete[] startLattice;
CUDA_EXCEPTION_CHECK(cudaStreamDestroy(stream));
PROF_END(PROF_SUBMIT_KERNELS);
printf("Profile file saved as: %s\n",PROF_MAKE_STR(PROF_OUT_FILE));
PROF_END(PROF_MAIN_RUN);
PROF_WRITE;
return 0;
}
catch (lm::CUDAException& e)
{
std::cerr << "CUDA Exception during execution: " << e.what() << std::endl;
}
catch (std::exception& e)
{
std::cerr << "Exception during execution: " << e.what() << std::endl;
}
catch (...)
{
std::cerr << "Unknown Exception during execution." << std::endl;
}
PROF_END(PROF_MAIN_RUN);
PROF_WRITE;
return -1;
}
void runTimestep(cudaStream_t stream, void* inLattice, void* outLattice, void* siteOverflowList, uint64_t xseed, uint64_t yseed, uint64_t zseed)
throw(lm::CUDAException)
{
// Calculate some properties of the lattice.
const unsigned int latticeXSize = X_SIZE;
const unsigned int latticeYSize = Y_SIZE;
const unsigned int latticeZSize = Z_SIZE;
const unsigned int latticeXYSize = X_SIZE*Y_SIZE;
const unsigned int latticeXYZSize = X_SIZE*Y_SIZE*Z_SIZE;
// Execute the kernel for the x direction.
PROF_CUDA_BEGIN(PROF_X_DIFFUSION,stream);
unsigned int gridXSize;
dim3 gridSize, threadBlockSize;
if (!calculateXLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, LS_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeXSize, latticeZSize))
throw lm::InvalidArgException("Unable to calculate correct x launch parameters, the lattice size is incompatible.");
CUDA_EXCEPTION_EXECUTE((x_kernel<<<gridSize,threadBlockSize,0,stream>>>((unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeXYSize, latticeXYZSize, xseed, (unsigned int*)siteOverflowList)));
PROF_CUDA_END(PROF_X_DIFFUSION,stream);
// Execute the kernel for the y direction.
PROF_CUDA_BEGIN(PROF_Y_DIFFUSION,stream);
if (!calculateYLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, LS_Y_BLOCK_X_SIZE, LS_Y_BLOCK_Y_SIZE, latticeXSize, latticeYSize, latticeZSize))
throw lm::InvalidArgException("Unable to calculate correct y launch parameters, the lattice size is incompatible.");
CUDA_EXCEPTION_EXECUTE((y_kernel<<<gridSize,threadBlockSize,0,stream>>>((unsigned int*)outLattice, (unsigned int*)inLattice, gridXSize, latticeXSize, latticeYSize, latticeXYSize, latticeXYZSize, yseed, (unsigned int*)siteOverflowList)));
PROF_CUDA_END(PROF_Y_DIFFUSION,stream);
// Execute the kernel for the z direction.
PROF_CUDA_BEGIN(PROF_Z_DIFFUSION,stream);
if (!calculateZLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, LS_Z_BLOCK_X_SIZE, LS_Z_BLOCK_Z_SIZE, latticeXSize, latticeYSize, latticeZSize))
throw lm::InvalidArgException("Unable to calculate correct z launch parameters, the lattice size is incompatible.");
CUDA_EXCEPTION_EXECUTE((z_kernel<<<gridSize,threadBlockSize,0,stream>>>((unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeZSize, latticeXYSize, latticeXYZSize, zseed, (unsigned int*)siteOverflowList)));
PROF_CUDA_END(PROF_Z_DIFFUSION,stream);
}
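// Editor's note (illustrative sketch, not part of the original source): the
// calculateX/Y/ZLaunchParameters helpers are not included in this excerpt. Under the
// assumption that the x pass splits each lattice row into segments of at most
// maxXBlockSize sites and packs the (x,y) blocks into gridDim.x with one grid row per
// z plane, a minimal version could look like the following; the real helpers may differ.
inline bool exampleCalculateXLaunchParameters(unsigned int* gridXSize, dim3* gridSize, dim3* threadBlockSize,
unsigned int maxXBlockSize, unsigned int latticeXSize, unsigned int latticeYSize, unsigned int latticeZSize)
{
unsigned int xBlockXSize = (latticeXSize < maxXBlockSize) ? latticeXSize : maxXBlockSize;
if (latticeXSize % xBlockXSize != 0) return false; // lattice size is incompatible with the block size
*gridXSize = latticeXSize / xBlockXSize; // number of blocks along x
*gridSize = dim3((*gridXSize) * latticeYSize, latticeZSize, 1); // flattened (x,y) blocks, one grid row per z plane
*threadBlockSize = dim3(xBlockXSize, 1, 1); // one thread per lattice site in the segment
return true;
}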
__global__ void __launch_bounds__(LS_X_BLOCK_MAX_X_SIZE,1) x_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash, unsigned int* siteOverflowList)
{
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeXIndex = (bx*blockDim.x) + threadIdx.x;
unsigned int latticeIndex = (bz*latticeXYSize) + (by*latticeXSize) + latticeXIndex;
unsigned int windowIndex = threadIdx.x+LS_APRON_SIZE;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_X_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyXWindowFromLattice(bx, inLattice, window, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex);
__syncthreads();
////////////////////////////////////////
// Make the choice for each particle. //
////////////////////////////////////////
__shared__ unsigned int choices[LS_X_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Make the choices.
makeXDiffusionChoices(window, choices, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex, blockDim.x, timestepHash);
__syncthreads();
//////////////////////////////////////////////////////////
// Create version of the lattice at the next time step. //
//////////////////////////////////////////////////////////
// Propagate the choices to the new lattice segment.
performPropagation(outLattice, window, choices, latticeIndex, latticeXYZSize, windowIndex-1, windowIndex, windowIndex+1, LS_X_WINDOW_SIZE, siteOverflowList);
}
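// Editor's note (assumption, for illustration only): calculateBlockPosition() is defined
// elsewhere and not shown in this excerpt. A decoding consistent with the launch shape
// sketched above would recover the block's lattice coordinates from the flattened grid as:
__device__ inline void exampleCalculateBlockPosition(unsigned int* bx, unsigned int* by, unsigned int* bz, unsigned int gridXSize)
{
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)
{
*bx = blockIdx.x % gridXSize; // block index along x within its row of blocks
*by = blockIdx.x / gridXSize; // y row of blocks
*bz = blockIdx.y; // one grid row per z plane
}
__syncthreads(); // bx/by/bz are __shared__ in the real kernels, so publish them to all threads
}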
/**
* Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice
* site, storing the new lattice into shared memory, and then updating the global lattice.
*/
__global__ void __launch_bounds__(LS_Y_BLOCK_X_SIZE*LS_Y_BLOCK_Y_SIZE,1) y_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash, unsigned int* siteOverflowList)
{
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeYIndex = (by*blockDim.y) + threadIdx.y;
unsigned int latticeIndex = (bz*latticeXYSize) + (latticeYIndex*latticeXSize) + (bx*blockDim.x) + threadIdx.x;
unsigned int windowYIndex = threadIdx.y+LS_APRON_SIZE;
unsigned int windowIndex = (windowYIndex*blockDim.x) + threadIdx.x;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_Y_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the y window from device memory into shared memory.
copyYWindowFromLattice(inLattice, window, latticeIndex, latticeYIndex, latticeXSize, latticeYSize, latticeXYZSize, windowIndex, windowYIndex);
__syncthreads();
////////////////////////////////////////
// Make the choice for each particle. //
////////////////////////////////////////
__shared__ unsigned int choices[LS_Y_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Make the choices.
makeYDiffusionChoices(window, choices, latticeIndex, latticeYIndex, latticeXSize, latticeYSize, latticeXYSize, windowIndex, windowYIndex, timestepHash);
__syncthreads();
//////////////////////////////////////////////////////////
// Create version of the lattice at the next time step. //
//////////////////////////////////////////////////////////
// Propagate the choices to the new lattice segment.
performPropagation(outLattice, window, choices, latticeIndex, latticeXYZSize, windowIndex-LS_Y_BLOCK_X_SIZE, windowIndex, windowIndex+LS_Y_BLOCK_X_SIZE, LS_Y_WINDOW_SIZE, siteOverflowList);
}
/**
* Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice
* site, storing the new lattice into shared memory, and then updating the global lattice.
*/
__global__ void __launch_bounds__(LS_Z_BLOCK_X_SIZE*LS_Z_BLOCK_Z_SIZE,1) z_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeZSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash, unsigned int* siteOverflowList)
{
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeZIndex = (bz*blockDim.z) + threadIdx.z;
unsigned int latticeIndex = (latticeZIndex*latticeXYSize) + (by*latticeXSize) + (bx*blockDim.x) + threadIdx.x;
unsigned int windowZIndex = threadIdx.z+LS_APRON_SIZE;
unsigned int windowIndex = (windowZIndex*blockDim.x) + threadIdx.x;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_Z_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the z window from device memory into shared memory.
copyZWindowFromLattice(inLattice, window, latticeIndex, latticeZIndex, latticeZSize, latticeXYSize, latticeXYZSize, windowIndex, windowZIndex);
__syncthreads();
////////////////////////////////////////
// Make the choice for each particle. //
////////////////////////////////////////
__shared__ unsigned int choices[LS_Z_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Make the choices.
makeZDiffusionChoices(window, choices, latticeIndex, latticeZIndex, latticeZSize, latticeXYSize, latticeXYZSize, windowIndex, windowZIndex, timestepHash);
__syncthreads();
//////////////////////////////////////////////////////////
// Create version of the lattice at the next time step. //
//////////////////////////////////////////////////////////
// Propagate the choices to the new lattice segment.
performPropagation(outLattice, window, choices, latticeIndex, latticeXYZSize, windowIndex-LS_Z_BLOCK_X_SIZE, windowIndex, windowIndex+LS_Z_BLOCK_X_SIZE, LS_Z_WINDOW_SIZE, siteOverflowList);
}
|
1fd343be51e9064d429af22af93f5d3e75f3cbba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Usage: nvcc -lcublas -DPRINT_RES -DPRINT_PERF gemm.cu
#include "rocblas.h"
#include "hip/library_types.h"
#include <stdio.h>
#include "iostream"
#define NUM_THREADS_PER_BLOCK 1024
#define vec 4
using namespace std;
__global__ void init_a_b(half *a, half *b, int M, int N, int K) {
int c16 = 5;
for(int i = 0; i < M; i++){
for(int j = 0; j < K; j++){
int im = i % c16;
int jm = j % c16;
int add = im + jm;
int am = add % c16;
float resf = (float) am;
half sum = __float2half_rd(resf);
a[i * K + j] = sum;
}
}
for(int i = 0; i < K; i++){
for (int j = 0; j < N; j++){
int im = i % c16;
int jm = j % c16;
int add = im + jm;
int am = add % c16;
float resf = (float) am;
half sum = __float2half_rd(resf);
b[i * N + j] = sum;
}
}
}
__global__ void init_c(float *c_float, int M, int N) {
for (int t = 0; t < M * N; t++) {
c_float[t] = 0.0f;
}
}
__global__ void init_c_cst(float *c_float, int M, int N) {
int c16 = 5;
for(int i = 0; i < M; i++){
for(int j = 0; j < N; j++){
int im = i % c16;
int jm = j % c16;
int add = im + jm;
int am = add % c16;
float resf = (float) am;
c_float[i * N + j] = resf;
}
}
}
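// Editor's note (verification sketch, not part of the original benchmark): because A, B and
// C_cst are filled with the deterministic (i % 5 + j % 5) % 5 pattern above, the expected
// post-GEMM, post-bias, post-ReLU output can be recomputed on the host for spot checks. The
// inputs are small integers, so FP16 operands with FP32 accumulation reproduce them exactly
// for moderate K.
inline float expected_output_host(int i, int j, int K)
{
float acc = 0.0f;
for (int k = 0; k < K; ++k)
acc += (float)((i % 5 + k % 5) % 5) * (float)((k % 5 + j % 5) % 5); // A[i][k] * B[k][j]
acc += (float)((i % 5 + j % 5) % 5); // + C_cst[i][j], added by matAdd
return acc < 0.0f ? 0.0f : acc; // pwRelu (never clips here since every term is >= 0)
}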
void print_res_host(float * arr, int m, int n) {
std::cout << "[";
for(int i = 0; i < m; i++){
if(i == 0)
std::cout << "[";
else
std::cout << " [";
for (int j = 0; j < n; j++){
if(j == 0)
std::cout << (arr[i * n + j]);
else
std::cout<<", "<< (arr[i * n + j]);
}
if(i == m - 1)
std::cout << "]";
else
std::cout << "], "<<std::endl;
}
std::cout << "]";
}
__global__ void matAdd(float *c, float* c_cst, int m, int n){
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < m * n; i += (((m * n) + (NUM_THREADS_PER_BLOCK * vec) - 1) / (NUM_THREADS_PER_BLOCK * vec)) * blockDim.x * vec) {
// Calculate this thread's starting address.
float *base = c + (i * vec);
float4 *cGmem = (float4*)base;
float4 cData = *(cGmem);
float *cst_base = c_cst + (i * vec);
float4 *cst_cGmem = (float4*)cst_base;
float4 cst_cData = *(cst_cGmem);
cData.w = cData.w + cst_cData.w;
cData.x = cData.x + cst_cData.x;
cData.y = cData.y + cst_cData.y;
cData.z = cData.z + cst_cData.z;
*(cGmem) = cData;
}
}
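// Editor's note (sketch, not part of the original benchmark): matAdd above and pwRelu below
// rely on float4 loads, which assumes m * n is a multiple of 4 and that the buffers are
// 16-byte aligned (true for cudaMalloc allocations). A scalar grid-stride variant without
// those assumptions could be:
__global__ void matAddScalar(float* c, const float* c_cst, int m, int n)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < m * n; i += gridDim.x * blockDim.x)
{
c[i] += c_cst[i]; // one element per iteration, no vectorization or alignment requirement
}
}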
__global__ void pwRelu(float *c, int m, int n){
float cutoff = 0;
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < m * n; i += (((m * n) + (NUM_THREADS_PER_BLOCK * vec) - 1) / (NUM_THREADS_PER_BLOCK * vec)) * blockDim.x * vec) {
// Calculate this thread's starting address.
float *base = c + (i * vec);
float4 *cGmem = (float4*)base;
float4 cData = *(cGmem);
if(cData.w < cutoff)
cData.w = 0;
if(cData.x< cutoff)
cData.x = 0;
if(cData.y< cutoff)
cData.y = 0;
if(cData.z< cutoff)
cData.z = 0;
*(cGmem) = cData;
}
}
int main(int argc, char **argv)
{
if(argc != 5){
printf("Specify problem sizes as ./gemm m n k num_iters\n");
return 0;
}
int M = std::atoi(argv[1]);
int N = std::atoi(argv[2]);
int K = std::atoi(argv[3]);
int num_iters = std::atoi(argv[4]);
hipblasHandle_t handle;
hipblasCreate(&handle);
half *A, *B;
float *C, *C_cst;
hipMalloc(&A, M * K * sizeof(half));
hipMalloc(&B, K * N * sizeof(half));
hipMalloc(&C, M * N * sizeof(float));
hipMalloc(&C_cst, M * N * sizeof(float));
float alpha = 1.0;
float beta = 1.0;
hipLaunchKernelGGL(( init_a_b), dim3(1), dim3(1), 0, 0, A, B, M, N, K);
hipLaunchKernelGGL(( init_c), dim3(1), dim3(1), 0, 0, C, M, N);
hipLaunchKernelGGL(( init_c_cst), dim3(1), dim3(1), 0, 0, C_cst, M, N);
// Warmup iterations.
for(int i = 0; i < 5; ++i){
if (HIPBLAS_STATUS_SUCCESS != hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, M, K, &alpha, B, HIP_R_16F, N, A, HIP_R_16F, K, &beta, C, HIP_R_32F, N, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)) {
printf("hipblasGemmEx failed\n");
exit(-1);
}
}
// Profiling iterations.
hipblasHandle_t cublasHandle;
hipblasCreate(&cublasHandle);
cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH);
float aggregateTime = 0.0f;
for(int i = 0; i < num_iters; ++i){
float ms = 0.0f;
hipLaunchKernelGGL(( init_c), dim3(1), dim3(1), 0, 0, C, M, N);
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start);
if (HIPBLAS_STATUS_SUCCESS != hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, M, K, &alpha, B, HIP_R_16F, N, A, HIP_R_16F, K, &beta, C, HIP_R_32F, N, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)) {
printf("hipblasGemmEx failed\n");
exit(-1);
}
dim3 block(NUM_THREADS_PER_BLOCK, 1, 1);
dim3 grid(((M * N) + (NUM_THREADS_PER_BLOCK * vec) - 1) / (NUM_THREADS_PER_BLOCK * vec), 1, 1);
hipLaunchKernelGGL(( matAdd), dim3(grid), dim3(block), 0, 0, C, C_cst, M, N);
hipLaunchKernelGGL(( pwRelu), dim3(grid), dim3(block), 0, 0, C, M, N);
hipEventRecord(end);
hipEventSynchronize(end);
hipEventElapsedTime(&ms, start, end);
hipEventDestroy(start);
hipEventDestroy(end);
aggregateTime += ms;
}
#ifdef PRINT_PERF
float avg_time = ((aggregateTime / num_iters) / 1000.0f);
float ops = (float)M * (float)N * (float)K * 2.0f;
float tflops = (ops * 1.0e-12f) / (avg_time);
fprintf(stderr, "m:%d, n:%d, k:%d, ", M, N, K);
fprintf(stderr, "%f TFLOPS\n", tflops);
#endif
#ifdef PRINT_RES
float * C_host;
C_host = (float*)malloc(M * N * sizeof(float));
hipDeviceSynchronize();
hipMemcpy(C_host, C, M * N * sizeof(float), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
print_res_host(C_host, M, N);
free(C_host);
#endif
hipFree(A);
hipFree(B);
hipFree(C);
return 0;
}
| 1fd343be51e9064d429af22af93f5d3e75f3cbba.cu | // Usage: nvcc -lcublas -DPRINT_RES -DPRINT_PERF gemm.cu
#include "cublas_v2.h"
#include "library_types.h"
#include <stdio.h>
#include "iostream"
#define NUM_THREADS_PER_BLOCK 1024
#define vec 4
using namespace std;
__global__ void init_a_b(half *a, half *b, int M, int N, int K) {
int c16 = 5;
for(int i = 0; i < M; i++){
for(int j = 0; j < K; j++){
int im = i % c16;
int jm = j % c16;
int add = im + jm;
int am = add % c16;
float resf = (float) am;
half sum = __float2half_rd(resf);
a[i * K + j] = sum;
}
}
for(int i = 0; i < K; i++){
for (int j = 0; j < N; j++){
int im = i % c16;
int jm = j % c16;
int add = im + jm;
int am = add % c16;
float resf = (float) am;
half sum = __float2half_rd(resf);
b[i * N + j] = sum;
}
}
}
__global__ void init_c(float *c_float, int M, int N) {
for (int t = 0; t < M * N; t++) {
c_float[t] = 0.0f;
}
}
__global__ void init_c_cst(float *c_float, int M, int N) {
int c16 = 5;
for(int i = 0; i < M; i++){
for(int j = 0; j < N; j++){
int im = i % c16;
int jm = j % c16;
int add = im + jm;
int am = add % c16;
float resf = (float) am;
c_float[i * N + j] = resf;
}
}
}
void print_res_host(float * arr, int m, int n) {
std::cout << "[";
for(int i = 0; i < m; i++){
if(i == 0)
std::cout << "[";
else
std::cout << " [";
for (int j = 0; j < n; j++){
if(j == 0)
std::cout << (arr[i * n + j]);
else
std::cout<<", "<< (arr[i * n + j]);
}
if(i == m - 1)
std::cout << "]";
else
std::cout << "], "<<std::endl;
}
std::cout << "]";
}
__global__ void matAdd(float *c, float* c_cst, int m, int n){
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < m * n; i += (((m * n) + (NUM_THREADS_PER_BLOCK * vec) - 1) / (NUM_THREADS_PER_BLOCK * vec)) * blockDim.x * vec) {
// Calculate this thread's starting address.
float *base = c + (i * vec);
float4 *cGmem = (float4*)base;
float4 cData = *(cGmem);
float *cst_base = c_cst + (i * vec);
float4 *cst_cGmem = (float4*)cst_base;
float4 cst_cData = *(cst_cGmem);
cData.w = cData.w + cst_cData.w;
cData.x = cData.x + cst_cData.x;
cData.y = cData.y + cst_cData.y;
cData.z = cData.z + cst_cData.z;
*(cGmem) = cData;
}
}
__global__ void pwRelu(float *c, int m, int n){
float cutoff = 0;
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < m * n; i += (((m * n) + (NUM_THREADS_PER_BLOCK * vec) - 1) / (NUM_THREADS_PER_BLOCK * vec)) * blockDim.x * vec) {
// Calculate this thread's starting address.
float *base = c + (i * vec);
float4 *cGmem = (float4*)base;
float4 cData = *(cGmem);
if(cData.w < cutoff)
cData.w = 0;
if(cData.x< cutoff)
cData.x = 0;
if(cData.y< cutoff)
cData.y = 0;
if(cData.z< cutoff)
cData.z = 0;
*(cGmem) = cData;
}
}
int main(int argc, char **argv)
{
if(argc != 5){
printf("Specify problem sizes as ./gemm m n k num_iters\n");
return 0;
}
int M = std::atoi(argv[1]);
int N = std::atoi(argv[2]);
int K = std::atoi(argv[3]);
int num_iters = std::atoi(argv[4]);
cublasHandle_t handle;
cublasCreate(&handle);
half *A, *B;
float *C, *C_cst;
cudaMalloc(&A, M * K * sizeof(half));
cudaMalloc(&B, K * N * sizeof(half));
cudaMalloc(&C, M * N * sizeof(float));
cudaMalloc(&C_cst, M * N * sizeof(float));
float alpha = 1.0;
float beta = 1.0;
init_a_b<<<1, 1>>>(A, B, M, N, K);
init_c<<<1, 1>>>(C, M, N);
init_c_cst<<<1, 1>>>(C_cst, M, N);
// Warmup iterations.
for(int i = 0; i < 5; ++i){
if (CUBLAS_STATUS_SUCCESS != cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, M, K, &alpha, B, CUDA_R_16F, N, A, CUDA_R_16F, K, &beta, C, CUDA_R_32F, N, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)) {
printf("cublasGemmEx failed\n");
exit(-1);
}
}
// Profiling iterations.
cublasHandle_t cublasHandle;
cublasCreate(&cublasHandle);
cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH);
float aggregateTime = 0.0f;
for(int i = 0; i < num_iters; ++i){
float ms = 0.0f;
init_c<<<1, 1>>>(C, M, N);
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
if (CUBLAS_STATUS_SUCCESS != cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, M, K, &alpha, B, CUDA_R_16F, N, A, CUDA_R_16F, K, &beta, C, CUDA_R_32F, N, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)) {
printf("cublasGemmEx failed\n");
exit(-1);
}
dim3 block(NUM_THREADS_PER_BLOCK, 1, 1);
dim3 grid(((M * N) + (NUM_THREADS_PER_BLOCK * vec) - 1) / (NUM_THREADS_PER_BLOCK * vec), 1, 1);
matAdd<<<grid, block>>>(C, C_cst, M, N);
pwRelu<<<grid, block>>>(C, M, N);
cudaEventRecord(end);
cudaEventSynchronize(end);
cudaEventElapsedTime(&ms, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
aggregateTime += ms;
}
#ifdef PRINT_PERF
float avg_time = ((aggregateTime / num_iters) / 1000.0f);
float ops = (float)M * (float)N * (float)K * 2.0f;
float tflops = (ops * 1.0e-12f) / (avg_time);
fprintf(stderr, "m:%d, n:%d, k:%d, ", M, N, K);
fprintf(stderr, "%f TFLOPS\n", tflops);
#endif
#ifdef PRINT_RES
float * C_host;
C_host = (float*)malloc(M * N * sizeof(float));
cudaDeviceSynchronize();
cudaMemcpy(C_host, C, M * N * sizeof(float), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
print_res_host(C_host, M, N);
free(C_host);
#endif
cudaFree(A);
cudaFree(B);
cudaFree(C);
return 0;
}
|
3ff3f15edaf6efe3d148f0bce4a4b69ec8607910.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvtext/edit_distance.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/transform_scan.h>
namespace nvtext {
namespace detail {
namespace {
/**
* @brief Compute the Levenshtein distance for each string pair
*
* Documentation here: https://www.cuelogic.com/blog/the-levenshtein-algorithm
* And here: https://en.wikipedia.org/wiki/Levenshtein_distance
*
* @param d_str First string
* @param d_tgt Second string
* @param buffer Working buffer for intermediate calculations
* @return The edit distance value
*/
__device__ cudf::size_type compute_distance(cudf::string_view const& d_str,
cudf::string_view const& d_tgt,
cudf::size_type* buffer)
{
auto const str_length = d_str.length();
auto const tgt_length = d_tgt.length();
if (str_length == 0) return tgt_length;
if (tgt_length == 0) return str_length;
auto begin = str_length < tgt_length ? d_str.begin() : d_tgt.begin();
auto itr = str_length < tgt_length ? d_tgt.begin() : d_str.begin();
// .first is min and .second is max
auto const [n, m] = std::minmax(str_length, tgt_length);
// setup compute buffer pointers
auto v0 = buffer;
auto v1 = v0 + n + 1;
// initialize v0
thrust::sequence(thrust::seq, v0, v1);
for (int i = 0; i < m; ++i, ++itr) {
auto itr_tgt = begin;
v1[0] = i + 1;
for (int j = 0; j < n; ++j, ++itr_tgt) {
auto sub_cost = v0[j] + (*itr != *itr_tgt);
auto del_cost = v0[j + 1] + 1;
auto ins_cost = v1[j] + 1;
v1[j + 1] = ::min(::min(sub_cost, del_cost), ins_cost);
}
thrust::swap(v0, v1);
}
return v0[n];
}
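// Editor's note (illustrative sketch, not part of libcudf): the device routine above implements the
// standard two-row Levenshtein recurrence. A byte-level host reference, handy for spot-checking a few
// results (the device path counts UTF-8 characters, so the two agree only for ASCII input), could be:
[[maybe_unused]] inline cudf::size_type levenshtein_reference_host(char const* s, int s_len, char const* t, int t_len)
{
int* v0 = new int[t_len + 1];
int* v1 = new int[t_len + 1];
for (int j = 0; j <= t_len; ++j) v0[j] = j;
for (int i = 0; i < s_len; ++i) {
v1[0] = i + 1;
for (int j = 0; j < t_len; ++j) {
int sub_cost = v0[j] + (s[i] != t[j] ? 1 : 0);
int del_cost = v0[j + 1] + 1;
int ins_cost = v1[j] + 1;
int best = sub_cost < del_cost ? sub_cost : del_cost;
v1[j + 1] = best < ins_cost ? best : ins_cost;
}
int* tmp = v0; v0 = v1; v1 = tmp;
}
int result = v0[t_len];
delete[] v0;
delete[] v1;
return result;
}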
struct edit_distance_levenshtein_algorithm {
cudf::column_device_view d_strings; // computing these
cudf::column_device_view d_targets; // against these;
cudf::size_type* d_buffer; // compute buffer for each string
std::ptrdiff_t const* d_offsets; // locate sub-buffer for each string
cudf::size_type* d_results; // edit distance values
__device__ void operator()(cudf::size_type idx) const
{
auto d_str =
d_strings.is_null(idx) ? cudf::string_view{} : d_strings.element<cudf::string_view>(idx);
auto d_tgt = [&] __device__ { // d_targets is also allowed to have only one entry
if (d_targets.is_null(idx)) { return cudf::string_view{}; }
return d_targets.size() == 1 ? d_targets.element<cudf::string_view>(0)
: d_targets.element<cudf::string_view>(idx);
}();
d_results[idx] = compute_distance(d_str, d_tgt, d_buffer + d_offsets[idx]);
}
};
struct edit_distance_matrix_levenshtein_algorithm {
cudf::column_device_view d_strings; // computing these against itself
cudf::size_type* d_buffer; // compute buffer for each string
std::ptrdiff_t const* d_offsets; // locate sub-buffer for each string
cudf::size_type* d_results; // edit distance values
__device__ void operator()(cudf::size_type idx) const
{
auto const strings_count = d_strings.size();
auto const row = idx / strings_count;
auto const col = idx % strings_count;
if (row > col) return; // bottom half is computed with the top half of matrix
cudf::string_view d_str1 =
d_strings.is_null(row) ? cudf::string_view{} : d_strings.element<cudf::string_view>(row);
cudf::string_view d_str2 =
d_strings.is_null(col) ? cudf::string_view{} : d_strings.element<cudf::string_view>(col);
auto work_buffer = d_buffer + d_offsets[idx - ((row + 1) * (row + 2)) / 2];
auto const distance = (row == col) ? 0 : compute_distance(d_str1, d_str2, work_buffer);
d_results[idx] = distance; // top half of matrix
d_results[col * strings_count + row] = distance; // bottom half of matrix
}
};
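// Editor's note (worked example, not in the original source): for the packed upper-triangular
// buffers used above, the pair (row, col) with row < col maps to compact slot
// idx - ((row + 1) * (row + 2)) / 2, where idx = row * strings_count + col; the subtracted term
// counts the diagonal and lower-triangle entries skipped in rows 0..row. For strings_count = 4
// the six upper-triangle pairs land in slots 0..5:
// (0,1)->0, (0,2)->1, (0,3)->2, (1,2)->3, (1,3)->4, (2,3)->5
static_assert(0 * 4 + 1 - ((0 + 1) * (0 + 2)) / 2 == 0, "compact slot of pair (0,1)");
static_assert(1 * 4 + 3 - ((1 + 1) * (1 + 2)) / 2 == 4, "compact slot of pair (1,3)");
static_assert(2 * 4 + 3 - ((2 + 1) * (2 + 2)) / 2 == 5, "compact slot of pair (2,3)");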
} // namespace
/**
* @copydoc nvtext::edit_distance
*/
std::unique_ptr<cudf::column> edit_distance(cudf::strings_column_view const& strings,
cudf::strings_column_view const& targets,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const strings_count = strings.size();
if (strings_count == 0) {
return cudf::make_empty_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()});
}
if (targets.size() > 1) {
CUDF_EXPECTS(strings_count == targets.size(), "targets.size() must equal strings.size()");
}
// create device columns from the input columns
auto strings_column = cudf::column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
auto targets_column = cudf::column_device_view::create(targets.parent(), stream);
auto d_targets = *targets_column;
// calculate the size of the compute-buffer;
rmm::device_uvector<std::ptrdiff_t> offsets(strings_count, stream);
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(strings_count),
offsets.begin(),
[d_strings, d_targets] __device__(auto idx) {
if (d_strings.is_null(idx) || d_targets.is_null(idx)) {
return cudf::size_type{0};
}
auto d_str = d_strings.element<cudf::string_view>(idx);
auto d_tgt = d_targets.size() == 1
? d_targets.element<cudf::string_view>(0)
: d_targets.element<cudf::string_view>(idx);
// just need 2 integers for each character of the shorter string
return (::min(d_str.length(), d_tgt.length()) + 1) * 2;
});
// get the total size of the temporary compute buffer
int64_t compute_size =
thrust::reduce(rmm::exec_policy(stream), offsets.begin(), offsets.end(), int64_t{0});
// convert sizes to offsets in-place
thrust::exclusive_scan(rmm::exec_policy(stream), offsets.begin(), offsets.end(), offsets.begin());
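// Editor's note (worked example, not in the original source): the reduce above totals the
// per-row buffer sizes and the exclusive_scan turns those sizes into sub-buffer offsets.
// For example, sizes {6, 0, 10, 4} give compute_size = 20 and offsets {0, 6, 6, 16}: row 1
// (a null or empty string) needs no scratch space, row 2's scratch begins at entry 6 of the
// pooled buffer, and row 3's at entry 16.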
// create the temporary compute buffer
rmm::device_uvector<cudf::size_type> compute_buffer(compute_size, stream);
auto d_buffer = compute_buffer.data();
auto results = cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
strings_count,
rmm::device_buffer{0, stream, mr},
0,
stream,
mr);
auto d_results = results->mutable_view().data<cudf::size_type>();
// compute the edit distance into the output column
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
strings_count,
edit_distance_levenshtein_algorithm{d_strings, d_targets, d_buffer, offsets.data(), d_results});
return results;
}
/**
* @copydoc nvtext::edit_distance_matrix
*/
std::unique_ptr<cudf::column> edit_distance_matrix(cudf::strings_column_view const& strings,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
cudf::size_type strings_count = strings.size();
if (strings_count == 0) {
return cudf::make_empty_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()});
}
CUDF_EXPECTS(strings_count > 1, "the input strings must include at least 2 strings");
CUDF_EXPECTS(static_cast<size_t>(strings_count) * static_cast<size_t>(strings_count) <
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>().max()),
"too many strings to create the output column");
// create device column of the input strings column
auto strings_column = cudf::column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// Calculate the size of the compute-buffer.
// We only need memory for half the size of the output matrix since the edit distance calculation
// is commutative -- `distance(strings[i],strings[j]) == distance(strings[j],strings[i])`
cudf::size_type n_upper = (strings_count * (strings_count - 1)) / 2;
rmm::device_uvector<std::ptrdiff_t> offsets(n_upper, stream);
auto d_offsets = offsets.data();
CUDF_CUDA_TRY(hipMemsetAsync(d_offsets, 0, n_upper * sizeof(cudf::size_type), stream.value()));
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
strings_count * strings_count,
[d_strings, d_offsets, strings_count] __device__(cudf::size_type idx) {
auto const row = idx / strings_count;
auto const col = idx % strings_count;
if (row >= col) return; // compute only the top half
cudf::string_view const d_str1 =
d_strings.is_null(row) ? cudf::string_view{} : d_strings.element<cudf::string_view>(row);
cudf::string_view const d_str2 =
d_strings.is_null(col) ? cudf::string_view{} : d_strings.element<cudf::string_view>(col);
if (d_str1.empty() || d_str2.empty()) { return; }
// the temp size needed is 2 integers per character of the shorter string
d_offsets[idx - ((row + 1) * (row + 2)) / 2] =
(::min(d_str1.length(), d_str2.length()) + 1) * 2;
});
// get the total size for the compute buffer
int64_t compute_size =
thrust::reduce(rmm::exec_policy(stream), offsets.begin(), offsets.end(), int64_t{0});
// convert sizes to offsets in-place
thrust::exclusive_scan(rmm::exec_policy(stream), offsets.begin(), offsets.end(), offsets.begin());
// create the compute buffer
rmm::device_uvector<cudf::size_type> compute_buffer(compute_size, stream);
auto d_buffer = compute_buffer.data();
// compute the edit distance into the output column
auto results = cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
strings_count * strings_count,
rmm::device_buffer{0, stream, mr},
0,
stream,
mr);
auto d_results = results->mutable_view().data<cudf::size_type>();
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
strings_count * strings_count,
edit_distance_matrix_levenshtein_algorithm{d_strings, d_buffer, d_offsets, d_results});
// build a lists column of the results
auto offsets_column =
cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
strings_count + 1,
rmm::device_buffer{0, stream, mr},
0,
stream,
mr);
thrust::transform_exclusive_scan(
rmm::exec_policy(stream),
thrust::counting_iterator<cudf::size_type>(0),
thrust::counting_iterator<cudf::size_type>(strings_count + 1),
offsets_column->mutable_view().data<cudf::size_type>(),
[strings_count] __device__(auto idx) { return strings_count; },
cudf::size_type{0},
thrust::plus<cudf::size_type>());
return cudf::make_lists_column(strings_count,
std::move(offsets_column),
std::move(results),
0, // no nulls
rmm::device_buffer{0, stream, mr},
stream,
mr);
}
} // namespace detail
// external APIs
/**
* @copydoc nvtext::edit_distance
*/
std::unique_ptr<cudf::column> edit_distance(cudf::strings_column_view const& strings,
cudf::strings_column_view const& targets,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::edit_distance(strings, targets, cudf::get_default_stream(), mr);
}
/**
* @copydoc nvtext::edit_distance_matrix
*/
std::unique_ptr<cudf::column> edit_distance_matrix(cudf::strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::edit_distance_matrix(strings, cudf::get_default_stream(), mr);
}
} // namespace nvtext
| 3ff3f15edaf6efe3d148f0bce4a4b69ec8607910.cu | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvtext/edit_distance.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/transform_scan.h>
namespace nvtext {
namespace detail {
namespace {
/**
* @brief Compute the Levenshtein distance for each string pair
*
* Documentation here: https://www.cuelogic.com/blog/the-levenshtein-algorithm
* And here: https://en.wikipedia.org/wiki/Levenshtein_distance
*
* @param d_str First string
* @param d_tgt Second string
* @param buffer Working buffer for intermediate calculations
* @return The edit distance value
*/
__device__ cudf::size_type compute_distance(cudf::string_view const& d_str,
cudf::string_view const& d_tgt,
cudf::size_type* buffer)
{
auto const str_length = d_str.length();
auto const tgt_length = d_tgt.length();
if (str_length == 0) return tgt_length;
if (tgt_length == 0) return str_length;
auto begin = str_length < tgt_length ? d_str.begin() : d_tgt.begin();
auto itr = str_length < tgt_length ? d_tgt.begin() : d_str.begin();
// .first is min and .second is max
auto const [n, m] = std::minmax(str_length, tgt_length);
// setup compute buffer pointers
auto v0 = buffer;
auto v1 = v0 + n + 1;
// initialize v0
thrust::sequence(thrust::seq, v0, v1);
for (int i = 0; i < m; ++i, ++itr) {
auto itr_tgt = begin;
v1[0] = i + 1;
for (int j = 0; j < n; ++j, ++itr_tgt) {
auto sub_cost = v0[j] + (*itr != *itr_tgt);
auto del_cost = v0[j + 1] + 1;
auto ins_cost = v1[j] + 1;
v1[j + 1] = std::min(std::min(sub_cost, del_cost), ins_cost);
}
thrust::swap(v0, v1);
}
return v0[n];
}
struct edit_distance_levenshtein_algorithm {
cudf::column_device_view d_strings; // computing these
cudf::column_device_view d_targets; // against these;
cudf::size_type* d_buffer; // compute buffer for each string
std::ptrdiff_t const* d_offsets; // locate sub-buffer for each string
cudf::size_type* d_results; // edit distance values
__device__ void operator()(cudf::size_type idx) const
{
auto d_str =
d_strings.is_null(idx) ? cudf::string_view{} : d_strings.element<cudf::string_view>(idx);
auto d_tgt = [&] __device__ { // d_targets is also allowed to have only one entry
if (d_targets.is_null(idx)) { return cudf::string_view{}; }
return d_targets.size() == 1 ? d_targets.element<cudf::string_view>(0)
: d_targets.element<cudf::string_view>(idx);
}();
d_results[idx] = compute_distance(d_str, d_tgt, d_buffer + d_offsets[idx]);
}
};
struct edit_distance_matrix_levenshtein_algorithm {
cudf::column_device_view d_strings; // computing these against itself
cudf::size_type* d_buffer; // compute buffer for each string
std::ptrdiff_t const* d_offsets; // locate sub-buffer for each string
cudf::size_type* d_results; // edit distance values
__device__ void operator()(cudf::size_type idx) const
{
auto const strings_count = d_strings.size();
auto const row = idx / strings_count;
auto const col = idx % strings_count;
if (row > col) return; // bottom half is computed with the top half of matrix
cudf::string_view d_str1 =
d_strings.is_null(row) ? cudf::string_view{} : d_strings.element<cudf::string_view>(row);
cudf::string_view d_str2 =
d_strings.is_null(col) ? cudf::string_view{} : d_strings.element<cudf::string_view>(col);
auto work_buffer = d_buffer + d_offsets[idx - ((row + 1) * (row + 2)) / 2];
auto const distance = (row == col) ? 0 : compute_distance(d_str1, d_str2, work_buffer);
d_results[idx] = distance; // top half of matrix
d_results[col * strings_count + row] = distance; // bottom half of matrix
}
};
} // namespace
/**
* @copydoc nvtext::edit_distance
*/
std::unique_ptr<cudf::column> edit_distance(cudf::strings_column_view const& strings,
cudf::strings_column_view const& targets,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const strings_count = strings.size();
if (strings_count == 0) {
return cudf::make_empty_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()});
}
if (targets.size() > 1) {
CUDF_EXPECTS(strings_count == targets.size(), "targets.size() must equal strings.size()");
}
// create device columns from the input columns
auto strings_column = cudf::column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
auto targets_column = cudf::column_device_view::create(targets.parent(), stream);
auto d_targets = *targets_column;
// calculate the size of the compute-buffer;
rmm::device_uvector<std::ptrdiff_t> offsets(strings_count, stream);
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(strings_count),
offsets.begin(),
[d_strings, d_targets] __device__(auto idx) {
if (d_strings.is_null(idx) || d_targets.is_null(idx)) {
return cudf::size_type{0};
}
auto d_str = d_strings.element<cudf::string_view>(idx);
auto d_tgt = d_targets.size() == 1
? d_targets.element<cudf::string_view>(0)
: d_targets.element<cudf::string_view>(idx);
// just need 2 integers for each character of the shorter string
return (std::min(d_str.length(), d_tgt.length()) + 1) * 2;
});
// get the total size of the temporary compute buffer
int64_t compute_size =
thrust::reduce(rmm::exec_policy(stream), offsets.begin(), offsets.end(), int64_t{0});
// convert sizes to offsets in-place
thrust::exclusive_scan(rmm::exec_policy(stream), offsets.begin(), offsets.end(), offsets.begin());
// create the temporary compute buffer
rmm::device_uvector<cudf::size_type> compute_buffer(compute_size, stream);
auto d_buffer = compute_buffer.data();
auto results = cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
strings_count,
rmm::device_buffer{0, stream, mr},
0,
stream,
mr);
auto d_results = results->mutable_view().data<cudf::size_type>();
// compute the edit distance into the output column
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
strings_count,
edit_distance_levenshtein_algorithm{d_strings, d_targets, d_buffer, offsets.data(), d_results});
return results;
}
/**
* @copydoc nvtext::edit_distance_matrix
*/
std::unique_ptr<cudf::column> edit_distance_matrix(cudf::strings_column_view const& strings,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
cudf::size_type strings_count = strings.size();
if (strings_count == 0) {
return cudf::make_empty_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()});
}
CUDF_EXPECTS(strings_count > 1, "the input strings must include at least 2 strings");
CUDF_EXPECTS(static_cast<size_t>(strings_count) * static_cast<size_t>(strings_count) <
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>().max()),
"too many strings to create the output column");
// create device column of the input strings column
auto strings_column = cudf::column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// Calculate the size of the compute-buffer.
// We only need memory for half the size of the output matrix since the edit distance calculation
// is commutative -- `distance(strings[i],strings[j]) == distance(strings[j],strings[i])`
cudf::size_type n_upper = (strings_count * (strings_count - 1)) / 2;
rmm::device_uvector<std::ptrdiff_t> offsets(n_upper, stream);
auto d_offsets = offsets.data();
CUDF_CUDA_TRY(cudaMemsetAsync(d_offsets, 0, n_upper * sizeof(cudf::size_type), stream.value()));
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
strings_count * strings_count,
[d_strings, d_offsets, strings_count] __device__(cudf::size_type idx) {
auto const row = idx / strings_count;
auto const col = idx % strings_count;
if (row >= col) return; // compute only the top half
cudf::string_view const d_str1 =
d_strings.is_null(row) ? cudf::string_view{} : d_strings.element<cudf::string_view>(row);
cudf::string_view const d_str2 =
d_strings.is_null(col) ? cudf::string_view{} : d_strings.element<cudf::string_view>(col);
if (d_str1.empty() || d_str2.empty()) { return; }
// the temp size needed is 2 integers per character of the shorter string
d_offsets[idx - ((row + 1) * (row + 2)) / 2] =
(std::min(d_str1.length(), d_str2.length()) + 1) * 2;
});
// get the total size for the compute buffer
int64_t compute_size =
thrust::reduce(rmm::exec_policy(stream), offsets.begin(), offsets.end(), int64_t{0});
// convert sizes to offsets in-place
thrust::exclusive_scan(rmm::exec_policy(stream), offsets.begin(), offsets.end(), offsets.begin());
// create the compute buffer
rmm::device_uvector<cudf::size_type> compute_buffer(compute_size, stream);
auto d_buffer = compute_buffer.data();
// compute the edit distance into the output column
auto results = cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
strings_count * strings_count,
rmm::device_buffer{0, stream, mr},
0,
stream,
mr);
auto d_results = results->mutable_view().data<cudf::size_type>();
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
strings_count * strings_count,
edit_distance_matrix_levenshtein_algorithm{d_strings, d_buffer, d_offsets, d_results});
// build a lists column of the results
auto offsets_column =
cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
strings_count + 1,
rmm::device_buffer{0, stream, mr},
0,
stream,
mr);
thrust::transform_exclusive_scan(
rmm::exec_policy(stream),
thrust::counting_iterator<cudf::size_type>(0),
thrust::counting_iterator<cudf::size_type>(strings_count + 1),
offsets_column->mutable_view().data<cudf::size_type>(),
[strings_count] __device__(auto idx) { return strings_count; },
cudf::size_type{0},
thrust::plus<cudf::size_type>());
return cudf::make_lists_column(strings_count,
std::move(offsets_column),
std::move(results),
0, // no nulls
rmm::device_buffer{0, stream, mr},
stream,
mr);
}
} // namespace detail
// external APIs
/**
* @copydoc nvtext::edit_distance
*/
std::unique_ptr<cudf::column> edit_distance(cudf::strings_column_view const& strings,
cudf::strings_column_view const& targets,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::edit_distance(strings, targets, cudf::get_default_stream(), mr);
}
/**
* @copydoc nvtext::edit_distance_matrix
*/
std::unique_ptr<cudf::column> edit_distance_matrix(cudf::strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::edit_distance_matrix(strings, cudf::get_default_stream(), mr);
}
} // namespace nvtext
|