hip_filename (string, lengths 5-84) | hip_content (string, lengths 79-9.69M) | cuda_filename (string, lengths 4-83) | cuda_content (string, lengths 19-9.69M) |
---|---|---|---|
cc2cefb6f6625fc16d6acec950b4ca4c21673ac0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zgemm_reduce.cu, normal z -> c, Wed Jan 2 14:18:50 2019
*/
#include "magma_internal.h"
#include "magma_templates.h"
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
// BLK_K gets defined in magmablas_cgemm_reduce,
// because it depends on the CUDA architecture at runtime.
/******************************************************************************/
// BLK_K size is templated, as it depends on CUDA architecture at runtime.
// Hmm... how to compile for both CUDA arch 1.x and 2.x?
template< int BLK_K >
__global__
void cgemm_reduce_kernel(
int m, int n, int k,
magmaFloatComplex alpha,
const magmaFloatComplex* __restrict__ dA, int lda,
const magmaFloatComplex* __restrict__ dB, int ldb,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ dC, int ldc)
{
#if (__CUDA_ARCH__ >= 200)
const int tx = threadIdx.x;
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n) {
dA += (blockIdx.x*BLK_M + threadIdx.y) * lda;
dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb;
dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
// was: sum[BLK_M][BLK_N+1][BLK_K+1];
// moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer.
__shared__ magmaFloatComplex sum[BLK_K][BLK_M+1][BLK_N+1];
magmaFloatComplex lsum;
/* w := v**H * C */
lsum = MAGMA_C_ZERO;
for( int j = tx; j < k; j += BLK_K )
lsum += MAGMA_C_CONJ( dA[j] )* dB[j];
sum[tx][threadIdx.y][threadIdx.z] = lsum;
magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum );
/* C := C - v * w */
__syncthreads();
if (threadIdx.x == 0) {
if (MAGMA_C_EQUAL(beta, MAGMA_C_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[0][threadIdx.y][threadIdx.z];
}
}
#endif
}
/***************************************************************************//**
Purpose
-------
CGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A^T*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
@ingroup magma_gemm
*******************************************************************************/
extern "C" void
magmablas_cgemm_reduce(
magma_int_t m, magma_int_t n, magma_int_t k,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dB, magma_int_t lddb,
magmaFloatComplex beta,
magmaFloatComplex_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( k < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( lddb < k )
info = -8;
else if ( lddc < m )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x -- maximum 512 threads
const int NUM_THREADS = 512;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2
dim3 threads( BLK_K, BLK_M, BLK_N );
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 );
hipLaunchKernelGGL(( cgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
else {
// --------------------
// call CUDA ARCH 2.x -- maximum 1024 threads
const int NUM_THREADS = 1024;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4
dim3 threads( BLK_K, BLK_M, BLK_N );
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 );
hipLaunchKernelGGL(( cgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
}
| cc2cefb6f6625fc16d6acec950b4ca4c21673ac0.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zgemm_reduce.cu, normal z -> c, Wed Jan 2 14:18:50 2019
*/
#include "magma_internal.h"
#include "magma_templates.h"
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
// BLK_K gets defined in magmablas_cgemm_reduce,
// because it depends on the CUDA architecture at runtime.
/******************************************************************************/
// BLK_K size is templated, as it depends on CUDA architecture at runtime.
// Hmm... how to compile for both CUDA arch 1.x and 2.x?
template< int BLK_K >
__global__
void cgemm_reduce_kernel(
int m, int n, int k,
magmaFloatComplex alpha,
const magmaFloatComplex* __restrict__ dA, int lda,
const magmaFloatComplex* __restrict__ dB, int ldb,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ dC, int ldc)
{
#if (__CUDA_ARCH__ >= 200)
const int tx = threadIdx.x;
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n) {
dA += (blockIdx.x*BLK_M + threadIdx.y) * lda;
dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb;
dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
// was: sum[BLK_M][BLK_N+1][BLK_K+1];
// moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer.
__shared__ magmaFloatComplex sum[BLK_K][BLK_M+1][BLK_N+1];
magmaFloatComplex lsum;
/* w := v**H * C */
lsum = MAGMA_C_ZERO;
for( int j = tx; j < k; j += BLK_K )
lsum += MAGMA_C_CONJ( dA[j] )* dB[j];
sum[tx][threadIdx.y][threadIdx.z] = lsum;
magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum );
/* C := C - v * w */
__syncthreads();
if (threadIdx.x == 0) {
if (MAGMA_C_EQUAL(beta, MAGMA_C_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[0][threadIdx.y][threadIdx.z];
}
}
#endif
}
/***************************************************************************//**
Purpose
-------
CGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A^T*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
@ingroup magma_gemm
*******************************************************************************/
extern "C" void
magmablas_cgemm_reduce(
magma_int_t m, magma_int_t n, magma_int_t k,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dB, magma_int_t lddb,
magmaFloatComplex beta,
magmaFloatComplex_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( k < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( lddb < k )
info = -8;
else if ( lddc < m )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x -- maximum 512 threads
const int NUM_THREADS = 512;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2
dim3 threads( BLK_K, BLK_M, BLK_N );
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 );
cgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, queue->cuda_stream() >>>
( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
else {
// --------------------
// call CUDA ARCH 2.x -- maximum 1024 threads
const int NUM_THREADS = 1024;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4
dim3 threads( BLK_K, BLK_M, BLK_N );
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 );
cgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, queue->cuda_stream() >>>
( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
}
|
72627f8af6955269678b3b389e6ff53cfa79c33f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/*
Parallel reduction kernels
*/
#ifndef _REDUCE_KERNEL_H_
#define _REDUCE_KERNEL_H_
#if 1
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC
#endif
#include <hip/device_functions.h>
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n/2 threads
- only works for power-of-2 arrays
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
See the CUDA SDK "reduction" sample for more information.
*/
template <unsigned int blockSize>
__device__ void
reduceBlock(float *sdata, const unsigned int tid)
{
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
if (tid < 32 && blockSize >= 64) { sdata[tid] += sdata[tid + 32]; } EMUSYNC;
if (tid < 16 && blockSize >= 32) { sdata[tid] += sdata[tid + 16]; } EMUSYNC;
if (tid < 8 && blockSize >= 16) { sdata[tid] += sdata[tid + 8]; } EMUSYNC;
if (tid < 4 && blockSize >= 8) { sdata[tid] += sdata[tid + 4]; } EMUSYNC;
if (tid < 2 && blockSize >= 4) { sdata[tid] += sdata[tid + 2]; } EMUSYNC;
if (tid < 1 && blockSize >= 2) { sdata[tid] += sdata[tid + 1]; } EMUSYNC;
}
template <unsigned int blockSize, bool nIsPow2>
__device__ void
reduceBlocks(const float *g_idata, float *g_odata, unsigned int n)
{
extern __shared__ float sdata[];
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid] = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
sdata[tid] += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
sdata[tid] += g_idata[i+blockSize];
i += gridSize;
}
__syncthreads();
// do reduction in shared mem
reduceBlock<blockSize>(sdata, tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
template <unsigned int blockSize, bool nIsPow2>
__global__ void
reduceMultiPass(const float *g_idata, float *g_odata, unsigned int n)
{
reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n);
}
// Global variable used by reduceSinglePass to count how many blocks have finished
__device__ unsigned int retirementCount = 0;
// This reduction kernel reduces an arbitrary size array in a single kernel invocation
// It does so by keeping track of how many blocks have finished. After each thread
// block completes the reduction of its own block of data, it "takes a ticket" by
// atomically incrementing a global counter. If the ticket value is equal to the number
// of thread blocks, then the block holding the ticket knows that it is the last block
// to finish. This last block is responsible for summing the results of all the other
// blocks.
//
// In order for this to work, we must be sure that before a block takes a ticket, all
// of its memory transactions have completed. This is what __threadfence() does -- it
// blocks until the results of all outstanding memory transactions within the
// calling thread are visible to all other threads.
//
// For more details on the reduction algorithm (notably the multi-pass approach), see
// the "reduction" sample in the CUDA SDK.
template <unsigned int blockSize, bool nIsPow2>
__global__ void reduceSinglePass(const float *g_idata, float *g_odata, unsigned int n)
{
//
// PHASE 1: Process all inputs assigned to this block
//
reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n);
//
// PHASE 2: Last block finished will process all partial sums
//
if (gridDim.x > 1)
{
const unsigned int tid = threadIdx.x;
__shared__ bool amLast;
extern float __shared__ smem[];
// wait until all outstanding memory instructions in this thread are finished
__threadfence();
// Thread 0 takes a ticket
if( tid==0 )
{
unsigned int ticket = atomicInc(&retirementCount, gridDim.x);
// If the ticket ID is equal to the number of blocks, we are the last block!
amLast = (ticket == gridDim.x-1);
}
__syncthreads();
// The last block sums the results of all other blocks
if( amLast )
{
// load block results back into shared memory
smem[tid] = (tid < gridDim.x) ? g_odata[tid] : 0;
__syncthreads();
reduceBlock<blockSize>(smem, tid);
if( tid==0 )
{
g_odata[0] = smem[0];
// reset retirement count so that next run succeeds
retirementCount = 0;
}
}
}
}
bool isPow2(unsigned int x)
{
return ((x&(x-1))==0);
}
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
extern "C"
void reduce(int size, int threads, int blocks, float *d_idata, float *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
int smemSize = threads * sizeof(float);
// choose which of the optimized versions of reduction to launch
if (isPow2(size))
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( reduceMultiPass<512, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 256:
hipLaunchKernelGGL(( reduceMultiPass<256, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 128:
hipLaunchKernelGGL(( reduceMultiPass<128, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 64:
hipLaunchKernelGGL(( reduceMultiPass< 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 32:
hipLaunchKernelGGL(( reduceMultiPass< 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 16:
hipLaunchKernelGGL(( reduceMultiPass< 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 8:
hipLaunchKernelGGL(( reduceMultiPass< 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 4:
hipLaunchKernelGGL(( reduceMultiPass< 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 2:
hipLaunchKernelGGL(( reduceMultiPass< 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 1:
hipLaunchKernelGGL(( reduceMultiPass< 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
}
}
else
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( reduceMultiPass<512, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 256:
hipLaunchKernelGGL(( reduceMultiPass<256, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 128:
hipLaunchKernelGGL(( reduceMultiPass<128, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 64:
hipLaunchKernelGGL(( reduceMultiPass< 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 32:
hipLaunchKernelGGL(( reduceMultiPass< 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 16:
hipLaunchKernelGGL(( reduceMultiPass< 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 8:
hipLaunchKernelGGL(( reduceMultiPass< 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 4:
hipLaunchKernelGGL(( reduceMultiPass< 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 2:
hipLaunchKernelGGL(( reduceMultiPass< 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 1:
hipLaunchKernelGGL(( reduceMultiPass< 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
}
}
}
extern "C"
void reduceSinglePass(int size, int threads, int blocks, float *d_idata, float *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
int smemSize = threads * sizeof(float);
// choose which of the optimized versions of reduction to launch
if (isPow2(size))
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( reduceSinglePass<512, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 256:
hipLaunchKernelGGL(( reduceSinglePass<256, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 128:
hipLaunchKernelGGL(( reduceSinglePass<128, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 64:
hipLaunchKernelGGL(( reduceSinglePass< 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 32:
hipLaunchKernelGGL(( reduceSinglePass< 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 16:
hipLaunchKernelGGL(( reduceSinglePass< 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 8:
hipLaunchKernelGGL(( reduceSinglePass< 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 4:
hipLaunchKernelGGL(( reduceSinglePass< 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 2:
hipLaunchKernelGGL(( reduceSinglePass< 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 1:
hipLaunchKernelGGL(( reduceSinglePass< 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
}
}
else
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( reduceSinglePass<512, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 256:
hipLaunchKernelGGL(( reduceSinglePass<256, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 128:
hipLaunchKernelGGL(( reduceSinglePass<128, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 64:
hipLaunchKernelGGL(( reduceSinglePass< 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 32:
hipLaunchKernelGGL(( reduceSinglePass< 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 16:
hipLaunchKernelGGL(( reduceSinglePass< 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 8:
hipLaunchKernelGGL(( reduceSinglePass< 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 4:
hipLaunchKernelGGL(( reduceSinglePass< 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 2:
hipLaunchKernelGGL(( reduceSinglePass< 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 1:
hipLaunchKernelGGL(( reduceSinglePass< 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
}
}
}
#endif // #ifndef _REDUCE_KERNEL_H_
| 72627f8af6955269678b3b389e6ff53cfa79c33f.cu | /*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/*
Parallel reduction kernels
*/
#ifndef _REDUCE_KERNEL_H_
#define _REDUCE_KERNEL_H_
#if 1
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC
#endif
#include <device_functions.h>
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n/2 threads
- only works for power-of-2 arrays
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
See the CUDA SDK "reduction" sample for more information.
*/
template <unsigned int blockSize>
__device__ void
reduceBlock(float *sdata, const unsigned int tid)
{
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
if (tid < 32 && blockSize >= 64) { sdata[tid] += sdata[tid + 32]; } EMUSYNC;
if (tid < 16 && blockSize >= 32) { sdata[tid] += sdata[tid + 16]; } EMUSYNC;
if (tid < 8 && blockSize >= 16) { sdata[tid] += sdata[tid + 8]; } EMUSYNC;
if (tid < 4 && blockSize >= 8) { sdata[tid] += sdata[tid + 4]; } EMUSYNC;
if (tid < 2 && blockSize >= 4) { sdata[tid] += sdata[tid + 2]; } EMUSYNC;
if (tid < 1 && blockSize >= 2) { sdata[tid] += sdata[tid + 1]; } EMUSYNC;
}
template <unsigned int blockSize, bool nIsPow2>
__device__ void
reduceBlocks(const float *g_idata, float *g_odata, unsigned int n)
{
extern __shared__ float sdata[];
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid] = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
sdata[tid] += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
sdata[tid] += g_idata[i+blockSize];
i += gridSize;
}
__syncthreads();
// do reduction in shared mem
reduceBlock<blockSize>(sdata, tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
template <unsigned int blockSize, bool nIsPow2>
__global__ void
reduceMultiPass(const float *g_idata, float *g_odata, unsigned int n)
{
reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n);
}
// Global variable used by reduceSinglePass to count how many blocks have finished
__device__ unsigned int retirementCount = 0;
// This reduction kernel reduces an arbitrary size array in a single kernel invocation
// It does so by keeping track of how many blocks have finished. After each thread
// block completes the reduction of its own block of data, it "takes a ticket" by
// atomically incrementing a global counter. If the ticket value is equal to the number
// of thread blocks, then the block holding the ticket knows that it is the last block
// to finish. This last block is responsible for summing the results of all the other
// blocks.
//
// In order for this to work, we must be sure that before a block takes a ticket, all
// of its memory transactions have completed. This is what __threadfence() does -- it
// blocks until the results of all outstanding memory transactions within the
// calling thread are visible to all other threads.
//
// For more details on the reduction algorithm (notably the multi-pass approach), see
// the "reduction" sample in the CUDA SDK.
template <unsigned int blockSize, bool nIsPow2>
__global__ void reduceSinglePass(const float *g_idata, float *g_odata, unsigned int n)
{
//
// PHASE 1: Process all inputs assigned to this block
//
reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n);
//
// PHASE 2: Last block finished will process all partial sums
//
if (gridDim.x > 1)
{
const unsigned int tid = threadIdx.x;
__shared__ bool amLast;
extern float __shared__ smem[];
// wait until all outstanding memory instructions in this thread are finished
__threadfence();
// Thread 0 takes a ticket
if( tid==0 )
{
unsigned int ticket = atomicInc(&retirementCount, gridDim.x);
// If the ticket ID is equal to the number of blocks, we are the last block!
amLast = (ticket == gridDim.x-1);
}
__syncthreads();
// The last block sums the results of all other blocks
if( amLast )
{
// load block results back into shared memory
smem[tid] = (tid < gridDim.x) ? g_odata[tid] : 0;
__syncthreads();
reduceBlock<blockSize>(smem, tid);
if( tid==0 )
{
g_odata[0] = smem[0];
// reset retirement count so that next run succeeds
retirementCount = 0;
}
}
}
}
bool isPow2(unsigned int x)
{
return ((x&(x-1))==0);
}
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
extern "C"
void reduce(int size, int threads, int blocks, float *d_idata, float *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
int smemSize = threads * sizeof(float);
// choose which of the optimized versions of reduction to launch
if (isPow2(size))
{
switch (threads)
{
case 512:
reduceMultiPass<512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 256:
reduceMultiPass<256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 128:
reduceMultiPass<128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 64:
reduceMultiPass< 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 32:
reduceMultiPass< 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 16:
reduceMultiPass< 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 8:
reduceMultiPass< 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 4:
reduceMultiPass< 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 2:
reduceMultiPass< 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 1:
reduceMultiPass< 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
}
}
else
{
switch (threads)
{
case 512:
reduceMultiPass<512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 256:
reduceMultiPass<256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 128:
reduceMultiPass<128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 64:
reduceMultiPass< 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 32:
reduceMultiPass< 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 16:
reduceMultiPass< 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 8:
reduceMultiPass< 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 4:
reduceMultiPass< 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 2:
reduceMultiPass< 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 1:
reduceMultiPass< 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
}
}
}
extern "C"
void reduceSinglePass(int size, int threads, int blocks, float *d_idata, float *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
int smemSize = threads * sizeof(float);
// choose which of the optimized versions of reduction to launch
if (isPow2(size))
{
switch (threads)
{
case 512:
reduceSinglePass<512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 256:
reduceSinglePass<256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 128:
reduceSinglePass<128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 64:
reduceSinglePass< 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 32:
reduceSinglePass< 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 16:
reduceSinglePass< 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 8:
reduceSinglePass< 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 4:
reduceSinglePass< 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 2:
reduceSinglePass< 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 1:
reduceSinglePass< 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
}
}
else
{
switch (threads)
{
case 512:
reduceSinglePass<512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 256:
reduceSinglePass<256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 128:
reduceSinglePass<128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 64:
reduceSinglePass< 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 32:
reduceSinglePass< 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 16:
reduceSinglePass< 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 8:
reduceSinglePass< 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 4:
reduceSinglePass< 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 2:
reduceSinglePass< 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 1:
reduceSinglePass< 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
}
}
}
#endif // #ifndef _REDUCE_KERNEL_H_
|
72e98555e61b56c8d1d92ee56925d65081336bc7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <common/cudart_utils.h>
#include <linalg/cublas_wrappers.h>
#include <linalg/transpose.h>
#include <cuda_utils.cuh>
#include <linalg/subtract.cuh>
#include <random/make_regression.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Random {
template <typename T>
struct MakeRegressionInputs {
T tolerance;
int n_samples, n_features, n_informative, n_targets, effective_rank;
T bias;
bool shuffle;
GeneratorType gtype;
uint64_t seed;
};
template <typename T>
class MakeRegressionTest
: public ::testing::TestWithParam<MakeRegressionInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MakeRegressionInputs<T>>::GetParam();
// Noise must be zero to compare the actual and expected values
T noise = (T)0.0, tail_strength = (T)0.5;
allocator.reset(new defaultDeviceAllocator);
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle));
CUDA_CHECK(hipStreamCreate(&stream));
allocate(data, params.n_samples * params.n_features);
allocate(values_ret, params.n_samples * params.n_targets);
allocate(values_prod, params.n_samples * params.n_targets);
allocate(values_cm, params.n_samples * params.n_targets);
allocate(coef, params.n_features * params.n_targets);
// Create the regression problem
make_regression(data, values_ret, params.n_samples, params.n_features,
params.n_informative, cublas_handle, cusolver_handle,
allocator, stream, coef, params.n_targets, params.bias,
params.effective_rank, tail_strength, noise, params.shuffle,
params.seed, params.gtype);
// Calculate the values from the data and coefficients (column-major)
T alpha = (T)1.0, beta = (T)0.0;
CUBLAS_CHECK(LinAlg::cublasgemm(
cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_T, params.n_samples,
params.n_targets, params.n_features, &alpha, data, params.n_features,
coef, params.n_targets, &beta, values_cm, params.n_samples, stream));
// Transpose the values to row-major
LinAlg::transpose(values_cm, values_prod, params.n_samples,
params.n_targets, cublas_handle, stream);
// Add the bias
LinAlg::addScalar(values_prod, values_prod, params.bias,
params.n_samples * params.n_targets, stream);
// Count the number of zeroes in the coefficients
thrust::device_ptr<T> __coef = thrust::device_pointer_cast(coef);
zero_count = thrust::count(
__coef, __coef + params.n_features * params.n_targets, (T)0.0);
}
void TearDown() override {
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(values_ret));
CUDA_CHECK(hipFree(values_prod));
CUDA_CHECK(hipFree(values_cm));
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle));
CUDA_CHECK(hipStreamDestroy(stream));
}
protected:
MakeRegressionInputs<T> params;
T *data, *values_ret, *values_prod, *values_cm, *coef;
int zero_count;
std::shared_ptr<deviceAllocator> allocator;
hipStream_t stream;
hipblasHandle_t cublas_handle;
hipsolverDnHandle_t cusolver_handle;
};
typedef MakeRegressionTest<float> MakeRegressionTestF;
const std::vector<MakeRegressionInputs<float>> inputsf_t = {
{0.01f, 256, 32, 16, 1, -1, 0.f, true, GenPhilox, 1234ULL},
{0.01f, 1000, 100, 47, 4, 65, 4.2f, true, GenPhilox, 1234ULL},
{0.01f, 20000, 500, 450, 13, -1, -3.f, false, GenPhilox, 1234ULL}};
TEST_P(MakeRegressionTestF, Result) {
ASSERT_TRUE(
match(params.n_targets * (params.n_features - params.n_informative),
zero_count, Compare<int>()));
ASSERT_TRUE(devArrMatch(values_ret, values_prod, params.n_samples,
params.n_targets,
CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(MakeRegressionTests, MakeRegressionTestF,
::testing::ValuesIn(inputsf_t));
typedef MakeRegressionTest<double> MakeRegressionTestD;
const std::vector<MakeRegressionInputs<double>> inputsd_t = {
{0.01, 256, 32, 16, 1, -1, 0.0, true, GenPhilox, 1234ULL},
{0.01, 1000, 100, 47, 4, 65, 4.2, true, GenPhilox, 1234ULL},
{0.01, 20000, 500, 450, 13, -1, -3.0, false, GenPhilox, 1234ULL}};
TEST_P(MakeRegressionTestD, Result) {
ASSERT_TRUE(
match(params.n_targets * (params.n_features - params.n_informative),
zero_count, Compare<int>()));
ASSERT_TRUE(devArrMatch(values_ret, values_prod, params.n_samples,
params.n_targets,
CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(MakeRegressionTests, MakeRegressionTestD,
::testing::ValuesIn(inputsd_t));
} // end namespace Random
} // end namespace MLCommon
| 72e98555e61b56c8d1d92ee56925d65081336bc7.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <common/cudart_utils.h>
#include <linalg/cublas_wrappers.h>
#include <linalg/transpose.h>
#include <cuda_utils.cuh>
#include <linalg/subtract.cuh>
#include <random/make_regression.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Random {
template <typename T>
struct MakeRegressionInputs {
T tolerance;
int n_samples, n_features, n_informative, n_targets, effective_rank;
T bias;
bool shuffle;
GeneratorType gtype;
uint64_t seed;
};
template <typename T>
class MakeRegressionTest
: public ::testing::TestWithParam<MakeRegressionInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MakeRegressionInputs<T>>::GetParam();
// Noise must be zero to compare the actual and expected values
T noise = (T)0.0, tail_strength = (T)0.5;
allocator.reset(new defaultDeviceAllocator);
CUBLAS_CHECK(cublasCreate(&cublas_handle));
CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle));
CUDA_CHECK(cudaStreamCreate(&stream));
allocate(data, params.n_samples * params.n_features);
allocate(values_ret, params.n_samples * params.n_targets);
allocate(values_prod, params.n_samples * params.n_targets);
allocate(values_cm, params.n_samples * params.n_targets);
allocate(coef, params.n_features * params.n_targets);
// Create the regression problem
make_regression(data, values_ret, params.n_samples, params.n_features,
params.n_informative, cublas_handle, cusolver_handle,
allocator, stream, coef, params.n_targets, params.bias,
params.effective_rank, tail_strength, noise, params.shuffle,
params.seed, params.gtype);
// Calculate the values from the data and coefficients (column-major)
T alpha = (T)1.0, beta = (T)0.0;
CUBLAS_CHECK(LinAlg::cublasgemm(
cublas_handle, CUBLAS_OP_T, CUBLAS_OP_T, params.n_samples,
params.n_targets, params.n_features, &alpha, data, params.n_features,
coef, params.n_targets, &beta, values_cm, params.n_samples, stream));
// Transpose the values to row-major
LinAlg::transpose(values_cm, values_prod, params.n_samples,
params.n_targets, cublas_handle, stream);
// Add the bias
LinAlg::addScalar(values_prod, values_prod, params.bias,
params.n_samples * params.n_targets, stream);
// Count the number of zeroes in the coefficients
thrust::device_ptr<T> __coef = thrust::device_pointer_cast(coef);
zero_count = thrust::count(
__coef, __coef + params.n_features * params.n_targets, (T)0.0);
}
void TearDown() override {
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(values_ret));
CUDA_CHECK(cudaFree(values_prod));
CUDA_CHECK(cudaFree(values_cm));
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle));
CUDA_CHECK(cudaStreamDestroy(stream));
}
protected:
MakeRegressionInputs<T> params;
T *data, *values_ret, *values_prod, *values_cm, *coef;
int zero_count;
std::shared_ptr<deviceAllocator> allocator;
cudaStream_t stream;
cublasHandle_t cublas_handle;
cusolverDnHandle_t cusolver_handle;
};
typedef MakeRegressionTest<float> MakeRegressionTestF;
const std::vector<MakeRegressionInputs<float>> inputsf_t = {
{0.01f, 256, 32, 16, 1, -1, 0.f, true, GenPhilox, 1234ULL},
{0.01f, 1000, 100, 47, 4, 65, 4.2f, true, GenPhilox, 1234ULL},
{0.01f, 20000, 500, 450, 13, -1, -3.f, false, GenPhilox, 1234ULL}};
TEST_P(MakeRegressionTestF, Result) {
ASSERT_TRUE(
match(params.n_targets * (params.n_features - params.n_informative),
zero_count, Compare<int>()));
ASSERT_TRUE(devArrMatch(values_ret, values_prod, params.n_samples,
params.n_targets,
CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(MakeRegressionTests, MakeRegressionTestF,
::testing::ValuesIn(inputsf_t));
typedef MakeRegressionTest<double> MakeRegressionTestD;
const std::vector<MakeRegressionInputs<double>> inputsd_t = {
{0.01, 256, 32, 16, 1, -1, 0.0, true, GenPhilox, 1234ULL},
{0.01, 1000, 100, 47, 4, 65, 4.2, true, GenPhilox, 1234ULL},
{0.01, 20000, 500, 450, 13, -1, -3.0, false, GenPhilox, 1234ULL}};
TEST_P(MakeRegressionTestD, Result) {
ASSERT_TRUE(
match(params.n_targets * (params.n_features - params.n_informative),
zero_count, Compare<int>()));
ASSERT_TRUE(devArrMatch(values_ret, values_prod, params.n_samples,
params.n_targets,
CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(MakeRegressionTests, MakeRegressionTestD,
::testing::ValuesIn(inputsd_t));
} // end namespace Random
} // end namespace MLCommon
|
3e09017ebb1449746482b74781edbb6b95dc0150.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "stdio.h"
#define R 1
#define C 32
#define H 1
#define numThrdx 32
#define numThrdy 1
#define numThrdz 1
#define ITERS 1000000000
#define g 64
__device__ uint get_smid(void) {
uint ret;
asm("mov.u32 %0, %smid;" : "=r"(ret) );
return ret;
}
void DisplayHeader()
{
const int kb = 1024;
const int mb = kb * kb;
std::cout << "NBody.GPU" <<"\n" << "=========" <<"\n" <<"\n";
std::cout << "CUDA version: v" << CUDART_VERSION <<"\n";
//std::cout << "Thrust version: v" << THRUST_MAJOR_VERSION << "." << THRUST_MINOR_VERSION <<"\n" <<"\n";
int devCount;
hipGetDeviceCount(&devCount);
std::cout << "CUDA Devices: " <<"\n" <<"\n";
for(int i = 0; i < devCount; ++i)
{
hipDeviceProp_t props;
hipGetDeviceProperties(&props, i);
std::cout << i << ": " << props.name << ": " << props.major << "." << props.minor <<"\n";
std::cout << " Global memory: " << props.totalGlobalMem / mb << "mb" <<"\n";
std::cout << " Shared memory: " << props.sharedMemPerBlock / kb << "kb" <<"\n";
std::cout << " Constant memory: " << props.totalConstMem / kb << "kb" <<"\n";
std::cout << " Block registers: " << props.regsPerBlock <<"\n" <<"\n";
std::cout << " Warp size: " << props.warpSize <<"\n";
std::cout << " Threads per block: " << props.maxThreadsPerBlock <<"\n";
std::cout << " Max block dimensions: [ " << props.maxThreadsDim[0] << ", "<< props.maxThreadsDim[1] << ", " << props.maxThreadsDim[2]<<" ]" <<"\n";
std::cout << " Max grid dimensions: [ " << props.maxGridSize[0] << ", " << props.maxGridSize[1] << ", " << props.maxGridSize[2] << " ]" <<"\n";
std::cout <<"\n";
}
}
__device__ void iteratively_divergent(int *a, int* w, int *k, int i) {
__syncthreads();
int gtid = gridDim.x*blockDim.x*gridDim.y*blockDim.y*threadIdx.z + (gridDim.x*blockIdx.y + blockIdx.x)*blockDim.x*blockDim.y + blockDim.x*threadIdx.y + threadIdx.x;
for(int i=0; i<32 ;i++)
if(gtid <= i)
{
continue;
}
else
{
// atomicAdd(&k[0],1);
k[0]+=1;
a[gtid] = k[0];
w[gtid] = clock();
}
}
__global__ void full_divergent(int *a, int* w, int *k) {
__syncthreads();
iteratively_divergent(a,w,k,0);
//store last value of k in a[0]
int stime = clock();
a[0] = 1;
int ftime = clock();
a[0] = ftime - stime;
}
__global__ void zero_divergent(int *a, int *k) {
int gtid = gridDim.x*blockDim.x*gridDim.y*blockDim.y*threadIdx.z + (gridDim.x*blockIdx.y + blockIdx.x)*blockDim.x*blockDim.y + blockDim.x*threadIdx.y + threadIdx.x;
int j = 0;
while(j++ < ITERS);
a[gtid] = (99 + get_smid());
}
int main() {
DisplayHeader();
int a[R][C][H], k[1], w[R][C][H];
int *dev_a, *dev_k, *dev_warp;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipMalloc((void **) &dev_a, R*C*H*sizeof(int));
hipMalloc((void **) &dev_warp, R*C*H*sizeof(int));
hipMalloc((void **) &dev_k, sizeof(int));
// Fill Arrays
for (int k = 0; k < H; k++) {
for (int i = 0; i < R; i++) {
for (int j = 0; j < C; j++) {
a[k][i][j] = 0;
w[k][i][j] = 0;
}
}
}
k[0] = 0;
hipMemcpy(dev_a, a, R*C*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_warp, w, R*C*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_k, k, sizeof(k), hipMemcpyHostToDevice);
// Kernel invocation
dim3 threadsPerBlock(numThrdx,numThrdy,numThrdz);
dim3 numBlocks( C/threadsPerBlock.x, R/threadsPerBlock.y, H/threadsPerBlock.z );
std::cout<<"numBlocks.x="<<numBlocks.x<<" numBlocks.y="<<numBlocks.y<<" numBlocks.z="<<numBlocks.z<<"\n";
hipEventRecord(start);hipLaunchKernelGGL((
full_divergent), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, dev_a, dev_warp, dev_k);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipMemcpy(a, dev_a, R*C*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(w, dev_warp, R*C*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(k, dev_k, sizeof(int), hipMemcpyDeviceToHost);
float elapsed_time = 0;
hipEventElapsedTime(&elapsed_time, start, stop);
std::cout<<"results:\n";
for (int k = 0; k < H; k++) {
for (int i = 0; i < R; i++) {
for (int j = 0; j < C; j++) {
std::cout << a[k][i][j] <<" ";
}
std::cout<<"\n";
}
}
std::cout<<"clocks:\n";
for (int k = 0; k < H; k++) {
for (int i = 0; i < R; i++) {
for (int j = 0; j < C; j++) {
std::cout << w[k][i][j] <<" ";
}
std::cout<<"\n";
}
}
std::cout<<"numBlocks.x="<<numBlocks.x<<" numBlocks.y="<<numBlocks.y<<" numBlocks.z="<<numBlocks.z<<"\n";
std::cout<<"Elapsed time = "<<elapsed_time<<" ms\n";
hipDeviceReset();
return 0;
}
| 3e09017ebb1449746482b74781edbb6b95dc0150.cu | #include <iostream>
#include "stdio.h"
#define R 1
#define C 32
#define H 1
#define numThrdx 32
#define numThrdy 1
#define numThrdz 1
#define ITERS 1000000000
#define g 64
__device__ uint get_smid(void) {
uint ret;
asm("mov.u32 %0, %smid;" : "=r"(ret) );
return ret;
}
void DisplayHeader()
{
const int kb = 1024;
const int mb = kb * kb;
std::cout << "NBody.GPU" <<"\n" << "=========" <<"\n" <<"\n";
std::cout << "CUDA version: v" << CUDART_VERSION <<"\n";
//std::cout << "Thrust version: v" << THRUST_MAJOR_VERSION << "." << THRUST_MINOR_VERSION <<"\n" <<"\n";
int devCount;
cudaGetDeviceCount(&devCount);
std::cout << "CUDA Devices: " <<"\n" <<"\n";
for(int i = 0; i < devCount; ++i)
{
cudaDeviceProp props;
cudaGetDeviceProperties(&props, i);
std::cout << i << ": " << props.name << ": " << props.major << "." << props.minor <<"\n";
std::cout << " Global memory: " << props.totalGlobalMem / mb << "mb" <<"\n";
std::cout << " Shared memory: " << props.sharedMemPerBlock / kb << "kb" <<"\n";
std::cout << " Constant memory: " << props.totalConstMem / kb << "kb" <<"\n";
std::cout << " Block registers: " << props.regsPerBlock <<"\n" <<"\n";
std::cout << " Warp size: " << props.warpSize <<"\n";
std::cout << " Threads per block: " << props.maxThreadsPerBlock <<"\n";
std::cout << " Max block dimensions: [ " << props.maxThreadsDim[0] << ", "<< props.maxThreadsDim[1] << ", " << props.maxThreadsDim[2]<<" ]" <<"\n";
std::cout << " Max grid dimensions: [ " << props.maxGridSize[0] << ", " << props.maxGridSize[1] << ", " << props.maxGridSize[2] << " ]" <<"\n";
std::cout <<"\n";
}
}
__device__ void iteratively_divergent(int *a, int* w, int *k, int i) {
__syncthreads();
int gtid = gridDim.x*blockDim.x*gridDim.y*blockDim.y*threadIdx.z + (gridDim.x*blockIdx.y + blockIdx.x)*blockDim.x*blockDim.y + blockDim.x*threadIdx.y + threadIdx.x;
for(int i=0; i<32 ;i++)
if(gtid <= i)
{
continue;
}
else
{
// atomicAdd(&k[0],1);
k[0]+=1;
a[gtid] = k[0];
w[gtid] = clock();
}
}
__global__ void full_divergent(int *a, int* w, int *k) {
__syncthreads();
iteratively_divergent(a,w,k,0);
//store last value of k in a[0]
int stime = clock();
a[0] = 1;
int ftime = clock();
a[0] = ftime - stime;
}
__global__ void zero_divergent(int *a, int *k) {
int gtid = gridDim.x*blockDim.x*gridDim.y*blockDim.y*threadIdx.z + (gridDim.x*blockIdx.y + blockIdx.x)*blockDim.x*blockDim.y + blockDim.x*threadIdx.y + threadIdx.x;
int j = 0;
while(j++ < ITERS);
a[gtid] = (99 + get_smid());
}
int main() {
DisplayHeader();
int a[R][C][H], k[1], w[R][C][H];
int *dev_a, *dev_k, *dev_warp;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMalloc((void **) &dev_a, R*C*H*sizeof(int));
cudaMalloc((void **) &dev_warp, R*C*H*sizeof(int));
cudaMalloc((void **) &dev_k, sizeof(int));
// Fill Arrays
for (int k = 0; k < H; k++) {
for (int i = 0; i < R; i++) {
for (int j = 0; j < C; j++) {
a[k][i][j] = 0;
w[k][i][j] = 0;
}
}
}
k[0] = 0;
cudaMemcpy(dev_a, a, R*C*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_warp, w, R*C*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_k, k, sizeof(k), cudaMemcpyHostToDevice);
// Kernel invocation
dim3 threadsPerBlock(numThrdx,numThrdy,numThrdz);
dim3 numBlocks( C/threadsPerBlock.x, R/threadsPerBlock.y, H/threadsPerBlock.z );
std::cout<<"numBlocks.x="<<numBlocks.x<<" numBlocks.y="<<numBlocks.y<<" numBlocks.z="<<numBlocks.z<<"\n";
cudaEventRecord(start);
full_divergent<<<numBlocks, threadsPerBlock>>>(dev_a, dev_warp, dev_k);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaMemcpy(a, dev_a, R*C*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(w, dev_warp, R*C*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(k, dev_k, sizeof(int), cudaMemcpyDeviceToHost);
float elapsed_time = 0;
cudaEventElapsedTime(&elapsed_time, start, stop);
std::cout<<"results:\n";
for (int k = 0; k < H; k++) {
for (int i = 0; i < R; i++) {
for (int j = 0; j < C; j++) {
std::cout << a[k][i][j] <<" ";
}
std::cout<<"\n";
}
}
std::cout<<"clocks:\n";
for (int k = 0; k < H; k++) {
for (int i = 0; i < R; i++) {
for (int j = 0; j < C; j++) {
std::cout << w[k][i][j] <<" ";
}
std::cout<<"\n";
}
}
std::cout<<"numBlocks.x="<<numBlocks.x<<" numBlocks.y="<<numBlocks.y<<" numBlocks.z="<<numBlocks.z<<"\n";
std::cout<<"Elapsed time = "<<elapsed_time<<" ms\n";
cudaDeviceReset();
return 0;
}
|
e6c58e104f7a9a85ef665fc0e060e823fbf493be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/reduction_ops.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/cub_namespace.cuh"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(SumElements, SumElementsOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SumElementsInt, SumElementsIntOp<int, CUDAContext>);
REGISTER_CUDA_OPERATOR(SumSqrElements, SumSqrElementsOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(RowwiseMax, MaxReductionOp<float, CUDAContext, true>);
REGISTER_CUDA_OPERATOR(ColwiseMax, MaxReductionOp<float, CUDAContext, false>);
REGISTER_CUDA_OPERATOR(
RowwiseMaxGradient,
MaxReductionGradientOp<float, CUDAContext, true>)
REGISTER_CUDA_OPERATOR(
ColwiseMaxGradient,
MaxReductionGradientOp<float, CUDAContext, false>)
REGISTER_CUDA_OPERATOR(
SumElementsGradient,
SumElementsGradientOp<float, CUDAContext>);
template <typename T>
__global__ void
SumElementsGradientKernel(bool average, const int N, const T* dY, T* dX) {
const T value = average ? (*dY) / N : *dY;
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = value;
}
}
__global__ void rowwise_max_gradient_kernel(
const int batch_size,
const int M,
const int N,
const float* X,
const float* Y,
const float* dY,
float* dX) {
const int input_size = M * N;
CUDA_1D_KERNEL_LOOP(i, batch_size * M * N) {
const int b_i = i / input_size;
const int b_n = i / input_size / N;
const int y_index = b_i * M + b_n;
if (X[i] == Y[y_index]) {
dX[i] = dY[y_index];
} else {
dX[i] = 0.0;
}
}
}
template <>
bool SumSqrElementsOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0));
}
__global__ void colwise_max_gradient_kernel(
const int batch_size,
const int M,
const int N,
const float* X,
const float* Y,
const float* dY,
float* dX) {
const int input_size = M * N;
CUDA_1D_KERNEL_LOOP(i, batch_size * M * N) {
const int b_i = i / input_size;
const int b_n = i % input_size % N;
const int y_index = b_i * N + b_n;
if (X[i] == Y[y_index]) {
dX[i] = dY[y_index];
} else {
dX[i] = 0.0;
}
}
}
template <>
bool SumElementsGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& dY = Input(1);
DCHECK_EQ(dY.numel(), 1);
auto* dX = Output(0, X.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( SumElementsGradientKernel<float>)
, dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
average_,
X.numel(),
dY.data<float>(),
dX->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <typename T, class Context, bool ROWWISE>
bool MaxReductionGradientOp<T, Context, ROWWISE>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0, X.sizes(), at::dtype<T>());
CAFFE_ENFORCE_EQ(X.dim(), 3);
const int batch_size = X.dim32(0);
const int M = X.dim32(1);
const int N = X.dim32(2);
const T* Xdata = X.template data<T>();
const T* Ydata = Y.template data<T>();
const T* dYdata = dY.template data<T>();
T* dXdata = dX->template mutable_data<T>();
const int input_size = M * N;
if (ROWWISE) {
hipLaunchKernelGGL(( rowwise_max_gradient_kernel),
dim3(CAFFE_GET_BLOCKS(batch_size * input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
batch_size, M, N, Xdata, Ydata, dYdata, dXdata);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
hipLaunchKernelGGL(( colwise_max_gradient_kernel),
dim3(CAFFE_GET_BLOCKS(batch_size * input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
batch_size, M, N, Xdata, Ydata, dYdata, dXdata);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
return true;
}
} // namespace caffe2
| e6c58e104f7a9a85ef665fc0e060e823fbf493be.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/reduction_ops.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/cub_namespace.cuh"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(SumElements, SumElementsOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SumElementsInt, SumElementsIntOp<int, CUDAContext>);
REGISTER_CUDA_OPERATOR(SumSqrElements, SumSqrElementsOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(RowwiseMax, MaxReductionOp<float, CUDAContext, true>);
REGISTER_CUDA_OPERATOR(ColwiseMax, MaxReductionOp<float, CUDAContext, false>);
REGISTER_CUDA_OPERATOR(
RowwiseMaxGradient,
MaxReductionGradientOp<float, CUDAContext, true>)
REGISTER_CUDA_OPERATOR(
ColwiseMaxGradient,
MaxReductionGradientOp<float, CUDAContext, false>)
REGISTER_CUDA_OPERATOR(
SumElementsGradient,
SumElementsGradientOp<float, CUDAContext>);
template <typename T>
__global__ void
SumElementsGradientKernel(bool average, const int N, const T* dY, T* dX) {
const T value = average ? (*dY) / N : *dY;
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = value;
}
}
__global__ void rowwise_max_gradient_kernel(
const int batch_size,
const int M,
const int N,
const float* X,
const float* Y,
const float* dY,
float* dX) {
const int input_size = M * N;
CUDA_1D_KERNEL_LOOP(i, batch_size * M * N) {
const int b_i = i / input_size;
const int b_n = i / input_size / N;
const int y_index = b_i * M + b_n;
if (X[i] == Y[y_index]) {
dX[i] = dY[y_index];
} else {
dX[i] = 0.0;
}
}
}
template <>
bool SumSqrElementsOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0));
}
__global__ void colwise_max_gradient_kernel(
const int batch_size,
const int M,
const int N,
const float* X,
const float* Y,
const float* dY,
float* dX) {
const int input_size = M * N;
CUDA_1D_KERNEL_LOOP(i, batch_size * M * N) {
const int b_i = i / input_size;
const int b_n = i % input_size % N;
const int y_index = b_i * N + b_n;
if (X[i] == Y[y_index]) {
dX[i] = dY[y_index];
} else {
dX[i] = 0.0;
}
}
}
template <>
bool SumElementsGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& dY = Input(1);
DCHECK_EQ(dY.numel(), 1);
auto* dX = Output(0, X.sizes(), at::dtype<float>());
SumElementsGradientKernel<float>
<<<CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
average_,
X.numel(),
dY.data<float>(),
dX->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <typename T, class Context, bool ROWWISE>
bool MaxReductionGradientOp<T, Context, ROWWISE>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0, X.sizes(), at::dtype<T>());
CAFFE_ENFORCE_EQ(X.dim(), 3);
const int batch_size = X.dim32(0);
const int M = X.dim32(1);
const int N = X.dim32(2);
const T* Xdata = X.template data<T>();
const T* Ydata = Y.template data<T>();
const T* dYdata = dY.template data<T>();
T* dXdata = dX->template mutable_data<T>();
const int input_size = M * N;
if (ROWWISE) {
rowwise_max_gradient_kernel<<<
CAFFE_GET_BLOCKS(batch_size * input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
batch_size, M, N, Xdata, Ydata, dYdata, dXdata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
colwise_max_gradient_kernel<<<
CAFFE_GET_BLOCKS(batch_size * input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
batch_size, M, N, Xdata, Ydata, dYdata, dXdata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
return true;
}
} // namespace caffe2
|
6c93b242c6e6426446a28909839e109245b5816e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6) {
if (comp < (var_1 * var_2 * var_3)) {
comp += -1.6448E6f + var_4 * var_5;
comp = expf(var_6 + +1.7628E-35f);
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7);
hipDeviceSynchronize();
return 0;
}
| 6c93b242c6e6426446a28909839e109245b5816e.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6) {
if (comp < (var_1 * var_2 * var_3)) {
comp += -1.6448E6f + var_4 * var_5;
comp = expf(var_6 + +1.7628E-35f);
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7);
cudaDeviceSynchronize();
return 0;
}
|
09a19fade51765d3120c3a109aa290a252bc28c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __CUDNN__
#include "MaskedFill.hpp"
template class MaskedFill<float>;
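// Converts a flat 1-D index into 5-D (time, batch, channel, row, col) coordinates;
// idxDim must be pre-loaded with the tensor shape and is overwritten in place.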
__forceinline__ __device__ int *GetTensorDimIndex(int index1D, int *idxDim, int capacityPerTime)
{
#pragma unroll
for(int i = 0; i < 4; i++) {
idxDim[i] = index1D/capacityPerTime;
index1D %= capacityPerTime;
capacityPerTime /= idxDim[i+1];
}
idxDim[4] = index1D;
return idxDim;
}
__forceinline__ __device__ unsigned int GetIdx(int *shapeDim, int ti, int ba, int ch, int ro, int co) {
return (((ti * (shapeDim)[1] + ba) * (shapeDim)[2] + ch) * (shapeDim)[3] + ro) * (shapeDim)[4] + co;
}
__forceinline__ __device__ unsigned int GetIdx(int *shapeDim, int *idxDim) {
return (((idxDim[0] * (shapeDim)[1] + idxDim[1]) * (shapeDim)[2] + idxDim[2]) * (shapeDim)[3] + idxDim[3]) * (shapeDim)[4] + idxDim[4];
}
__global__ void MaskedFillForwardPropagate_Kernel(float *pDevInput, float *pDevMask, float *pDevOutput,
int maskCapacity, int totalCapacity, float maskingValue, int timesize, int batchsize,
int channelsize, int rowsize, int colsize) {
int resultIdx = threadIdx.x + blockDim.x * blockIdx.x;
int resultShapeDim[5] = {timesize, batchsize, channelsize, rowsize, colsize};
int resultIdxDim[5] = {timesize, batchsize, channelsize, rowsize, colsize};
int capacityPerTime = batchsize*channelsize*rowsize*colsize;
GetTensorDimIndex(resultIdx, resultIdxDim, capacityPerTime);
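    // The mask has channel size 1, so it is broadcast across the channel dimension.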
int maskShapeDim[5] = {resultShapeDim[0], resultShapeDim[1], 1, resultShapeDim[3], resultShapeDim[4]};
int maskIdxDim[5] = {resultIdxDim[0], resultIdxDim[1], 0, resultIdxDim[3], resultIdxDim[4]};
int maskIdx = GetIdx(maskShapeDim, maskIdxDim);
if (maskIdx < maskCapacity && resultIdx < totalCapacity) {
if (pDevMask[maskIdx])
pDevOutput[resultIdx] = maskingValue;
else
pDevOutput[resultIdx] = pDevInput[resultIdx];
}
}
__global__ void MaskedFillBackPropagate_Kernel(float *pDevDelta, float *pDevMask, float *pDevInputDelta,
int maskCapacity, int totalCapacity, int timesize, int batchsize,
int channelsize, int rowsize, int colsize) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int shapeDim[5] = {timesize, batchsize, channelsize, rowsize, colsize};
int idxDim[5] = {timesize, batchsize, channelsize, rowsize, colsize};
int capacityPerTime = batchsize*channelsize*rowsize*colsize;
GetTensorDimIndex(idx, idxDim, capacityPerTime);
int maskShapeDim[5] = {shapeDim[0], shapeDim[1], 1, shapeDim[3], shapeDim[4]};
int maskIdxDim[5] = {idxDim[0], idxDim[1], 0, idxDim[3], idxDim[4]};
int maskIdx = GetIdx(maskShapeDim, maskIdxDim);
if (maskIdx < maskCapacity && idx < totalCapacity) {
if (pDevMask[maskIdx]) {
pDevInputDelta[idx] = 0;
}
else {
pDevInputDelta[idx] = pDevDelta[idx];
}
}
}
template <typename DTYPE> int MaskedFill<DTYPE>::ForwardPropagateOnGPU(int pTime) {
Tensor<DTYPE> *input = this->GetInput()[0]->GetResult();
Tensor<DTYPE> *mask = this->GetInput()[1]->GetResult();
Tensor<DTYPE> *result = this->GetResult();
DTYPE *m_pDevInput = input->GetGPUData(pTime);
DTYPE *m_pDevMask = mask->GetGPUData(0);
DTYPE *m_pDevOutput = result->GetGPUData(pTime);
int timesize = result->GetTimeSize();
int batchsize = result->GetBatchSize();
int channelsize = result->GetChannelSize();
int rowsize = result->GetRowSize();
int colsize = result->GetColSize();
int totalCapacity = result->GetCapacity() / timesize;
int inputCapacity = input->GetCapacity() / input->GetTimeSize();
int maskCapacity = mask->GetCapacity() / mask->GetTimeSize();
int threadsPerBlock = 128;
int noBlock = totalCapacity / threadsPerBlock + 1;
GetKernelParameters(totalCapacity, &noBlock, &threadsPerBlock);
hipLaunchKernelGGL(( MaskedFillForwardPropagate_Kernel) , dim3(noBlock), dim3(threadsPerBlock) , 0, 0, m_pDevInput, m_pDevMask, m_pDevOutput, maskCapacity, totalCapacity,
m_maskingValue, timesize, batchsize, channelsize, rowsize, colsize);
checkCudaErrors(hipDeviceSynchronize());
return TRUE;
}
template <typename DTYPE> int MaskedFill<DTYPE>::BackPropagateOnGPU(int pTime) {
Tensor<DTYPE> *mask = this->GetInput()[1]->GetResult();
Tensor<DTYPE> *input_delta = this->GetInput()[0]->GetDelta();
Tensor<DTYPE> *this_delta = this->GetDelta();
DTYPE *m_pDevInputDelta = input_delta->GetGPUData(pTime);
DTYPE *m_pDevMask = mask->GetGPUData(0);
DTYPE *m_pDevDelta = this_delta->GetGPUData(pTime);
int timesize = input_delta->GetTimeSize();
int batchsize = input_delta->GetBatchSize();
int channelsize = input_delta->GetChannelSize();
int rowsize = input_delta->GetRowSize();
int colsize = input_delta->GetColSize();
int totalCapacity = input_delta->GetCapacity() / timesize;
int maskCapacity = mask->GetCapacity() / mask->GetTimeSize();
int threadsPerBlock = 128;
int noBlock = totalCapacity / threadsPerBlock + 1;
GetKernelParameters(totalCapacity, &noBlock, &threadsPerBlock);
hipLaunchKernelGGL(( MaskedFillBackPropagate_Kernel) , dim3(noBlock), dim3(threadsPerBlock) , 0, 0, m_pDevDelta, m_pDevMask, m_pDevInputDelta, maskCapacity, totalCapacity,
timesize, batchsize, channelsize, rowsize, colsize);
checkCudaErrors(hipDeviceSynchronize());
return TRUE;
}
#endif
| 09a19fade51765d3120c3a109aa290a252bc28c5.cu | #ifdef __CUDNN__
#include "MaskedFill.hpp"
template class MaskedFill<float>;
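// Converts a flat 1-D index into 5-D (time, batch, channel, row, col) coordinates;
// idxDim must be pre-loaded with the tensor shape and is overwritten in place.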
__forceinline__ __device__ int *GetTensorDimIndex(int index1D, int *idxDim, int capacityPerTime)
{
#pragma unroll
for(int i = 0; i < 4; i++) {
idxDim[i] = index1D/capacityPerTime;
index1D %= capacityPerTime;
capacityPerTime /= idxDim[i+1];
}
idxDim[4] = index1D;
return idxDim;
}
__forceinline__ __device__ unsigned int GetIdx(int *shapeDim, int ti, int ba, int ch, int ro, int co) {
return (((ti * (shapeDim)[1] + ba) * (shapeDim)[2] + ch) * (shapeDim)[3] + ro) * (shapeDim)[4] + co;
}
__forceinline__ __device__ unsigned int GetIdx(int *shapeDim, int *idxDim) {
return (((idxDim[0] * (shapeDim)[1] + idxDim[1]) * (shapeDim)[2] + idxDim[2]) * (shapeDim)[3] + idxDim[3]) * (shapeDim)[4] + idxDim[4];
}
__global__ void MaskedFillForwardPropagate_Kernel(float *pDevInput, float *pDevMask, float *pDevOutput,
int maskCapacity, int totalCapacity, float maskingValue, int timesize, int batchsize,
int channelsize, int rowsize, int colsize) {
int resultIdx = threadIdx.x + blockDim.x * blockIdx.x;
int resultShapeDim[5] = {timesize, batchsize, channelsize, rowsize, colsize};
int resultIdxDim[5] = {timesize, batchsize, channelsize, rowsize, colsize};
int capacityPerTime = batchsize*channelsize*rowsize*colsize;
GetTensorDimIndex(resultIdx, resultIdxDim, capacityPerTime);
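    // The mask has channel size 1, so it is broadcast across the channel dimension.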
int maskShapeDim[5] = {resultShapeDim[0], resultShapeDim[1], 1, resultShapeDim[3], resultShapeDim[4]};
int maskIdxDim[5] = {resultIdxDim[0], resultIdxDim[1], 0, resultIdxDim[3], resultIdxDim[4]};
int maskIdx = GetIdx(maskShapeDim, maskIdxDim);
if (maskIdx < maskCapacity && resultIdx < totalCapacity) {
if (pDevMask[maskIdx])
pDevOutput[resultIdx] = maskingValue;
else
pDevOutput[resultIdx] = pDevInput[resultIdx];
}
}
__global__ void MaskedFillBackPropagate_Kernel(float *pDevDelta, float *pDevMask, float *pDevInputDelta,
int maskCapacity, int totalCapacity, int timesize, int batchsize,
int channelsize, int rowsize, int colsize) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int shapeDim[5] = {timesize, batchsize, channelsize, rowsize, colsize};
int idxDim[5] = {timesize, batchsize, channelsize, rowsize, colsize};
int capacityPerTime = batchsize*channelsize*rowsize*colsize;
GetTensorDimIndex(idx, idxDim, capacityPerTime);
int maskShapeDim[5] = {shapeDim[0], shapeDim[1], 1, shapeDim[3], shapeDim[4]};
int maskIdxDim[5] = {idxDim[0], idxDim[1], 0, idxDim[3], idxDim[4]};
int maskIdx = GetIdx(maskShapeDim, maskIdxDim);
if (maskIdx < maskCapacity && idx < totalCapacity) {
if (pDevMask[maskIdx]) {
pDevInputDelta[idx] = 0;
}
else {
pDevInputDelta[idx] = pDevDelta[idx];
}
}
}
template <typename DTYPE> int MaskedFill<DTYPE>::ForwardPropagateOnGPU(int pTime) {
Tensor<DTYPE> *input = this->GetInput()[0]->GetResult();
Tensor<DTYPE> *mask = this->GetInput()[1]->GetResult();
Tensor<DTYPE> *result = this->GetResult();
DTYPE *m_pDevInput = input->GetGPUData(pTime);
DTYPE *m_pDevMask = mask->GetGPUData(0);
DTYPE *m_pDevOutput = result->GetGPUData(pTime);
int timesize = result->GetTimeSize();
int batchsize = result->GetBatchSize();
int channelsize = result->GetChannelSize();
int rowsize = result->GetRowSize();
int colsize = result->GetColSize();
int totalCapacity = result->GetCapacity() / timesize;
int inputCapacity = input->GetCapacity() / input->GetTimeSize();
int maskCapacity = mask->GetCapacity() / mask->GetTimeSize();
int threadsPerBlock = 128;
int noBlock = totalCapacity / threadsPerBlock + 1;
GetKernelParameters(totalCapacity, &noBlock, &threadsPerBlock);
MaskedFillForwardPropagate_Kernel <<< noBlock, threadsPerBlock >>> (m_pDevInput, m_pDevMask, m_pDevOutput, maskCapacity, totalCapacity,
m_maskingValue, timesize, batchsize, channelsize, rowsize, colsize);
checkCudaErrors(cudaDeviceSynchronize());
return TRUE;
}
template <typename DTYPE> int MaskedFill<DTYPE>::BackPropagateOnGPU(int pTime) {
Tensor<DTYPE> *mask = this->GetInput()[1]->GetResult();
Tensor<DTYPE> *input_delta = this->GetInput()[0]->GetDelta();
Tensor<DTYPE> *this_delta = this->GetDelta();
DTYPE *m_pDevInputDelta = input_delta->GetGPUData(pTime);
DTYPE *m_pDevMask = mask->GetGPUData(0);
DTYPE *m_pDevDelta = this_delta->GetGPUData(pTime);
int timesize = input_delta->GetTimeSize();
int batchsize = input_delta->GetBatchSize();
int channelsize = input_delta->GetChannelSize();
int rowsize = input_delta->GetRowSize();
int colsize = input_delta->GetColSize();
int totalCapacity = input_delta->GetCapacity() / timesize;
int maskCapacity = mask->GetCapacity() / mask->GetTimeSize();
int threadsPerBlock = 128;
int noBlock = totalCapacity / threadsPerBlock + 1;
GetKernelParameters(totalCapacity, &noBlock, &threadsPerBlock);
MaskedFillBackPropagate_Kernel <<< noBlock, threadsPerBlock >>> (m_pDevDelta, m_pDevMask, m_pDevInputDelta, maskCapacity, totalCapacity,
timesize, batchsize, channelsize, rowsize, colsize);
checkCudaErrors(cudaDeviceSynchronize());
return TRUE;
}
#endif
|
25137c7cfe6798e52e2aa3dd4651441030274999.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "libwrap.h"
#include <dlfcn.h>
#include "core.h"
int symbolsLoaded = 0;
static rsmi_status_t (*nvmlInternalInit)(void);
static rsmi_status_t (*nvmlInternalShutdown)(void);
static rsmi_status_t (*nvmlInternalDeviceGetHandleByPciBusId)(const char* pciBusId, uint32_t* device);
static rsmi_status_t (*nvmlInternalDeviceGetIndex)(uint32_t device, unsigned* index);
static rsmi_status_t (*nvmlInternalDeviceSetCpuAffinity)(uint32_t device);
static rsmi_status_t (*nvmlInternalDeviceClearCpuAffinity)(uint32_t device);
static const char* (*nvmlInternalErrorString)(rsmi_status_t r);
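// Lazily loads libnvidia-ml.so[.1] and resolves the NVML entry points used by the wrappers below.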
ncclResult_t wrapSymbols(void) {
if (symbolsLoaded)
return ncclSuccess;
static void* nvmlhandle = nullptr;
void* tmp;
void** cast;
nvmlhandle=dlopen("libnvidia-ml.so", RTLD_NOW);
if (!nvmlhandle) {
nvmlhandle=dlopen("libnvidia-ml.so.1", RTLD_NOW);
if (!nvmlhandle) {
WARN("Failed to open libnvidia-ml.so[.1]");
goto teardown;
}
}
#define LOAD_SYM(handle, symbol, funcptr) do { \
cast = (void**)&funcptr; \
tmp = dlsym(handle, symbol); \
if (tmp == nullptr) { \
WARN("dlsym failed on %s - %s", symbol, dlerror());\
goto teardown; \
} \
*cast = tmp; \
} while (0)
LOAD_SYM(nvmlhandle, "nvmlInit", nvmlInternalInit);
LOAD_SYM(nvmlhandle, "nvmlShutdown", nvmlInternalShutdown);
LOAD_SYM(nvmlhandle, "nvmlDeviceGetHandleByPciBusId", nvmlInternalDeviceGetHandleByPciBusId);
LOAD_SYM(nvmlhandle, "nvmlDeviceGetIndex", nvmlInternalDeviceGetIndex);
LOAD_SYM(nvmlhandle, "nvmlDeviceSetCpuAffinity", nvmlInternalDeviceSetCpuAffinity);
LOAD_SYM(nvmlhandle, "nvmlDeviceClearCpuAffinity", nvmlInternalDeviceClearCpuAffinity);
LOAD_SYM(nvmlhandle, "nvmlErrorString", nvmlInternalErrorString);
symbolsLoaded = 1;
return ncclSuccess;
teardown:
nvmlInternalInit = nullptr;
nvmlInternalShutdown = nullptr;
nvmlInternalDeviceGetHandleByPciBusId = nullptr;
nvmlInternalDeviceGetIndex = nullptr;
nvmlInternalDeviceSetCpuAffinity = nullptr;
nvmlInternalDeviceClearCpuAffinity = nullptr;
if (nvmlhandle != nullptr) dlclose(nvmlhandle);
return ncclSystemError;
}
ncclResult_t wrapNvmlInit(void) {
if (nvmlInternalInit == nullptr) {
WARN("lib wrapper not initialized.");
return ncclLibWrapperNotSet;
}
rsmi_status_t ret = nvmlInternalInit();
if (ret != RSMI_STATUS_SUCCESS) {
WARN("nvmlInit() failed: %s",
nvmlInternalErrorString(ret));
return ncclSystemError;
}
return ncclSuccess;
}
ncclResult_t wrapNvmlShutdown(void) {
if (nvmlInternalShutdown == nullptr) {
WARN("lib wrapper not initialized.");
return ncclLibWrapperNotSet;
}
rsmi_status_t ret = nvmlInternalShutdown();
if (ret != RSMI_STATUS_SUCCESS) {
WARN("nvmlShutdown() failed: %s ",
nvmlInternalErrorString(ret));
return ncclSystemError;
}
return ncclSuccess;
}
ncclResult_t wrapNvmlDeviceGetHandleByPciBusId(const char* pciBusId, uint32_t* device) {
if (nvmlInternalDeviceGetHandleByPciBusId == nullptr) {
WARN("lib wrapper not initialized.");
return ncclLibWrapperNotSet;
}
rsmi_status_t ret = nvmlInternalDeviceGetHandleByPciBusId(pciBusId, device);
if (ret != RSMI_STATUS_SUCCESS) {
WARN("nvmlDeviceGetHandleByPciBusId() failed: %s ",
nvmlInternalErrorString(ret));
return ncclSystemError;
}
return ncclSuccess;
}
ncclResult_t wrapNvmlDeviceGetIndex(uint32_t device, unsigned* index) {
if (nvmlInternalDeviceGetIndex == nullptr) {
WARN("lib wrapper not initialized.");
return ncclLibWrapperNotSet;
}
rsmi_status_t ret = nvmlInternalDeviceGetIndex(device, index);
if (ret != RSMI_STATUS_SUCCESS) {
WARN("nvmlDeviceGetIndex() failed: %s ",
nvmlInternalErrorString(ret));
return ncclSystemError;
}
return ncclSuccess;
}
ncclResult_t wrapNvmlDeviceSetCpuAffinity(uint32_t device) {
if (nvmlInternalDeviceSetCpuAffinity == nullptr) {
WARN("lib wrapper not initialized.");
return ncclLibWrapperNotSet;
}
rsmi_status_t ret = nvmlInternalDeviceSetCpuAffinity(device);
if (ret != RSMI_STATUS_SUCCESS) {
WARN("nvmlDeviceSetCpuAffinity() failed: %s ",
nvmlInternalErrorString(ret));
return ncclSystemError;
}
return ncclSuccess;
}
ncclResult_t wrapNvmlDeviceClearCpuAffinity(uint32_t device) {
if (nvmlInternalInit == nullptr) {
WARN("lib wrapper not initialized.");
return ncclLibWrapperNotSet;
}
rsmi_status_t ret = nvmlInternalDeviceClearCpuAffinity(device);
if (ret != RSMI_STATUS_SUCCESS) {
WARN("nvmlDeviceClearCpuAffinity() failed: %s ",
nvmlInternalErrorString(ret));
return ncclSystemError;
}
return ncclSuccess;
}
| 25137c7cfe6798e52e2aa3dd4651441030274999.cu | /*************************************************************************
* Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "libwrap.h"
#include <dlfcn.h>
#include "core.h"
int symbolsLoaded = 0;
static nvmlReturn_t (*nvmlInternalInit)(void);
static nvmlReturn_t (*nvmlInternalShutdown)(void);
static nvmlReturn_t (*nvmlInternalDeviceGetHandleByPciBusId)(const char* pciBusId, nvmlDevice_t* device);
static nvmlReturn_t (*nvmlInternalDeviceGetIndex)(nvmlDevice_t device, unsigned* index);
static nvmlReturn_t (*nvmlInternalDeviceSetCpuAffinity)(nvmlDevice_t device);
static nvmlReturn_t (*nvmlInternalDeviceClearCpuAffinity)(nvmlDevice_t device);
static const char* (*nvmlInternalErrorString)(nvmlReturn_t r);
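// Lazily loads libnvidia-ml.so[.1] and resolves the NVML entry points used by the wrappers below.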
ncclResult_t wrapSymbols(void) {
if (symbolsLoaded)
return ncclSuccess;
static void* nvmlhandle = nullptr;
void* tmp;
void** cast;
nvmlhandle=dlopen("libnvidia-ml.so", RTLD_NOW);
if (!nvmlhandle) {
nvmlhandle=dlopen("libnvidia-ml.so.1", RTLD_NOW);
if (!nvmlhandle) {
WARN("Failed to open libnvidia-ml.so[.1]");
goto teardown;
}
}
#define LOAD_SYM(handle, symbol, funcptr) do { \
cast = (void**)&funcptr; \
tmp = dlsym(handle, symbol); \
if (tmp == nullptr) { \
WARN("dlsym failed on %s - %s", symbol, dlerror());\
goto teardown; \
} \
*cast = tmp; \
} while (0)
LOAD_SYM(nvmlhandle, "nvmlInit", nvmlInternalInit);
LOAD_SYM(nvmlhandle, "nvmlShutdown", nvmlInternalShutdown);
LOAD_SYM(nvmlhandle, "nvmlDeviceGetHandleByPciBusId", nvmlInternalDeviceGetHandleByPciBusId);
LOAD_SYM(nvmlhandle, "nvmlDeviceGetIndex", nvmlInternalDeviceGetIndex);
LOAD_SYM(nvmlhandle, "nvmlDeviceSetCpuAffinity", nvmlInternalDeviceSetCpuAffinity);
LOAD_SYM(nvmlhandle, "nvmlDeviceClearCpuAffinity", nvmlInternalDeviceClearCpuAffinity);
LOAD_SYM(nvmlhandle, "nvmlErrorString", nvmlInternalErrorString);
symbolsLoaded = 1;
return ncclSuccess;
teardown:
nvmlInternalInit = nullptr;
nvmlInternalShutdown = nullptr;
nvmlInternalDeviceGetHandleByPciBusId = nullptr;
nvmlInternalDeviceGetIndex = nullptr;
nvmlInternalDeviceSetCpuAffinity = nullptr;
nvmlInternalDeviceClearCpuAffinity = nullptr;
if (nvmlhandle != nullptr) dlclose(nvmlhandle);
return ncclSystemError;
}
ncclResult_t wrapNvmlInit(void) {
if (nvmlInternalInit == nullptr) {
WARN("lib wrapper not initialized.");
return ncclLibWrapperNotSet;
}
nvmlReturn_t ret = nvmlInternalInit();
if (ret != NVML_SUCCESS) {
WARN("nvmlInit() failed: %s",
nvmlInternalErrorString(ret));
return ncclSystemError;
}
return ncclSuccess;
}
ncclResult_t wrapNvmlShutdown(void) {
if (nvmlInternalShutdown == nullptr) {
WARN("lib wrapper not initialized.");
return ncclLibWrapperNotSet;
}
nvmlReturn_t ret = nvmlInternalShutdown();
if (ret != NVML_SUCCESS) {
WARN("nvmlShutdown() failed: %s ",
nvmlInternalErrorString(ret));
return ncclSystemError;
}
return ncclSuccess;
}
ncclResult_t wrapNvmlDeviceGetHandleByPciBusId(const char* pciBusId, nvmlDevice_t* device) {
if (nvmlInternalDeviceGetHandleByPciBusId == nullptr) {
WARN("lib wrapper not initialized.");
return ncclLibWrapperNotSet;
}
nvmlReturn_t ret = nvmlInternalDeviceGetHandleByPciBusId(pciBusId, device);
if (ret != NVML_SUCCESS) {
WARN("nvmlDeviceGetHandleByPciBusId() failed: %s ",
nvmlInternalErrorString(ret));
return ncclSystemError;
}
return ncclSuccess;
}
ncclResult_t wrapNvmlDeviceGetIndex(nvmlDevice_t device, unsigned* index) {
if (nvmlInternalDeviceGetIndex == nullptr) {
WARN("lib wrapper not initialized.");
return ncclLibWrapperNotSet;
}
nvmlReturn_t ret = nvmlInternalDeviceGetIndex(device, index);
if (ret != NVML_SUCCESS) {
WARN("nvmlDeviceGetIndex() failed: %s ",
nvmlInternalErrorString(ret));
return ncclSystemError;
}
return ncclSuccess;
}
ncclResult_t wrapNvmlDeviceSetCpuAffinity(nvmlDevice_t device) {
if (nvmlInternalDeviceSetCpuAffinity == nullptr) {
WARN("lib wrapper not initialized.");
return ncclLibWrapperNotSet;
}
nvmlReturn_t ret = nvmlInternalDeviceSetCpuAffinity(device);
if (ret != NVML_SUCCESS) {
WARN("nvmlDeviceSetCpuAffinity() failed: %s ",
nvmlInternalErrorString(ret));
return ncclSystemError;
}
return ncclSuccess;
}
ncclResult_t wrapNvmlDeviceClearCpuAffinity(nvmlDevice_t device) {
if (nvmlInternalInit == nullptr) {
WARN("lib wrapper not initialized.");
return ncclLibWrapperNotSet;
}
nvmlReturn_t ret = nvmlInternalDeviceClearCpuAffinity(device);
if (ret != NVML_SUCCESS) {
WARN("nvmlDeviceClearCpuAffinity() failed: %s ",
nvmlInternalErrorString(ret));
return ncclSystemError;
}
return ncclSuccess;
}
|
ca8f7d06fd3f3b31ab09b892c3908b3a148df1f9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__ void join(
TUPLE *rt,
TUPLE *lt,
RESULT *jt,
uint *count,
BUCKET *bucket,
int *buck_array,
int *idxcount,
int right,
int left
)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
uint writeloc = count[x];
if(x < left){
int val = lt[x].val;
int idx = val % NB_BKT_ENT;
int idx_c = idxcount[idx];
int buck_a = buck_array[idx];
if(buck_a != -1){
int i = 0;
for(int k = 0; k < idx_c; k++){
if(bucket[buck_a + k].val == val){
jt[writeloc + i].lkey = lt[x].key;
jt[writeloc + i].lval = val;
jt[writeloc + i].rkey = rt[bucket[buck_a + k].adr].key;
jt[writeloc + i].rval = rt[bucket[buck_a + k].adr].val;
i++;
//printf("%d %d\n",jt[count[i] + k].rkey,jt[count[i] + k].lkey);
}
}
}
}
  //shared memory experiment (commented out)
/*
__shared__ int ba[NB_BKT_ENT];
for(int i=0; i<NB_BKT_ENT ;i++){
ba[i] = buck_array[i];
}
__syncthreads();
int writeloc = 0;
if(y!=0){
writeloc = count[y-1];
}
if(y < right){
int idx = rt[y].val % NB_BKT_ENT;
if(ba[idx] != -1){
int i = 0;
for(int k = 0; k < idxcount[idx]; k++){
if(bucket[ba[idx] + k].val == rt[y].val){
jt[writeloc + i].rkey = rt[y].key;
jt[writeloc + i].rval = rt[y].val;
jt[writeloc + i].lkey = lt[bucket[ba[idx] + k].adr].key;
jt[writeloc + i].lval = lt[bucket[ba[idx] + k].adr].val;
i++;
//printf("%d %d\n",jt[count[i] + k].rkey,jt[count[i] + k].lkey);
}
}
}
}
*/
}
}
| ca8f7d06fd3f3b31ab09b892c3908b3a148df1f9.cu | #include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__ void join(
TUPLE *rt,
TUPLE *lt,
RESULT *jt,
uint *count,
BUCKET *bucket,
int *buck_array,
int *idxcount,
int right,
int left
)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
uint writeloc = count[x];
if(x < left){
int val = lt[x].val;
int idx = val % NB_BKT_ENT;
int idx_c = idxcount[idx];
int buck_a = buck_array[idx];
if(buck_a != -1){
int i = 0;
for(int k = 0; k < idx_c; k++){
if(bucket[buck_a + k].val == val){
jt[writeloc + i].lkey = lt[x].key;
jt[writeloc + i].lval = val;
jt[writeloc + i].rkey = rt[bucket[buck_a + k].adr].key;
jt[writeloc + i].rval = rt[bucket[buck_a + k].adr].val;
i++;
//printf("%d %d\n",jt[count[i] + k].rkey,jt[count[i] + k].lkey);
}
}
}
}
  //shared memory experiment (commented out)
/*
__shared__ int ba[NB_BKT_ENT];
for(int i=0; i<NB_BKT_ENT ;i++){
ba[i] = buck_array[i];
}
__syncthreads();
int writeloc = 0;
if(y!=0){
writeloc = count[y-1];
}
if(y < right){
int idx = rt[y].val % NB_BKT_ENT;
if(ba[idx] != -1){
int i = 0;
for(int k = 0; k < idxcount[idx]; k++){
if(bucket[ba[idx] + k].val == rt[y].val){
jt[writeloc + i].rkey = rt[y].key;
jt[writeloc + i].rval = rt[y].val;
jt[writeloc + i].lkey = lt[bucket[ba[idx] + k].adr].key;
jt[writeloc + i].lval = lt[bucket[ba[idx] + k].adr].val;
i++;
//printf("%d %d\n",jt[count[i] + k].rkey,jt[count[i] + k].lkey);
}
}
}
}
*/
}
}
|
db164840ad2a26994c2901834b8d5e3d080f5cf0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/****************************************
 Name: IANG JIWEI
 Student ID: 21M30519
 Description:
 I wanted to parallelize this with MPI+CUDA, but could not get it to work well.
 The expected Flops were not achieved.
Runcode on Tusbame:
module load cuda gcc/8.3.0 openmpi
nvcc MPI_CUDA.cu -lmpi -Xcompiler "-O3 -fopenmp"
mpirun -np 4 ./a.out
*****************************************/
#include <mpi.h>
#include <cmath>
#include <cstdlib>
#include <cstdio>
#include <chrono>
using namespace std;
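// Each thread computes one element of the (N/size x N/size) output block of subA * subB:
// blockIdx.x selects the row and threadIdx.x the column within the block.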
__global__ void matmul(float *A, float *B, float *C, int N, int size) {
int i = blockIdx.x;
int j = threadIdx.x;
float sum = 0;
for (int k=0; k<N; k++) {
sum += A[N*i+k] * B[N/size*k+j];
}
C[N*i+j] = sum;
}
int main(int argc, char **argv) {
//init MPI
int size, rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);//number of process
MPI_Comm_rank(MPI_COMM_WORLD, &rank);//process number
int N = 256;
int full_size = N * N * sizeof(float);
int sub_size = N * N /size * sizeof(float);
float *A, *B, *C, *subA, *subB, *subC, *recv;
hipMallocManaged(&A, full_size);
hipMallocManaged(&B, full_size);
hipMallocManaged(&C, full_size);
hipMallocManaged(&subA, sub_size);
hipMallocManaged(&subB, sub_size);
hipMallocManaged(&subC, sub_size);
hipMallocManaged(&recv, sub_size);
for (int i=0; i<N; i++) {
for (int j=0; j<N; j++) {
A[N*i+j] = drand48();
B[N*i+j] = drand48();
C[N*i+j] = 0;
}
}
  //initialize the sub-matrices held by each rank
  int offset = N/size*rank; //dimension N is split evenly across the size ranks
for (int i=0; i<N/size; i++)
for (int j=0; j<N; j++)
      subA[N*i+j] = A[N*(i+offset)+j];//Separate A by row
for (int i=0; i<N; i++)
for (int j=0; j<N/size; j++)
      subB[N/size*i+j] = B[N*i+j+offset];//Separate B by column
for (int i = 0; i < sub_size; i++){
subC[i] = 0;
}
int recv_from = (rank + 1) % size;
int send_to = (rank - 1 + size) % size;
double comp_time = 0, comm_time = 0;
for(int irank=0; irank<size; irank++) {
auto tic = chrono::steady_clock::now();//time
offset = N/size*((rank+irank) % size);
hipLaunchKernelGGL(( matmul), dim3(N/size),dim3(N/size), 0, 0, subA, subB, &subC[offset], N, size);
hipDeviceSynchronize();
auto toc = chrono::steady_clock::now();//time
comp_time += chrono::duration<double>(toc - tic).count();//computation time?
MPI_Request request[2];
MPI_Isend(&subB[0], N*N/size, MPI_FLOAT, send_to, 0, MPI_COMM_WORLD, &request[0]);
MPI_Irecv(&recv[0], N*N/size, MPI_FLOAT, recv_from, 0, MPI_COMM_WORLD, &request[1]);
MPI_Waitall(2, request, MPI_STATUS_IGNORE);
for (int i=0; i<N*N/size; i++)
subB[i] = recv[i];
tic = chrono::steady_clock::now();//time
comm_time += chrono::duration<double>(tic - toc).count();//communication time?
}
MPI_Allgather(&subC[0], N*N/size, MPI_FLOAT, &C[0], N*N/size, MPI_FLOAT, MPI_COMM_WORLD);
#pragma omp parallel for
for (int i=0; i<N; i++)
for (int k=0; k<N; k++)
for (int j=0; j<N; j++)
C[N*i+j] -= A[N*i+k] * B[N*k+j];
double err = 0;
for (int i=0; i<N; i++)
for (int j=0; j<N; j++)
err += fabs(C[N*i+j]);
if(rank==0){
double time = comp_time+comm_time;//total time
printf("N : %d\n",N);
printf("comp : %lf s\n", comp_time);
printf("comm : %lf s\n", comm_time);
printf("total: %lf s (%lf GFlops)\n", time, 2.*N*N*N/time/1e9);
printf("error: %lf\n",err/N/N);
}
hipFree(A);
hipFree(B);
hipFree(C);
hipFree(subA);
hipFree(subB);
hipFree(subC);
hipFree(recv);
MPI_Finalize();
}
| db164840ad2a26994c2901834b8d5e3d080f5cf0.cu | /****************************************
 Name: IANG JIWEI
 Student ID: 21M30519
 Description:
 I wanted to parallelize this with MPI+CUDA, but could not get it to work well.
 The expected Flops were not achieved.
Runcode on Tusbame:
module load cuda gcc/8.3.0 openmpi
nvcc MPI_CUDA.cu -lmpi -Xcompiler "-O3 -fopenmp"
mpirun -np 4 ./a.out
*****************************************/
#include <mpi.h>
#include <cmath>
#include <cstdlib>
#include <cstdio>
#include <chrono>
using namespace std;
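// Each thread computes one element of the (N/size x N/size) output block of subA * subB:
// blockIdx.x selects the row and threadIdx.x the column within the block.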
__global__ void matmul(float *A, float *B, float *C, int N, int size) {
int i = blockIdx.x;
int j = threadIdx.x;
float sum = 0;
for (int k=0; k<N; k++) {
sum += A[N*i+k] * B[N/size*k+j];
}
C[N*i+j] = sum;
}
int main(int argc, char **argv) {
//init MPI
int size, rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);//number of process
MPI_Comm_rank(MPI_COMM_WORLD, &rank);//process number
int N = 256;
int full_size = N * N * sizeof(float);
int sub_size = N * N /size * sizeof(float);
float *A, *B, *C, *subA, *subB, *subC, *recv;
cudaMallocManaged(&A, full_size);
cudaMallocManaged(&B, full_size);
cudaMallocManaged(&C, full_size);
cudaMallocManaged(&subA, sub_size);
cudaMallocManaged(&subB, sub_size);
cudaMallocManaged(&subC, sub_size);
cudaMallocManaged(&recv, sub_size);
for (int i=0; i<N; i++) {
for (int j=0; j<N; j++) {
A[N*i+j] = drand48();
B[N*i+j] = drand48();
C[N*i+j] = 0;
}
}
  //initialize the sub-matrices held by each rank
  int offset = N/size*rank; //dimension N is split evenly across the size ranks
for (int i=0; i<N/size; i++)
for (int j=0; j<N; j++)
      subA[N*i+j] = A[N*(i+offset)+j];//Separate A by row
for (int i=0; i<N; i++)
for (int j=0; j<N/size; j++)
      subB[N/size*i+j] = B[N*i+j+offset];//Separate B by column
for (int i = 0; i < sub_size; i++){
subC[i] = 0;
}
int recv_from = (rank + 1) % size;
int send_to = (rank - 1 + size) % size;
double comp_time = 0, comm_time = 0;
for(int irank=0; irank<size; irank++) {
auto tic = chrono::steady_clock::now();//time
offset = N/size*((rank+irank) % size);
matmul<<<N/size,N/size>>>(subA, subB, &subC[offset], N, size);
cudaDeviceSynchronize();
auto toc = chrono::steady_clock::now();//time
comp_time += chrono::duration<double>(toc - tic).count();//computation time?
MPI_Request request[2];
MPI_Isend(&subB[0], N*N/size, MPI_FLOAT, send_to, 0, MPI_COMM_WORLD, &request[0]);
MPI_Irecv(&recv[0], N*N/size, MPI_FLOAT, recv_from, 0, MPI_COMM_WORLD, &request[1]);
MPI_Waitall(2, request, MPI_STATUS_IGNORE);
for (int i=0; i<N*N/size; i++)
subB[i] = recv[i];
tic = chrono::steady_clock::now();//time
comm_time += chrono::duration<double>(tic - toc).count();//communication time?
}
MPI_Allgather(&subC[0], N*N/size, MPI_FLOAT, &C[0], N*N/size, MPI_FLOAT, MPI_COMM_WORLD);
#pragma omp parallel for
for (int i=0; i<N; i++)
for (int k=0; k<N; k++)
for (int j=0; j<N; j++)
C[N*i+j] -= A[N*i+k] * B[N*k+j];
double err = 0;
for (int i=0; i<N; i++)
for (int j=0; j<N; j++)
err += fabs(C[N*i+j]);
if(rank==0){
double time = comp_time+comm_time;//total time
printf("N : %d\n",N);
printf("comp : %lf s\n", comp_time);
printf("comm : %lf s\n", comm_time);
printf("total: %lf s (%lf GFlops)\n", time, 2.*N*N*N/time/1e9);
printf("error: %lf\n",err/N/N);
}
cudaFree(A);
cudaFree(B);
cudaFree(C);
cudaFree(subA);
cudaFree(subB);
cudaFree(subC);
cudaFree(recv);
MPI_Finalize();
}
|
8403132f53c3da0bf89d3dc5abdfb22ed97bd68d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "quant_kernel.h"
#include "bit_helper.cu"
// quantize a float into a floating point with [exp_bits] exponent and
// [man_bits] mantissa
__global__ void float_kernel_stochastic(float* __restrict__ a,
int* __restrict__ r,
float* o, int size,
int man_bits,
int exp_bits) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
unsigned int rand_prob = (unsigned int) r[index];
unsigned int target,quantize_bits;
target = FLOAT_TO_BITS(&a[index]);
float quantized;
int target_exp = (target << 1 >> 1 >> 23) -127;
int min_exp = -((1 << (exp_bits - 1)) - 2);
bool subnormal = (target_exp < min_exp);
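    // Inputs below the minimum normal exponent of the target format are quantized by adding
    // a power-of-two shift so rounding happens at the correct bit, then removing the shift.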
if (subnormal){
float shift_float,val;
int shift_bits = ((127+min_exp)<<23) | (target >> 31 <<31);
shift_float = BITS_TO_FLOAT(&shift_bits);
val=a[index]+shift_float;
target = FLOAT_TO_BITS(&val);
quantize_bits = round_bitwise_stochastic(target, rand_prob, man_bits);
quantized = BITS_TO_FLOAT(&quantize_bits) - shift_float;
}
else{
quantize_bits = round_bitwise_stochastic(target, rand_prob, man_bits);
quantize_bits = clip_exponent(exp_bits, man_bits, target, quantize_bits);
quantized = BITS_TO_FLOAT(&quantize_bits);
}
o[index] = quantized;
}
}
// quantize a float into a floating point with [exp_bits] exponent and
// [man_bits] mantissa
__global__ void float_kernel_nearest(float* __restrict__ a,
float* o, int size,
int man_bits,
int exp_bits) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
unsigned int target,quantize_bits;
target = FLOAT_TO_BITS(&a[index]);
float quantized;
int target_exp = (target << 1 >> 1 >> 23) -127;
int min_exp = -((1 << (exp_bits - 1)) - 2);
bool subnormal = (target_exp < min_exp);
if (subnormal){
float shift_float,val;
int shift_bits = ((127+min_exp)<<23) | (target >> 31 <<31);
shift_float = BITS_TO_FLOAT(&shift_bits);
val=a[index]+shift_float;
target = FLOAT_TO_BITS(&val);
quantize_bits = round_bitwise_nearest(target, man_bits);
quantized = BITS_TO_FLOAT(&quantize_bits) - shift_float;
}
else{
quantize_bits = round_bitwise_nearest(target, man_bits);
quantize_bits = clip_exponent(exp_bits, man_bits, target, quantize_bits);
quantized = BITS_TO_FLOAT(&quantize_bits);
}
o[index] = quantized;
}
}
| 8403132f53c3da0bf89d3dc5abdfb22ed97bd68d.cu | #include "quant_kernel.h"
#include "bit_helper.cu"
// quantize a float into a floating point with [exp_bits] exponent and
// [man_bits] mantissa
__global__ void float_kernel_stochastic(float* __restrict__ a,
int* __restrict__ r,
float* o, int size,
int man_bits,
int exp_bits) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
unsigned int rand_prob = (unsigned int) r[index];
unsigned int target,quantize_bits;
target = FLOAT_TO_BITS(&a[index]);
float quantized;
int target_exp = (target << 1 >> 1 >> 23) -127;
int min_exp = -((1 << (exp_bits - 1)) - 2);
bool subnormal = (target_exp < min_exp);
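    // Inputs below the minimum normal exponent of the target format are quantized by adding
    // a power-of-two shift so rounding happens at the correct bit, then removing the shift.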
if (subnormal){
float shift_float,val;
int shift_bits = ((127+min_exp)<<23) | (target >> 31 <<31);
shift_float = BITS_TO_FLOAT(&shift_bits);
val=a[index]+shift_float;
target = FLOAT_TO_BITS(&val);
quantize_bits = round_bitwise_stochastic(target, rand_prob, man_bits);
quantized = BITS_TO_FLOAT(&quantize_bits) - shift_float;
}
else{
quantize_bits = round_bitwise_stochastic(target, rand_prob, man_bits);
quantize_bits = clip_exponent(exp_bits, man_bits, target, quantize_bits);
quantized = BITS_TO_FLOAT(&quantize_bits);
}
o[index] = quantized;
}
}
// quantize a float into a floating point with [exp_bits] exponent and
// [man_bits] mantissa
__global__ void float_kernel_nearest(float* __restrict__ a,
float* o, int size,
int man_bits,
int exp_bits) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
unsigned int target,quantize_bits;
target = FLOAT_TO_BITS(&a[index]);
float quantized;
int target_exp = (target << 1 >> 1 >> 23) -127;
int min_exp = -((1 << (exp_bits - 1)) - 2);
bool subnormal = (target_exp < min_exp);
if (subnormal){
float shift_float,val;
int shift_bits = ((127+min_exp)<<23) | (target >> 31 <<31);
shift_float = BITS_TO_FLOAT(&shift_bits);
val=a[index]+shift_float;
target = FLOAT_TO_BITS(&val);
quantize_bits = round_bitwise_nearest(target, man_bits);
quantized = BITS_TO_FLOAT(&quantize_bits) - shift_float;
}
else{
quantize_bits = round_bitwise_nearest(target, man_bits);
quantize_bits = clip_exponent(exp_bits, man_bits, target, quantize_bits);
quantized = BITS_TO_FLOAT(&quantize_bits);
}
o[index] = quantized;
}
}
|
9a411cde881a52b28dec0998ce57c0add5a15b68.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "HugeCTR/include/common.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/data.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/utils.hpp"
#include "HugeCTR/include/tensor2.hpp"
#include "HugeCTR/include/utils.cuh"
using namespace HugeCTR;
using namespace hybrid_embedding;
namespace {
template <typename dtype, typename emtype = float>
void data_test() {
size_t batch_size = 4;
size_t num_iterations = 2;
std::vector<size_t> table_sizes{100, 10, 10, 20};
std::vector<dtype> data_in{99, 3, 7, 19, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
3, 3, 3, 3, 50, 2, 4, 10, 2, 2, 2, 2, 1, 1, 1, 1};
std::vector<dtype> data_to_unique_categories_ref{
99, 103, 117, 139, 0, 100, 110, 120, 1, 101, 111, 121, 2, 102, 112, 122,
3, 103, 113, 123, 50, 102, 114, 130, 2, 102, 112, 122, 1, 101, 111, 121};
Tensor2<dtype> d_data_in;
// std::cout << "debug2" << std::endl;
std::shared_ptr<GeneralBuffer2<CudaAllocator>> buff = GeneralBuffer2<CudaAllocator>::create();
buff->reserve({batch_size * num_iterations * table_sizes.size()}, &d_data_in);
buff->allocate();
upload_tensor(data_in, d_data_in, 0);
// std::cout << "debug3" << std::endl;
Data<dtype> data(table_sizes, batch_size, num_iterations);
// std::cout << "debug" << std::endl;
data.data_to_unique_categories(d_data_in, 0);
// std::cout << "debug1" << std::endl;
std::vector<dtype> data_to_unique_categories_ret;
download_tensor(data_to_unique_categories_ret, data.samples, 0);
EXPECT_THAT(data_to_unique_categories_ret,
::testing::ElementsAreArray(data_to_unique_categories_ref));
};
} // namespace
namespace HugeCTR {
namespace hybrid_embedding {
template <typename dtype>
void test_raw_data(dtype *d_raw_data,
size_t num_samples,
size_t num_tables,
size_t num_iterations,
const std::vector<size_t> &table_sizes) {
size_t num_elements = num_samples * num_tables * num_iterations;
std::vector<dtype> h_raw_data(num_elements, (dtype) 0);
hipStream_t stream = 0;
CK_CUDA_THROW_(
hipMemcpyAsync(h_raw_data.data(),
d_raw_data, num_elements * sizeof(dtype),
hipMemcpyDeviceToHost, stream));
CK_CUDA_THROW_(hipStreamSynchronize(stream));
for (size_t iteration = 0; iteration < num_iterations; ++iteration) {
for (size_t sample = 0; sample < num_samples; ++sample) {
for (size_t embedding = 0; embedding < num_tables; ++embedding) {
size_t category =
(size_t) h_raw_data[iteration*num_samples*num_tables + sample*num_tables + embedding];
EXPECT_TRUE(category < table_sizes[embedding]);
}
}
}
}
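// Checks that each unique category in Data::samples equals the raw category plus its table's
// embedding offset and stays below the total category count.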
template <typename dtype>
void test_samples(dtype *d_raw_data, Data<dtype> &data) {
const size_t num_iterations = data.num_iterations;
const size_t num_samples = data.batch_size;
const size_t num_tables = data.table_sizes.size();
size_t num_elements = num_iterations * num_samples * num_tables;
const size_t num_categories = EmbeddingTableFunctors<dtype>::get_num_categories(data.table_sizes);
std::vector<dtype> embedding_offsets;
EmbeddingTableFunctors<dtype>::get_embedding_offsets(embedding_offsets, data.table_sizes);
hipStream_t stream = 0;
std::vector<dtype> h_raw_data(num_elements, (dtype) 0);
CK_CUDA_THROW_(
hipMemcpyAsync(h_raw_data.data(),
d_raw_data, num_elements * sizeof(dtype),
hipMemcpyDeviceToHost, stream));
CK_CUDA_THROW_(hipStreamSynchronize(stream));
std::vector<dtype> h_samples;
download_tensor(h_samples, data.samples, stream);
for (size_t iteration = 0; iteration < num_iterations; ++iteration) {
for (size_t sample = 0; sample < num_samples; ++sample) {
for (size_t embedding = 0; embedding < num_tables; ++embedding) {
size_t indx = iteration * num_samples * num_tables + sample * num_tables + embedding;
size_t unique_category = (size_t) h_samples[indx];
size_t category_samples = (size_t) unique_category - embedding_offsets[embedding];
size_t category_data = (size_t) h_raw_data[indx];
EXPECT_TRUE(category_samples == category_data);
EXPECT_TRUE(unique_category < num_categories);
}
}
}
}
template void test_raw_data<uint32_t>(uint32_t *d_raw_data, size_t num_samples, size_t num_tables,
size_t num_iterations, const std::vector<size_t> &table_sizes);
template void test_raw_data<long long>(long long *d_raw_data, size_t num_samples, size_t num_tables,
size_t num_iterations, const std::vector<size_t> &table_sizes);
template void test_samples<uint32_t>(uint32_t *d_raw_data, Data<uint32_t> &data);
template void test_samples<long long>(long long *d_raw_data, Data<long long> &data);
}
}
TEST(data_test, uint32) { data_test<uint32_t>(); };
TEST(data_test, long_long) { data_test<long long>(); };
| 9a411cde881a52b28dec0998ce57c0add5a15b68.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "HugeCTR/include/common.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/data.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/utils.hpp"
#include "HugeCTR/include/tensor2.hpp"
#include "HugeCTR/include/utils.cuh"
using namespace HugeCTR;
using namespace hybrid_embedding;
namespace {
template <typename dtype, typename emtype = float>
void data_test() {
size_t batch_size = 4;
size_t num_iterations = 2;
std::vector<size_t> table_sizes{100, 10, 10, 20};
std::vector<dtype> data_in{99, 3, 7, 19, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
3, 3, 3, 3, 50, 2, 4, 10, 2, 2, 2, 2, 1, 1, 1, 1};
std::vector<dtype> data_to_unique_categories_ref{
99, 103, 117, 139, 0, 100, 110, 120, 1, 101, 111, 121, 2, 102, 112, 122,
3, 103, 113, 123, 50, 102, 114, 130, 2, 102, 112, 122, 1, 101, 111, 121};
Tensor2<dtype> d_data_in;
// std::cout << "debug2" << std::endl;
std::shared_ptr<GeneralBuffer2<CudaAllocator>> buff = GeneralBuffer2<CudaAllocator>::create();
buff->reserve({batch_size * num_iterations * table_sizes.size()}, &d_data_in);
buff->allocate();
upload_tensor(data_in, d_data_in, 0);
// std::cout << "debug3" << std::endl;
Data<dtype> data(table_sizes, batch_size, num_iterations);
// std::cout << "debug" << std::endl;
data.data_to_unique_categories(d_data_in, 0);
// std::cout << "debug1" << std::endl;
std::vector<dtype> data_to_unique_categories_ret;
download_tensor(data_to_unique_categories_ret, data.samples, 0);
EXPECT_THAT(data_to_unique_categories_ret,
::testing::ElementsAreArray(data_to_unique_categories_ref));
};
} // namespace
namespace HugeCTR {
namespace hybrid_embedding {
template <typename dtype>
void test_raw_data(dtype *d_raw_data,
size_t num_samples,
size_t num_tables,
size_t num_iterations,
const std::vector<size_t> &table_sizes) {
size_t num_elements = num_samples * num_tables * num_iterations;
std::vector<dtype> h_raw_data(num_elements, (dtype) 0);
cudaStream_t stream = 0;
CK_CUDA_THROW_(
cudaMemcpyAsync(h_raw_data.data(),
d_raw_data, num_elements * sizeof(dtype),
cudaMemcpyDeviceToHost, stream));
CK_CUDA_THROW_(cudaStreamSynchronize(stream));
for (size_t iteration = 0; iteration < num_iterations; ++iteration) {
for (size_t sample = 0; sample < num_samples; ++sample) {
for (size_t embedding = 0; embedding < num_tables; ++embedding) {
size_t category =
(size_t) h_raw_data[iteration*num_samples*num_tables + sample*num_tables + embedding];
EXPECT_TRUE(category < table_sizes[embedding]);
}
}
}
}
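// Checks that each unique category in Data::samples equals the raw category plus its table's
// embedding offset and stays below the total category count.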
template <typename dtype>
void test_samples(dtype *d_raw_data, Data<dtype> &data) {
const size_t num_iterations = data.num_iterations;
const size_t num_samples = data.batch_size;
const size_t num_tables = data.table_sizes.size();
size_t num_elements = num_iterations * num_samples * num_tables;
const size_t num_categories = EmbeddingTableFunctors<dtype>::get_num_categories(data.table_sizes);
std::vector<dtype> embedding_offsets;
EmbeddingTableFunctors<dtype>::get_embedding_offsets(embedding_offsets, data.table_sizes);
cudaStream_t stream = 0;
std::vector<dtype> h_raw_data(num_elements, (dtype) 0);
CK_CUDA_THROW_(
cudaMemcpyAsync(h_raw_data.data(),
d_raw_data, num_elements * sizeof(dtype),
cudaMemcpyDeviceToHost, stream));
CK_CUDA_THROW_(cudaStreamSynchronize(stream));
std::vector<dtype> h_samples;
download_tensor(h_samples, data.samples, stream);
for (size_t iteration = 0; iteration < num_iterations; ++iteration) {
for (size_t sample = 0; sample < num_samples; ++sample) {
for (size_t embedding = 0; embedding < num_tables; ++embedding) {
size_t indx = iteration * num_samples * num_tables + sample * num_tables + embedding;
size_t unique_category = (size_t) h_samples[indx];
size_t category_samples = (size_t) unique_category - embedding_offsets[embedding];
size_t category_data = (size_t) h_raw_data[indx];
EXPECT_TRUE(category_samples == category_data);
EXPECT_TRUE(unique_category < num_categories);
}
}
}
}
template void test_raw_data<uint32_t>(uint32_t *d_raw_data, size_t num_samples, size_t num_tables,
size_t num_iterations, const std::vector<size_t> &table_sizes);
template void test_raw_data<long long>(long long *d_raw_data, size_t num_samples, size_t num_tables,
size_t num_iterations, const std::vector<size_t> &table_sizes);
template void test_samples<uint32_t>(uint32_t *d_raw_data, Data<uint32_t> &data);
template void test_samples<long long>(long long *d_raw_data, Data<long long> &data);
}
}
TEST(data_test, uint32) { data_test<uint32_t>(); };
TEST(data_test, long_long) { data_test<long long>(); };
|
b901f26963859af75986c417c71c4b5c77adc072.hip | // !!! This is a file automatically generated by hipify!!!
#include <gauge_field.h>
#include <color_spinor_field.h>
#include <dslash.h>
#include <worker.h>
#include <dslash_policy.cuh>
#include <kernels/dslash_domain_wall_4d.cuh>
/**
This is the gauged domain-wall 4-d preconditioned operator.
Note, for now, this just applies a batched 4-d dslash across the fifth
dimension.
*/
namespace quda
{
template <typename Arg> class DomainWall4D : public Dslash<domainWall4D, Arg>
{
using Dslash = Dslash<domainWall4D, Arg>;
using Dslash::arg;
using Dslash::in;
public:
DomainWall4D(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) : Dslash(arg, out, in)
{
TunableVectorYZ::resizeVector(in.X(4), arg.nParity);
}
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
Dslash::setParam(tp);
typedef typename mapper<typename Arg::Float>::type real;
#ifdef JITIFY
// we need to break the dslash launch abstraction here to get a handle on the constant memory pointer in the kernel module
auto instance = Dslash::template kernel_instance<packShmem>();
cuMemcpyHtoDAsync(instance.get_constant_ptr("quda::mobius_d"), arg.a_5, QUDA_MAX_DWF_LS * sizeof(complex<real>),
stream);
Tunable::jitify_error = instance.configure(tp.grid, tp.block, tp.shared_bytes, stream).launch(arg);
#else
hipMemcpyToSymbolAsync(mobius_d, arg.a_5, QUDA_MAX_DWF_LS * sizeof(complex<real>), 0, hipMemcpyHostToDevice,
streams[Nstream - 1]);
Dslash::template instantiate<packShmem>(tp, stream);
#endif
}
};
template <typename Float, int nColor, QudaReconstructType recon> struct DomainWall4DApply {
inline DomainWall4DApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, double a,
double m_5, const Complex *b_5, const Complex *c_5, const ColorSpinorField &x, int parity,
bool dagger, const int *comm_override, TimeProfile &profile)
{
constexpr int nDim = 4;
DomainWall4DArg<Float, nColor, nDim, recon> arg(out, in, U, a, m_5, b_5, c_5, a != 0.0, x, parity, dagger,
comm_override);
DomainWall4D<decltype(arg)> dwf(arg, out, in);
dslash::DslashPolicyTune<decltype(dwf)> policy(
dwf, const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)),
in.getDslashConstant().volume_4d_cb, in.getDslashConstant().ghostFaceCB, profile);
policy.apply(0);
checkCudaError();
}
};
// Apply the 4-d preconditioned domain-wall Dslash operator
// out(x) = M*in = in(x) + a*\sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu)
void ApplyDomainWall4D(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, double a, double m_5,
const Complex *b_5, const Complex *c_5, const ColorSpinorField &x, int parity, bool dagger,
const int *comm_override, TimeProfile &profile)
{
#ifdef GPU_DOMAIN_WALL_DIRAC
if (in.V() == out.V()) errorQuda("Aliasing pointers");
if (in.FieldOrder() != out.FieldOrder())
errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder());
// check all precisions match
checkPrecision(out, in, x, U);
// check all locations match
checkLocation(out, in, x, U);
instantiate<DomainWall4DApply>(out, in, U, a, m_5, b_5, c_5, x, parity, dagger, comm_override, profile);
#else
errorQuda("Domain-wall dslash has not been built");
#endif // GPU_DOMAIN_WALL_DIRAC
}
} // namespace quda
| b901f26963859af75986c417c71c4b5c77adc072.cu | #include <gauge_field.h>
#include <color_spinor_field.h>
#include <dslash.h>
#include <worker.h>
#include <dslash_policy.cuh>
#include <kernels/dslash_domain_wall_4d.cuh>
/**
This is the gauged domain-wall 4-d preconditioned operator.
Note, for now, this just applies a batched 4-d dslash across the fifth
dimension.
*/
namespace quda
{
template <typename Arg> class DomainWall4D : public Dslash<domainWall4D, Arg>
{
using Dslash = Dslash<domainWall4D, Arg>;
using Dslash::arg;
using Dslash::in;
public:
DomainWall4D(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) : Dslash(arg, out, in)
{
TunableVectorYZ::resizeVector(in.X(4), arg.nParity);
}
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
Dslash::setParam(tp);
typedef typename mapper<typename Arg::Float>::type real;
#ifdef JITIFY
// we need to break the dslash launch abstraction here to get a handle on the constant memory pointer in the kernel module
auto instance = Dslash::template kernel_instance<packShmem>();
cuMemcpyHtoDAsync(instance.get_constant_ptr("quda::mobius_d"), arg.a_5, QUDA_MAX_DWF_LS * sizeof(complex<real>),
stream);
Tunable::jitify_error = instance.configure(tp.grid, tp.block, tp.shared_bytes, stream).launch(arg);
#else
cudaMemcpyToSymbolAsync(mobius_d, arg.a_5, QUDA_MAX_DWF_LS * sizeof(complex<real>), 0, cudaMemcpyHostToDevice,
streams[Nstream - 1]);
Dslash::template instantiate<packShmem>(tp, stream);
#endif
}
};
template <typename Float, int nColor, QudaReconstructType recon> struct DomainWall4DApply {
inline DomainWall4DApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, double a,
double m_5, const Complex *b_5, const Complex *c_5, const ColorSpinorField &x, int parity,
bool dagger, const int *comm_override, TimeProfile &profile)
{
constexpr int nDim = 4;
DomainWall4DArg<Float, nColor, nDim, recon> arg(out, in, U, a, m_5, b_5, c_5, a != 0.0, x, parity, dagger,
comm_override);
DomainWall4D<decltype(arg)> dwf(arg, out, in);
dslash::DslashPolicyTune<decltype(dwf)> policy(
dwf, const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)),
in.getDslashConstant().volume_4d_cb, in.getDslashConstant().ghostFaceCB, profile);
policy.apply(0);
checkCudaError();
}
};
// Apply the 4-d preconditioned domain-wall Dslash operator
// out(x) = M*in = in(x) + a*\sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu)
void ApplyDomainWall4D(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, double a, double m_5,
const Complex *b_5, const Complex *c_5, const ColorSpinorField &x, int parity, bool dagger,
const int *comm_override, TimeProfile &profile)
{
#ifdef GPU_DOMAIN_WALL_DIRAC
if (in.V() == out.V()) errorQuda("Aliasing pointers");
if (in.FieldOrder() != out.FieldOrder())
errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder());
// check all precisions match
checkPrecision(out, in, x, U);
// check all locations match
checkLocation(out, in, x, U);
instantiate<DomainWall4DApply>(out, in, U, a, m_5, b_5, c_5, x, parity, dagger, comm_override, profile);
#else
errorQuda("Domain-wall dslash has not been built");
#endif // GPU_DOMAIN_WALL_DIRAC
}
} // namespace quda
|
a54a81aca4944dcb303bc9b47cfcd5f1a38767aa.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.h"
#include <gtest/gtest.h>
#include <optional>
#include <raft/core/resource/cuda_stream.hpp>
#include <vector>
#include <raft/cluster/kmeans.cuh>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/make_blobs.cuh>
#include <raft/util/cuda_utils.cuh>
namespace raft {
template <typename T>
struct KmeansFindKInputs {
int n_row;
int n_col;
int n_clusters;
T tol;
bool weighted;
};
template <typename T>
class KmeansFindKTest : public ::testing::TestWithParam<KmeansFindKInputs<T>> {
protected:
KmeansFindKTest()
: stream(resource::get_cuda_stream(handle)), best_k(raft::make_host_scalar<int>(0))
{
}
void basicTest()
{
testparams = ::testing::TestWithParam<KmeansFindKInputs<T>>::GetParam();
int n_samples = testparams.n_row;
int n_features = testparams.n_col;
int n_clusters = testparams.n_clusters;
auto X = raft::make_device_matrix<T, int>(handle, n_samples, n_features);
auto labels = raft::make_device_vector<int, int>(handle, n_samples);
raft::random::make_blobs<T, int>(X.data_handle(),
labels.data_handle(),
n_samples,
n_features,
n_clusters,
stream,
true,
nullptr,
nullptr,
T(.001),
false,
(T)-10.0f,
(T)10.0f,
(uint64_t)1234);
auto inertia = raft::make_host_scalar<T>(0);
auto n_iter = raft::make_host_scalar<int>(0);
auto X_view =
raft::make_device_matrix_view<const T, int>(X.data_handle(), X.extent(0), X.extent(1));
raft::cluster::kmeans::find_k<int, T>(
handle, X_view, best_k.view(), inertia.view(), n_iter.view(), n_clusters);
resource::sync_stream(handle, stream);
}
void SetUp() override { basicTest(); }
protected:
raft::resources handle;
hipStream_t stream;
KmeansFindKInputs<T> testparams;
raft::host_scalar<int> best_k;
};
const std::vector<KmeansFindKInputs<float>> inputsf2 = {{1000, 32, 8, 0.001f, true},
{1000, 32, 8, 0.001f, false},
{1000, 100, 20, 0.001f, true},
{1000, 100, 20, 0.001f, false},
{10000, 32, 10, 0.001f, true},
{10000, 32, 10, 0.001f, false},
{10000, 100, 50, 0.001f, true},
{10000, 100, 50, 0.001f, false},
{10000, 500, 100, 0.001f, true},
{10000, 500, 100, 0.001f, false}};
const std::vector<KmeansFindKInputs<double>> inputsd2 = {{1000, 32, 5, 0.0001, true},
{1000, 32, 5, 0.0001, false},
{1000, 100, 20, 0.0001, true},
{1000, 100, 20, 0.0001, false},
{10000, 32, 10, 0.0001, true},
{10000, 32, 10, 0.0001, false},
{10000, 100, 50, 0.0001, true},
{10000, 100, 50, 0.0001, false},
{10000, 500, 100, 0.0001, true},
{10000, 500, 100, 0.0001, false}};
typedef KmeansFindKTest<float> KmeansFindKTestF;
TEST_P(KmeansFindKTestF, Result)
{
if (best_k.view()[0] != testparams.n_clusters) {
std::cout << best_k.view()[0] << " " << testparams.n_clusters << std::endl;
}
ASSERT_TRUE(best_k.view()[0] == testparams.n_clusters);
}
typedef KmeansFindKTest<double> KmeansFindKTestD;
TEST_P(KmeansFindKTestD, Result)
{
if (best_k.view()[0] != testparams.n_clusters) {
std::cout << best_k.view()[0] << " " << testparams.n_clusters << std::endl;
}
ASSERT_TRUE(best_k.view()[0] == testparams.n_clusters);
}
INSTANTIATE_TEST_CASE_P(KmeansFindKTests, KmeansFindKTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(KmeansFindKTests, KmeansFindKTestD, ::testing::ValuesIn(inputsd2));
} // namespace raft
| a54a81aca4944dcb303bc9b47cfcd5f1a38767aa.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.h"
#include <gtest/gtest.h>
#include <optional>
#include <raft/core/resource/cuda_stream.hpp>
#include <vector>
#include <raft/cluster/kmeans.cuh>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/make_blobs.cuh>
#include <raft/util/cuda_utils.cuh>
namespace raft {
template <typename T>
struct KmeansFindKInputs {
int n_row;
int n_col;
int n_clusters;
T tol;
bool weighted;
};
template <typename T>
class KmeansFindKTest : public ::testing::TestWithParam<KmeansFindKInputs<T>> {
protected:
KmeansFindKTest()
: stream(resource::get_cuda_stream(handle)), best_k(raft::make_host_scalar<int>(0))
{
}
void basicTest()
{
testparams = ::testing::TestWithParam<KmeansFindKInputs<T>>::GetParam();
int n_samples = testparams.n_row;
int n_features = testparams.n_col;
int n_clusters = testparams.n_clusters;
auto X = raft::make_device_matrix<T, int>(handle, n_samples, n_features);
auto labels = raft::make_device_vector<int, int>(handle, n_samples);
raft::random::make_blobs<T, int>(X.data_handle(),
labels.data_handle(),
n_samples,
n_features,
n_clusters,
stream,
true,
nullptr,
nullptr,
T(.001),
false,
(T)-10.0f,
(T)10.0f,
(uint64_t)1234);
auto inertia = raft::make_host_scalar<T>(0);
auto n_iter = raft::make_host_scalar<int>(0);
auto X_view =
raft::make_device_matrix_view<const T, int>(X.data_handle(), X.extent(0), X.extent(1));
raft::cluster::kmeans::find_k<int, T>(
handle, X_view, best_k.view(), inertia.view(), n_iter.view(), n_clusters);
resource::sync_stream(handle, stream);
}
void SetUp() override { basicTest(); }
protected:
raft::resources handle;
cudaStream_t stream;
KmeansFindKInputs<T> testparams;
raft::host_scalar<int> best_k;
};
const std::vector<KmeansFindKInputs<float>> inputsf2 = {{1000, 32, 8, 0.001f, true},
{1000, 32, 8, 0.001f, false},
{1000, 100, 20, 0.001f, true},
{1000, 100, 20, 0.001f, false},
{10000, 32, 10, 0.001f, true},
{10000, 32, 10, 0.001f, false},
{10000, 100, 50, 0.001f, true},
{10000, 100, 50, 0.001f, false},
{10000, 500, 100, 0.001f, true},
{10000, 500, 100, 0.001f, false}};
const std::vector<KmeansFindKInputs<double>> inputsd2 = {{1000, 32, 5, 0.0001, true},
{1000, 32, 5, 0.0001, false},
{1000, 100, 20, 0.0001, true},
{1000, 100, 20, 0.0001, false},
{10000, 32, 10, 0.0001, true},
{10000, 32, 10, 0.0001, false},
{10000, 100, 50, 0.0001, true},
{10000, 100, 50, 0.0001, false},
{10000, 500, 100, 0.0001, true},
{10000, 500, 100, 0.0001, false}};
typedef KmeansFindKTest<float> KmeansFindKTestF;
TEST_P(KmeansFindKTestF, Result)
{
if (best_k.view()[0] != testparams.n_clusters) {
std::cout << best_k.view()[0] << " " << testparams.n_clusters << std::endl;
}
ASSERT_TRUE(best_k.view()[0] == testparams.n_clusters);
}
typedef KmeansFindKTest<double> KmeansFindKTestD;
TEST_P(KmeansFindKTestD, Result)
{
if (best_k.view()[0] != testparams.n_clusters) {
std::cout << best_k.view()[0] << " " << testparams.n_clusters << std::endl;
}
ASSERT_TRUE(best_k.view()[0] == testparams.n_clusters);
}
INSTANTIATE_TEST_CASE_P(KmeansFindKTests, KmeansFindKTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(KmeansFindKTests, KmeansFindKTestD, ::testing::ValuesIn(inputsd2));
} // namespace raft
|
9ca6ca1e6148711004d9bcf360d11b7322625784.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_yvel_plus_4_right [3][2];
static int dims_update_halo_kernel2_yvel_plus_4_right_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_yvel_plus_4_right_gpu(ACC<double> &yvel0,
ACC<double> &yvel1,
const int* fields)
{
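  // For each field whose flag is set, copy in the value from four sites in the negative-x
  // direction -- as the kernel name suggests, this fills the right-face halo.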
if(fields[FIELD_YVEL0] == 1) yvel0(0,0,0) = yvel0(-4,0,0);
if(fields[FIELD_YVEL1] == 1) yvel1(0,0,0) = yvel1(-4,0,0);
}
__global__ void ops_update_halo_kernel2_yvel_plus_4_right(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
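  // Advance arg0/arg1 from the block base set up by the host stub to this thread's
  // (idx_x, idx_y, idx_z) element; the per-dat x/y extents used in the strides come from the
  // __constant__ dims array that the host stub uploads before launch.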
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_4_right[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_4_right[0][0] * dims_update_halo_kernel2_yvel_plus_4_right[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_4_right[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_4_right[1][0] * dims_update_halo_kernel2_yvel_plus_4_right[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_yvel_plus_4_right[0][0], dims_update_halo_kernel2_yvel_plus_4_right[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_yvel_plus_4_right[1][0], dims_update_halo_kernel2_yvel_plus_4_right[1][1], arg1);
update_halo_kernel2_yvel_plus_4_right_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_plus_4_right_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,42)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(42,"update_halo_kernel2_yvel_plus_4_right");
OPS_kernels[42].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_yvel_plus_4_right_h[0][0] || ydim0 != dims_update_halo_kernel2_yvel_plus_4_right_h[0][1] || xdim1 != dims_update_halo_kernel2_yvel_plus_4_right_h[1][0] || ydim1 != dims_update_halo_kernel2_yvel_plus_4_right_h[1][1]) {
dims_update_halo_kernel2_yvel_plus_4_right_h[0][0] = xdim0;
dims_update_halo_kernel2_yvel_plus_4_right_h[0][1] = ydim0;
dims_update_halo_kernel2_yvel_plus_4_right_h[1][0] = xdim1;
dims_update_halo_kernel2_yvel_plus_4_right_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel2_yvel_plus_4_right, dims_update_halo_kernel2_yvel_plus_4_right_h, sizeof(dims_update_halo_kernel2_yvel_plus_4_right)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
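  // Stage the NUM_FIELDS integer flags (arg2) in OPS' reusable host-side consts buffer and
  // mirror them to the device-side buffer so the kernel can read them.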
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[42].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_plus_4_right), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[42].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[42].mpi_time += t2-t1;
OPS_kernels[42].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[42].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 42;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 42;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_plus_4_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(42,"update_halo_kernel2_yvel_plus_4_right");
}
ops_enqueue_kernel(desc);
}
#endif
| 9ca6ca1e6148711004d9bcf360d11b7322625784.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_yvel_plus_4_right [3][2];
static int dims_update_halo_kernel2_yvel_plus_4_right_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_yvel_plus_4_right_gpu(ACC<double> &yvel0,
ACC<double> &yvel1,
const int* fields)
{
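  // For each field whose flag is set, copy in the value from four sites in the negative-x
  // direction -- as the kernel name suggests, this fills the right-face halo.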
if(fields[FIELD_YVEL0] == 1) yvel0(0,0,0) = yvel0(-4,0,0);
if(fields[FIELD_YVEL1] == 1) yvel1(0,0,0) = yvel1(-4,0,0);
}
__global__ void ops_update_halo_kernel2_yvel_plus_4_right(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_4_right[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_4_right[0][0] * dims_update_halo_kernel2_yvel_plus_4_right[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_4_right[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_4_right[1][0] * dims_update_halo_kernel2_yvel_plus_4_right[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_yvel_plus_4_right[0][0], dims_update_halo_kernel2_yvel_plus_4_right[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_yvel_plus_4_right[1][0], dims_update_halo_kernel2_yvel_plus_4_right[1][1], arg1);
update_halo_kernel2_yvel_plus_4_right_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_plus_4_right_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,42)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(42,"update_halo_kernel2_yvel_plus_4_right");
OPS_kernels[42].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_yvel_plus_4_right_h[0][0] || ydim0 != dims_update_halo_kernel2_yvel_plus_4_right_h[0][1] || xdim1 != dims_update_halo_kernel2_yvel_plus_4_right_h[1][0] || ydim1 != dims_update_halo_kernel2_yvel_plus_4_right_h[1][1]) {
dims_update_halo_kernel2_yvel_plus_4_right_h[0][0] = xdim0;
dims_update_halo_kernel2_yvel_plus_4_right_h[0][1] = ydim0;
dims_update_halo_kernel2_yvel_plus_4_right_h[1][0] = xdim1;
dims_update_halo_kernel2_yvel_plus_4_right_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel2_yvel_plus_4_right, dims_update_halo_kernel2_yvel_plus_4_right_h, sizeof(dims_update_halo_kernel2_yvel_plus_4_right)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[42].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_yvel_plus_4_right<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[42].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[42].mpi_time += t2-t1;
OPS_kernels[42].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[42].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 42;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 42;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_plus_4_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(42,"update_halo_kernel2_yvel_plus_4_right");
}
ops_enqueue_kernel(desc);
}
#endif
|
0550fe230943787f5b55852ace2142cf420725ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matmultKernel.h"
// Define a gpu kernel to perform matrix multiplication
// of A x B = C.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C){
// matrix blocks
float *Asub, *Bsub, *Csub;
// Putting these into registers speeds access.
int thread_row = threadIdx.y;
int thread_col = threadIdx.x;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
// Each THREAD BLOCK computes one sub matrix Csub of C
// EACH THREAD creates its own matrix descriptor Csub
Csub = &C.elements[(C.stride * (BLOCK_SIZE * 2) * block_row) + (BLOCK_SIZE * 2) * block_col];
// Each thread computes one element of Csub in its copy of CValue
float Cvalue00 = 0;
float Cvalue01 = 0;
float Cvalue10 = 0;
float Cvalue11 = 0;
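  // These four accumulators form the 2x2 micro-tile this thread owns inside the
  // (2*BLOCK_SIZE) x (2*BLOCK_SIZE) Csub: elements (row,col), (row,col+BLOCK_SIZE),
  // (row+BLOCK_SIZE,col) and (row+BLOCK_SIZE,col+BLOCK_SIZE). E.g. with BLOCK_SIZE = 16,
  // a 16x16 thread block computes a 32x32 tile of C, four elements per thread.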
// Loop over all sub matrices in block_row of A and block_col of B
// required to compute Csub. Block multiply each pair of sub matrices
// and accumulate results
for (int m = 0; m < (A.width / (BLOCK_SIZE * 2)); ++m) {
// Get Asub and Bsub descriptors
Asub = &A.elements[A.stride * (BLOCK_SIZE * 2) * block_row + (BLOCK_SIZE * 2) * m];
Bsub = &B.elements[B.stride * (BLOCK_SIZE * 2) * m + (BLOCK_SIZE * 2) * block_col];
// Copy ELEMENTS OF ASub and Bsub into shared memory
// EACH THREAD loads ONE ELEMENT of ASub and ONE of Bsub
// Notice: it does not need to be the element it requires to
// compute its Cvalue, as long as all elements are
// collaboratively read.
// Notice: every thread declares shared_A and shared_B in shared memory
// even though a thread block has only one shared_A and one shared_B
__shared__ float shared_A[(BLOCK_SIZE * 2)][(BLOCK_SIZE * 2)];
__shared__ float shared_B[(BLOCK_SIZE * 2)][(BLOCK_SIZE * 2)];
// Each thread copies just one element of shared_A and one element of shared_B
shared_A[thread_row][thread_col] = Asub[thread_row * A.stride + thread_col];
shared_A[thread_row][thread_col + BLOCK_SIZE] = Asub[thread_row * A.stride + (thread_col + BLOCK_SIZE)];
shared_A[thread_row + BLOCK_SIZE][thread_col] = Asub[(thread_row + BLOCK_SIZE) * A.stride + thread_col];
shared_A[thread_row + BLOCK_SIZE][thread_col + BLOCK_SIZE] = Asub[(thread_row + BLOCK_SIZE) * A.stride + (thread_col + BLOCK_SIZE)];
shared_B[thread_row][thread_col] = Bsub[thread_row * B.stride + thread_col];
shared_B[thread_row][thread_col + BLOCK_SIZE] = Bsub[thread_row * B.stride + (thread_col + BLOCK_SIZE)];
shared_B[thread_row + BLOCK_SIZE][thread_col] = Bsub[(thread_row + BLOCK_SIZE) * B.stride + thread_col];
shared_B[thread_row + BLOCK_SIZE][thread_col + BLOCK_SIZE] = Bsub[(thread_row + BLOCK_SIZE) * B.stride + (thread_col + BLOCK_SIZE)];
// Synchronize to ensure all elements are read
__syncthreads();
// Do an inproduct of one row of shared_A and one col of shared_B
// computing one Cvalue by accumulation
#pragma unroll
for(int e = 0; e < (BLOCK_SIZE * 2); ++e) {
Cvalue00 += shared_A[thread_row][e] * shared_B[e][thread_col];
Cvalue01 += shared_A[thread_row][e] * shared_B[e][thread_col + BLOCK_SIZE];
Cvalue10 += shared_A[thread_row + BLOCK_SIZE][e] * shared_B[e][thread_col];
Cvalue11 += shared_A[(thread_row + BLOCK_SIZE)][e] * shared_B[e][thread_col + BLOCK_SIZE];
}
// Synchronize to ensure all Cvalues have been incremented
// before reading in the next shared_A AND shared_B BLOCKS
__syncthreads();
}
// Write Csub to GLOBAL memory.
// Each thread writes its own cell value.
Csub[(thread_row * C.stride) + thread_col] = Cvalue00;
Csub[(thread_row * C.stride) + (thread_col + BLOCK_SIZE)] = Cvalue01;
Csub[((thread_row + BLOCK_SIZE) * C.stride) + thread_col] = Cvalue10;
Csub[((thread_row + BLOCK_SIZE) * C.stride) + (thread_col + BLOCK_SIZE)] = Cvalue11;
}
| 0550fe230943787f5b55852ace2142cf420725ca.cu |
#include "matmultKernel.h"
// Define a gpu kernel to perform matrix multiplication
// of A x B = C.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C){
// matrix blocks
float *Asub, *Bsub, *Csub;
// Putting these into registers speeds access.
int thread_row = threadIdx.y;
int thread_col = threadIdx.x;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
// Each THREAD BLOCK computes one sub matrix Csub of C
// EACH THREAD creates its own matrix descriptor Csub
Csub = &C.elements[(C.stride * (BLOCK_SIZE * 2) * block_row) + (BLOCK_SIZE * 2) * block_col];
// Each thread computes one element of Csub in its copy of CValue
float Cvalue00 = 0;
float Cvalue01 = 0;
float Cvalue10 = 0;
float Cvalue11 = 0;
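  // These four accumulators form the 2x2 micro-tile this thread owns inside the
  // (2*BLOCK_SIZE) x (2*BLOCK_SIZE) Csub: elements (row,col), (row,col+BLOCK_SIZE),
  // (row+BLOCK_SIZE,col) and (row+BLOCK_SIZE,col+BLOCK_SIZE). E.g. with BLOCK_SIZE = 16,
  // a 16x16 thread block computes a 32x32 tile of C, four elements per thread.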
// Loop over all sub matrices in block_row of A and block_col of B
// required to compute Csub. Block multiply each pair of sub matrices
// and accumulate results
for (int m = 0; m < (A.width / (BLOCK_SIZE * 2)); ++m) {
// Get Asub and Bsub descriptors
Asub = &A.elements[A.stride * (BLOCK_SIZE * 2) * block_row + (BLOCK_SIZE * 2) * m];
Bsub = &B.elements[B.stride * (BLOCK_SIZE * 2) * m + (BLOCK_SIZE * 2) * block_col];
// Copy ELEMENTS OF ASub and Bsub into shared memory
// EACH THREAD loads ONE ELEMENT of ASub and ONE of Bsub
// Notice: it does not need to be the element it requires to
// compute its Cvalue, as long as all elements are
// collaboratively read.
// Notice: every thread declares shared_A and shared_B in shared memory
// even though a thread block has only one shared_A and one shared_B
__shared__ float shared_A[(BLOCK_SIZE * 2)][(BLOCK_SIZE * 2)];
__shared__ float shared_B[(BLOCK_SIZE * 2)][(BLOCK_SIZE * 2)];
// Each thread copies just one element of shared_A and one element of shared_B
shared_A[thread_row][thread_col] = Asub[thread_row * A.stride + thread_col];
shared_A[thread_row][thread_col + BLOCK_SIZE] = Asub[thread_row * A.stride + (thread_col + BLOCK_SIZE)];
shared_A[thread_row + BLOCK_SIZE][thread_col] = Asub[(thread_row + BLOCK_SIZE) * A.stride + thread_col];
shared_A[thread_row + BLOCK_SIZE][thread_col + BLOCK_SIZE] = Asub[(thread_row + BLOCK_SIZE) * A.stride + (thread_col + BLOCK_SIZE)];
shared_B[thread_row][thread_col] = Bsub[thread_row * B.stride + thread_col];
shared_B[thread_row][thread_col + BLOCK_SIZE] = Bsub[thread_row * B.stride + (thread_col + BLOCK_SIZE)];
shared_B[thread_row + BLOCK_SIZE][thread_col] = Bsub[(thread_row + BLOCK_SIZE) * B.stride + thread_col];
shared_B[thread_row + BLOCK_SIZE][thread_col + BLOCK_SIZE] = Bsub[(thread_row + BLOCK_SIZE) * B.stride + (thread_col + BLOCK_SIZE)];
// Synchronize to ensure all elements are read
__syncthreads();
// Do an inproduct of one row of shared_A and one col of shared_B
// computing one Cvalue by accumulation
#pragma unroll
for(int e = 0; e < (BLOCK_SIZE * 2); ++e) {
Cvalue00 += shared_A[thread_row][e] * shared_B[e][thread_col];
Cvalue01 += shared_A[thread_row][e] * shared_B[e][thread_col + BLOCK_SIZE];
Cvalue10 += shared_A[thread_row + BLOCK_SIZE][e] * shared_B[e][thread_col];
Cvalue11 += shared_A[(thread_row + BLOCK_SIZE)][e] * shared_B[e][thread_col + BLOCK_SIZE];
}
// Synchronize to ensure all Cvalues have been incremented
// before reading in the next shared_A AND shared_B BLOCKS
__syncthreads();
}
// Write Csub to GLOBAL memory.
// Each thread writes its own cell value.
Csub[(thread_row * C.stride) + thread_col] = Cvalue00;
Csub[(thread_row * C.stride) + (thread_col + BLOCK_SIZE)] = Cvalue01;
Csub[((thread_row + BLOCK_SIZE) * C.stride) + thread_col] = Cvalue10;
Csub[((thread_row + BLOCK_SIZE) * C.stride) + (thread_col + BLOCK_SIZE)] = Cvalue11;
}
|
0907c8a7b87ac4f7b90115db9bed8290324cfea6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include "FDTD3dGPU.h"
#include <iostream>
#include <algorithm>
#include <cutil_inline.h>
#include <shrUtils.h>
#include "FDTD3dGPUKernel.cuh"
bool getTargetDeviceGlobalMemSize(memsize_t *result, const int argc, const char **argv)
{
bool ok = true;
int deviceCount = 0;
int targetDevice = 0;
size_t memsize = 0;
hipError_t errnum = hipSuccess;
// Get the number of CUDA enabled GPU devices
if (ok)
{
shrLog(" hipGetDeviceCount\n");
errnum = hipGetDeviceCount(&deviceCount);
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "hipGetDeviceCount '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
}
// Select target device (device 0 by default)
if (ok)
{
char *device = 0;
if (shrGetCmdLineArgumentstr(argc, argv, "device", &device))
{
targetDevice = (unsigned int)atoi(device);
if (targetDevice >= deviceCount)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "invalid target device specified on command line (device %d does not exist).\n", targetDevice);
ok = false;
}
}
else
{
targetDevice = cutGetMaxGflopsDeviceId();
}
if (device)
free(device);
}
// Query target device for maximum memory allocation
if (ok)
{
shrLog(" hipGetDeviceProperties\n");
struct hipDeviceProp_t deviceProp;
errnum = hipGetDeviceProperties(&deviceProp, targetDevice);
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "hipGetDeviceProperties '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
memsize = deviceProp.totalGlobalMem;
}
// Save the result
if (ok)
{
*result = (memsize_t)memsize;
}
return ok;
}
bool fdtdGPU(float *output, const float *input, const float *coeff, const int dimx, const int dimy, const int dimz, const int radius, const int timesteps, const int argc, const char **argv)
{
bool ok = true;
const int outerDimx = dimx + 2 * radius;
const int outerDimy = dimy + 2 * radius;
const int outerDimz = dimz + 2 * radius;
const size_t volumeSize = outerDimx * outerDimy * outerDimz;
int deviceCount = 0;
int targetDevice = 0;
float *bufferOut = 0;
float *bufferIn = 0;
dim3 dimBlock;
dim3 dimGrid;
hipError_t errnum = hipSuccess;
// Ensure that the inner data starts on a 128B boundary
const int padding = (128 / sizeof(float)) - radius;
const size_t paddedVolumeSize = volumeSize + padding;
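    // 128 bytes hold 32 floats, so padding = 32 - radius and the first non-halo element
    // (at offset padding + radius = 32 floats from the allocation) lands on a 128B boundary.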
#ifdef GPU_PROFILING
hipEvent_t profileStart = 0;
hipEvent_t profileEnd = 0;
const int profileTimesteps = timesteps - 1;
if (ok)
{
if (profileTimesteps < 1)
{
shrLog(" cannot profile with fewer than two timesteps (timesteps=%d), profiling is disabled.\n", timesteps);
}
}
#endif
// Check the radius is valid
if (ok)
{
if (radius != RADIUS)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "radius is invalid, must be %d - see kernel for details.\n", RADIUS);
ok = false;
}
}
// Get the number of CUDA enabled GPU devices
if (ok)
{
shrLog(" hipGetDeviceCount\n");
errnum = hipGetDeviceCount(&deviceCount);
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "hipGetDeviceCount '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
}
// Select target device (device 0 by default)
if (ok)
{
char *device = 0;
if (shrGetCmdLineArgumentstr(argc, argv, "device", &device))
{
targetDevice = (unsigned int)atoi(device);
if (targetDevice >= deviceCount)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "invalid target device specified on command line (device %d does not exist).\n", targetDevice);
ok = false;
}
}
else
{
targetDevice = cutGetMaxGflopsDeviceId();
}
shrLog(" hipSetDevice (device %d)\n", targetDevice);
errnum = hipSetDevice(targetDevice);
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "hipSetDevice '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
if (device)
free(device);
}
// Allocate memory buffers
if (ok)
{
shrLog(" hipMalloc bufferOut\n");
errnum = hipMalloc((void **)&bufferOut, paddedVolumeSize * sizeof(float));
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "hipMalloc '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
}
if (ok)
{
shrLog(" hipMalloc bufferIn\n");
errnum = hipMalloc((void **)&bufferIn, paddedVolumeSize * sizeof(float));
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "hipMalloc '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
}
// Check for a command-line specified block size
int userBlockSize;
if (ok)
{
if (shrGetCmdLineArgumenti(argc, argv, "block-size", &userBlockSize))
{
// Constrain to a multiple of k_blockDimX
userBlockSize = (userBlockSize / k_blockDimX * k_blockDimX);
// Constrain within allowed bounds
userBlockSize = CLAMP(userBlockSize, k_blockSizeMin, k_blockSizeMax);
}
else
{
userBlockSize = k_blockSizeMax;
}
}
// Check the device limit on the number of threads
if (ok)
{
shrLog(" hipFuncGetAttributes\n");
struct hipFuncAttributes funcAttrib;
errnum = hipFuncGetAttributes(&funcAttrib, FiniteDifferencesKernel);
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "hipFuncGetAttributes '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
userBlockSize = MIN(userBlockSize, funcAttrib.maxThreadsPerBlock);
}
// Set the block size
if (ok)
{
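        // Fix x at k_blockDimX and size y so that x * y never exceeds userBlockSize (capped at
        // k_blockDimMaxY); the grid then tiles the dimx-by-dimy plane, and the z dimension is
        // presumably stepped through inside the kernel, since the grid is only two-dimensional.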
dimBlock.x = k_blockDimX;
// Visual Studio 2005 does not like std::min
// dimBlock.y = std::min<size_t>(userBlockSize / k_blockDimX, (size_t)k_blockDimMaxY);
dimBlock.y = ((userBlockSize / k_blockDimX) < (size_t)k_blockDimMaxY) ? (userBlockSize / k_blockDimX) : (size_t)k_blockDimMaxY;
dimGrid.x = (unsigned int)ceil((float)dimx / dimBlock.x);
dimGrid.y = (unsigned int)ceil((float)dimy / dimBlock.y);
shrLog(" set block size to %dx%d\n", dimBlock.x, dimBlock.y);
shrLog(" set grid size to %dx%d\n", dimGrid.x, dimGrid.y);
}
// Check the block size is valid
if (ok)
{
if (dimBlock.x < RADIUS || dimBlock.y < RADIUS)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "invalid block size, x (%d) and y (%d) must be >= radius (%d).\n", dimBlock.x, dimBlock.y, RADIUS);
ok = false;
}
}
// Copy the input to the device input buffer
if (ok)
{
shrLog(" hipMemcpy (HostToDevice) bufferIn\n");
errnum = hipMemcpy(bufferIn + padding, input, volumeSize * sizeof(float), hipMemcpyHostToDevice);
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "hipMemcpy '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
}
// Copy the input to the device output buffer (actually only need the halo)
if (ok)
{
shrLog(" hipMemcpy (HostToDevice) bufferOut\n");
errnum = hipMemcpy(bufferOut + padding, input, volumeSize * sizeof(float), hipMemcpyHostToDevice);
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "hipMemcpy '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
}
// Copy the coefficients to the device coefficient buffer
if (ok)
{
shrLog(" hipMemcpyToSymbol (HostToDevice) stencil\n");
errnum = hipMemcpyToSymbol(stencil, (void *)coeff, (radius + 1) * sizeof(float));
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "hipMemcpyToSymbol '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
}
#ifdef GPU_PROFILING
// Create the events
if (ok)
{
shrLog(" hipEventCreate\n");
errnum = hipEventCreate(&profileStart);
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "hipEventCreate '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
}
if (ok)
{
shrLog(" hipEventCreate\n");
errnum = hipEventCreate(&profileEnd);
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "hipEventCreate '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
}
#endif
// Execute the FDTD
float *bufferSrc = bufferIn + padding;
float *bufferDst = bufferOut + padding;
shrLog(" GPU FDTD loop\n");
for (int it = 0 ; ok && it < timesteps ; it++)
{
shrLog("\tt = %d ", it);
#ifdef GPU_PROFILING
// Enqueue start event
if (ok && it == 1)
{
errnum = hipEventRecord(profileStart, 0);
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "hipEventRecord '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
}
#endif
// Launch the kernel
if (ok)
{
shrLog("launch kernel\n");
hipLaunchKernelGGL(( FiniteDifferencesKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, bufferDst, bufferSrc, dimx, dimy, dimz);
}
// Toggle the buffers
// Visual Studio 2005 does not like std::swap
// std::swap<float *>(bufferSrc, bufferDst);
float *tmp = bufferDst;
bufferDst = bufferSrc;
bufferSrc = tmp;
}
shrLog("\n");
#ifdef GPU_PROFILING
// Enqueue end event
if (ok)
{
errnum = hipEventRecord(profileEnd, 0);
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "hipEventRecord '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
}
#endif
// Wait for the kernel to complete
if (ok)
{
shrLog(" cutilDeviceSynchronize\n");
errnum = cutilDeviceSynchronize();
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cutilDeviceSynchronize '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
}
// Read the result back, result is in bufferSrc (after final toggle)
if (ok)
{
shrLog(" hipMemcpy (DeviceToHost)\n");
errnum = hipMemcpy(output, bufferSrc, volumeSize * sizeof(float), hipMemcpyDeviceToHost);
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "hipMemcpy '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
}
// Report time
#ifdef GPU_PROFILING
float elapsedTimeMS = 0;
if (ok && profileTimesteps > 0)
{
shrLog(" hipEventElapsedTime\n\n");
errnum = hipEventElapsedTime(&elapsedTimeMS, profileStart, profileEnd);
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "hipEventElapsedTime '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
}
if (ok && profileTimesteps > 0)
{
// Convert milliseconds to seconds
double elapsedTime = elapsedTimeMS * 1.0e-3;
double avgElapsedTime = elapsedTime / (double)profileTimesteps;
// Determine number of computations per timestep
size_t pointsComputed = dimx * dimy * dimz;
// Determine throughput
double throughputM = 1.0e-6 * (double)pointsComputed / avgElapsedTime;
shrLogEx(LOGBOTH | MASTER, 0, "FDTD3d, Throughput = %.4f MPoints/s, Time = %.5f s, Size = %u Points, NumDevsUsed = %u, Blocksize = %u\n",
throughputM, avgElapsedTime, pointsComputed, 1, dimBlock.x * dimBlock.y);
}
#endif
// Cleanup
if (bufferIn)
hipFree(bufferIn);
if (bufferOut)
hipFree(bufferOut);
#ifdef GPU_PROFILING
if (profileStart)
hipEventDestroy(profileStart);
if (profileEnd)
hipEventDestroy(profileEnd);
#endif
if (ok)
{
shrLog("\n cutilDeviceReset\n");
errnum = cutilDeviceReset();
if (errnum != hipSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cutilDeviceReset '%s'.\n", hipGetErrorString(errnum));
ok = false;
}
}
return ok;
}
| 0907c8a7b87ac4f7b90115db9bed8290324cfea6.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include "FDTD3dGPU.h"
#include <iostream>
#include <algorithm>
#include <cutil_inline.h>
#include <shrUtils.h>
#include "FDTD3dGPUKernel.cuh"
bool getTargetDeviceGlobalMemSize(memsize_t *result, const int argc, const char **argv)
{
bool ok = true;
int deviceCount = 0;
int targetDevice = 0;
size_t memsize = 0;
cudaError errnum = cudaSuccess;
// Get the number of CUDA enabled GPU devices
if (ok)
{
shrLog(" cudaGetDeviceCount\n");
errnum = cudaGetDeviceCount(&deviceCount);
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cudaGetDeviceCount '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
}
// Select target device (device 0 by default)
if (ok)
{
char *device = 0;
if (shrGetCmdLineArgumentstr(argc, argv, "device", &device))
{
targetDevice = (unsigned int)atoi(device);
if (targetDevice >= deviceCount)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "invalid target device specified on command line (device %d does not exist).\n", targetDevice);
ok = false;
}
}
else
{
targetDevice = cutGetMaxGflopsDeviceId();
}
if (device)
free(device);
}
// Query target device for maximum memory allocation
if (ok)
{
shrLog(" cudaGetDeviceProperties\n");
struct cudaDeviceProp deviceProp;
errnum = cudaGetDeviceProperties(&deviceProp, targetDevice);
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cudaGetDeviceProperties '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
memsize = deviceProp.totalGlobalMem;
}
// Save the result
if (ok)
{
*result = (memsize_t)memsize;
}
return ok;
}
bool fdtdGPU(float *output, const float *input, const float *coeff, const int dimx, const int dimy, const int dimz, const int radius, const int timesteps, const int argc, const char **argv)
{
bool ok = true;
const int outerDimx = dimx + 2 * radius;
const int outerDimy = dimy + 2 * radius;
const int outerDimz = dimz + 2 * radius;
const size_t volumeSize = outerDimx * outerDimy * outerDimz;
int deviceCount = 0;
int targetDevice = 0;
float *bufferOut = 0;
float *bufferIn = 0;
dim3 dimBlock;
dim3 dimGrid;
cudaError errnum = cudaSuccess;
// Ensure that the inner data starts on a 128B boundary
const int padding = (128 / sizeof(float)) - radius;
const size_t paddedVolumeSize = volumeSize + padding;
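    // 128 bytes hold 32 floats, so padding = 32 - radius and the first non-halo element
    // (at offset padding + radius = 32 floats from the allocation) lands on a 128B boundary.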
#ifdef GPU_PROFILING
cudaEvent_t profileStart = 0;
cudaEvent_t profileEnd = 0;
const int profileTimesteps = timesteps - 1;
if (ok)
{
if (profileTimesteps < 1)
{
shrLog(" cannot profile with fewer than two timesteps (timesteps=%d), profiling is disabled.\n", timesteps);
}
}
#endif
// Check the radius is valid
if (ok)
{
if (radius != RADIUS)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "radius is invalid, must be %d - see kernel for details.\n", RADIUS);
ok = false;
}
}
// Get the number of CUDA enabled GPU devices
if (ok)
{
shrLog(" cudaGetDeviceCount\n");
errnum = cudaGetDeviceCount(&deviceCount);
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cudaGetDeviceCount '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
}
// Select target device (device 0 by default)
if (ok)
{
char *device = 0;
if (shrGetCmdLineArgumentstr(argc, argv, "device", &device))
{
targetDevice = (unsigned int)atoi(device);
if (targetDevice >= deviceCount)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "invalid target device specified on command line (device %d does not exist).\n", targetDevice);
ok = false;
}
}
else
{
targetDevice = cutGetMaxGflopsDeviceId();
}
shrLog(" cudaSetDevice (device %d)\n", targetDevice);
errnum = cudaSetDevice(targetDevice);
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cudaSetDevice '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
if (device)
free(device);
}
// Allocate memory buffers
if (ok)
{
shrLog(" cudaMalloc bufferOut\n");
errnum = cudaMalloc((void **)&bufferOut, paddedVolumeSize * sizeof(float));
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cudaMalloc '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
}
if (ok)
{
shrLog(" cudaMalloc bufferIn\n");
errnum = cudaMalloc((void **)&bufferIn, paddedVolumeSize * sizeof(float));
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cudaMalloc '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
}
// Check for a command-line specified block size
int userBlockSize;
if (ok)
{
if (shrGetCmdLineArgumenti(argc, argv, "block-size", &userBlockSize))
{
// Constrain to a multiple of k_blockDimX
userBlockSize = (userBlockSize / k_blockDimX * k_blockDimX);
// Constrain within allowed bounds
userBlockSize = CLAMP(userBlockSize, k_blockSizeMin, k_blockSizeMax);
}
else
{
userBlockSize = k_blockSizeMax;
}
}
// Check the device limit on the number of threads
if (ok)
{
shrLog(" cudaFuncGetAttributes\n");
struct cudaFuncAttributes funcAttrib;
errnum = cudaFuncGetAttributes(&funcAttrib, FiniteDifferencesKernel);
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cudaFuncGetAttributes '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
userBlockSize = MIN(userBlockSize, funcAttrib.maxThreadsPerBlock);
}
// Set the block size
if (ok)
{
dimBlock.x = k_blockDimX;
// Visual Studio 2005 does not like std::min
// dimBlock.y = std::min<size_t>(userBlockSize / k_blockDimX, (size_t)k_blockDimMaxY);
dimBlock.y = ((userBlockSize / k_blockDimX) < (size_t)k_blockDimMaxY) ? (userBlockSize / k_blockDimX) : (size_t)k_blockDimMaxY;
dimGrid.x = (unsigned int)ceil((float)dimx / dimBlock.x);
dimGrid.y = (unsigned int)ceil((float)dimy / dimBlock.y);
shrLog(" set block size to %dx%d\n", dimBlock.x, dimBlock.y);
shrLog(" set grid size to %dx%d\n", dimGrid.x, dimGrid.y);
}
// Check the block size is valid
if (ok)
{
if (dimBlock.x < RADIUS || dimBlock.y < RADIUS)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "invalid block size, x (%d) and y (%d) must be >= radius (%d).\n", dimBlock.x, dimBlock.y, RADIUS);
ok = false;
}
}
// Copy the input to the device input buffer
if (ok)
{
shrLog(" cudaMemcpy (HostToDevice) bufferIn\n");
errnum = cudaMemcpy(bufferIn + padding, input, volumeSize * sizeof(float), cudaMemcpyHostToDevice);
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cudaMemcpy '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
}
// Copy the input to the device output buffer (actually only need the halo)
if (ok)
{
shrLog(" cudaMemcpy (HostToDevice) bufferOut\n");
errnum = cudaMemcpy(bufferOut + padding, input, volumeSize * sizeof(float), cudaMemcpyHostToDevice);
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cudaMemcpy '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
}
// Copy the coefficients to the device coefficient buffer
if (ok)
{
shrLog(" cudaMemcpyToSymbol (HostToDevice) stencil\n");
errnum = cudaMemcpyToSymbol(stencil, (void *)coeff, (radius + 1) * sizeof(float));
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cudaMemcpyToSymbol '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
}
#ifdef GPU_PROFILING
// Create the events
if (ok)
{
shrLog(" cudaEventCreate\n");
errnum = cudaEventCreate(&profileStart);
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cudaEventCreate '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
}
if (ok)
{
shrLog(" cudaEventCreate\n");
errnum = cudaEventCreate(&profileEnd);
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cudaEventCreate '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
}
#endif
// Execute the FDTD
float *bufferSrc = bufferIn + padding;
float *bufferDst = bufferOut + padding;
shrLog(" GPU FDTD loop\n");
for (int it = 0 ; ok && it < timesteps ; it++)
{
shrLog("\tt = %d ", it);
#ifdef GPU_PROFILING
// Enqueue start event
if (ok && it == 1)
{
errnum = cudaEventRecord(profileStart, 0);
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cudaEventRecord '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
}
#endif
// Launch the kernel
if (ok)
{
shrLog("launch kernel\n");
FiniteDifferencesKernel<<<dimGrid, dimBlock>>>(bufferDst, bufferSrc, dimx, dimy, dimz);
}
// Toggle the buffers
// Visual Studio 2005 does not like std::swap
// std::swap<float *>(bufferSrc, bufferDst);
float *tmp = bufferDst;
bufferDst = bufferSrc;
bufferSrc = tmp;
}
shrLog("\n");
#ifdef GPU_PROFILING
// Enqueue end event
if (ok)
{
errnum = cudaEventRecord(profileEnd, 0);
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cudaEventRecord '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
}
#endif
// Wait for the kernel to complete
if (ok)
{
shrLog(" cutilDeviceSynchronize\n");
errnum = cutilDeviceSynchronize();
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cutilDeviceSynchronize '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
}
// Read the result back, result is in bufferSrc (after final toggle)
if (ok)
{
shrLog(" cudaMemcpy (DeviceToHost)\n");
errnum = cudaMemcpy(output, bufferSrc, volumeSize * sizeof(float), cudaMemcpyDeviceToHost);
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cudaMemcpy '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
}
// Report time
#ifdef GPU_PROFILING
float elapsedTimeMS = 0;
if (ok && profileTimesteps > 0)
{
shrLog(" cudaEventElapsedTime\n\n");
errnum = cudaEventElapsedTime(&elapsedTimeMS, profileStart, profileEnd);
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cudaEventElapsedTime '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
}
if (ok && profileTimesteps > 0)
{
// Convert milliseconds to seconds
double elapsedTime = elapsedTimeMS * 1.0e-3;
double avgElapsedTime = elapsedTime / (double)profileTimesteps;
// Determine number of computations per timestep
size_t pointsComputed = dimx * dimy * dimz;
// Determine throughput
double throughputM = 1.0e-6 * (double)pointsComputed / avgElapsedTime;
shrLogEx(LOGBOTH | MASTER, 0, "FDTD3d, Throughput = %.4f MPoints/s, Time = %.5f s, Size = %u Points, NumDevsUsed = %u, Blocksize = %u\n",
throughputM, avgElapsedTime, pointsComputed, 1, dimBlock.x * dimBlock.y);
}
#endif
// Cleanup
if (bufferIn)
cudaFree(bufferIn);
if (bufferOut)
cudaFree(bufferOut);
#ifdef GPU_PROFILING
if (profileStart)
cudaEventDestroy(profileStart);
if (profileEnd)
cudaEventDestroy(profileEnd);
#endif
if (ok)
{
shrLog("\n cutilDeviceReset\n");
errnum = cutilDeviceReset();
if (errnum != cudaSuccess)
{
shrLogEx(LOGBOTH | ERRORMSG, 0, "cutilDeviceReset '%s'.\n", cudaGetErrorString(errnum));
ok = false;
}
}
return ok;
}
|
3803b1a8ff7ebc1274410a2f20dfccb407b82d71.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include "cudnnUtils.h"
#include <ops/declarable/helpers/convolutions.h>
namespace nd4j {
namespace ops {
namespace platforms {
//////////////////////////////////////////////////////////////////////////
static void batchnormCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* mean, const NDArray* variance,
const NDArray* gamma, const NDArray* beta,
NDArray* output,
const double epsilon, const bool isSpatialMode) {
// input, output -> 4D:nchw, 5D:ncdhw
// mean, variance, gamma, beta -> 1xCx1x1 for 4D and 1xCx1x1x1 for 5D for BATCHNORM_MODE_SPATIAL mode
// -> 1xCxHxW for 4D and 1xCxDxHxW for 5D for BATCHNORM_MODE_PER_ACTIVATION mode
const cudnnDataType_t dataType = cudnnDataType(input->dataType());
const int xRank = input->rankOf();
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw nd4j::cuda_exception::build("conv2dCUDNN: can't set stream for cuDNN", err);
const std::vector<int> xShape = input->getShapeAsVectorInt(); // input and output have same shapes
std::vector<int> paramsShape, paramsStrides; // mean, variance, gamma and beta have same shapes
if(isSpatialMode) { // 1xCx1x1
const int iC = mean->lengthOf();
const int stride0 = mean->strideAt(0);
paramsShape = xRank == 4 ? std::vector<int>({1, iC, 1, 1}) : std::vector<int>({1, iC, 1, 1, 1});
paramsStrides = xRank == 4 ? std::vector<int>({iC*stride0, stride0, 1, 1}) : std::vector<int>({iC*stride0, stride0, 1, 1, 1});
}
else {
paramsShape = mean->getShapeAsVectorInt();
paramsStrides = xRank == 4 ? std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3)}) : std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3), (int)mean->strideAt(4)});
}
std::vector<int> xStrides = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3)};
std::vector<int> zStrides = {(int)output->strideAt(0), (int)output->strideAt(1), (int)output->strideAt(2), (int)output->strideAt(3)};
if(xRank > 4) { // 5D
xStrides.push_back((int)input->strideAt(4));
zStrides.push_back((int)output->strideAt(4));
}
cudnnTensorFormat_t format = CUDNN_TENSOR_NCHW;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(x, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(x, dataType, xRank, xShape.data(), xStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input failed", err);
// output descriptor
cudnnTensorDescriptor_t z;
cudnnCreateTensorDescriptor(&z);
if(output->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(z, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(z, dataType, xRank, xShape.data(), zStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for output failed", err);
// mean, variance, gamma and beta descriptor, the same descriptor for all of them
cudnnTensorDescriptor_t params;
cudnnCreateTensorDescriptor(&params);
if(mean->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(params, format, dataType, xRank, paramsShape.data());
else
err = cudnnSetTensorNdDescriptor(params, dataType, xRank, paramsShape.data(), paramsStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for mean/variance/gamma/beta failed", err);
// provide scaling parameters
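    // cuDNN blends dst = alpha * result + beta * dst, so alpha = 1, beta = 0 simply overwrites
    // the output; the factors are passed as float for half/float tensors and as double for
    // double tensors, hence the sizeOfT() switch below.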
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* ptrAlpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* ptrBeta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta});
// calculations
err = cudnnBatchNormalizationForwardInference(*handle, isSpatialMode ? CUDNN_BATCHNORM_SPATIAL : CUDNN_BATCHNORM_PER_ACTIVATION,
ptrAlpha, ptrBeta,
x, input->getSpecialBuffer(),
z, output->getSpecialBuffer(),
params,
gamma ? gamma->getSpecialBuffer(): nullptr,
beta ? beta->getSpecialBuffer() : nullptr,
mean->getSpecialBuffer(), variance->getSpecialBuffer(), epsilon);
if (err != 0) throw nd4j::cuda_exception::build("batchnormCUDNN: cudnnBatchNormalizationForwardInference failed", err);
// cudaErr = hipStreamSynchronize(*context->getCudaStream());
// if (cudaErr != 0)
// throw cuda_exception::build("batchnormCUDNN: hipStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta});
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_IMPL(batchnorm, ENGINE_CUDA) {
auto input = INPUT_VARIABLE(0);
auto mean = INPUT_VARIABLE(1);
auto variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
auto output = OUTPUT_VARIABLE(0);
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
const double epsilon = T_ARG(0);
if(applyScale)
gamma = INPUT_VARIABLE(3);
if(applyOffset)
beta = INPUT_VARIABLE(3 + (int)applyScale);
const int numOfIntArgs = block.getIArguments()->size();
const int inRank = input->rankOf();
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(inRank-1); // default dimension to reduce along is last dimension
const int numOfAxes = axes.size();
REQUIRE_TRUE(numOfAxes <= inRank, 0, "BATCHNORM CUDNN op: too big number of input axes to normalize over, expected number should be less or equal to rank of input array, but got %i and %i correspondingly !", numOfAxes, inRank);
// evaluate expected shape for mean, variance and gamma. These 3 arrays should have identical shapes
// for example if input shape is {2,3,4,5,6} and axes = {1,3}, then expected shape would be {1,3,1,5,1}, and if axes = {3}, then expected shape would be {5}
std::vector<Nd4jLong> expShape;
if(numOfAxes == 1)
expShape.push_back(input->sizeAt(axes[0]));
else { // get, for example, something like {1, inputDim1, 1, inputDim3, 1} if axes = {1, 3}
expShape = std::vector<Nd4jLong>(inRank, 1);
for(uint i = 0; i < numOfAxes; ++i)
expShape[axes[i]] = input->sizeAt(axes[i]);
}
REQUIRE_TRUE(mean->isSameShape(expShape) , 0, "BATCHNORM CUDNN op: wrong shape of mean array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(mean).c_str());
REQUIRE_TRUE(variance->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of variance array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(variance).c_str());
if(gamma)
REQUIRE_TRUE(gamma->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of gamma array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(gamma).c_str());
if(beta)
REQUIRE_TRUE(beta->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of beta array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(beta).c_str());
// types of all input arrays should be the same
for(int i = 1; i < block.width(); ++i)
REQUIRE_TRUE(INPUT_VARIABLE(0)->dataType() == INPUT_VARIABLE(i)->dataType(), 0, "BATCHNORM CUDNN op: types of all input arrays should be the same !");
// cudnn supports NCHW format only
const bool needPermut = axes.size() == 1 && mean->lengthOf() == input->sizeAt(-1);
if(needPermut) { // if NHWC
std::vector<int> perm = {0, 3, 1, 2}; // NHWC -> NCHW
input = new NDArray(input->permute(perm));
output = new NDArray(output->permute(perm));
}
// calculations
batchnormCUDNN(block.launchContext(), input, mean, variance, gamma, beta, output, epsilon, axes.size() == 1);
if(needPermut) {
delete input;
delete output;
}
return Status::OK();
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_CHECK(batchnorm, ENGINE_CUDA) {
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = applyScale ? INPUT_VARIABLE(3) : nullptr;
NDArray* beta = applyOffset ? INPUT_VARIABLE(3 + (int)applyScale) : nullptr;
const int numOfIntArgs = block.getIArguments()->size();
const int xRank = input->rankOf();
// disable cudnn batchnorm so far
return false;
// *********************************** //
if(xRank != 4 && xRank != 5)
return false;
// *********************************** //
const bool badType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF;
if(badType)
return false;
// *********************************** //
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(xRank-1); // default dimension to reduce along is last dimension
if(axes.size() != 1 && axes.size() != 3 && axes.size() != 4)
return false;
// *********************************** //
bool allParamsHaveSameShapeAndStrides = shape::haveSameShapeAndStrides(mean->getShapeInfo(), variance->getShapeInfo());
if(gamma)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gamma->getShapeInfo());
if(beta)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), beta->getShapeInfo());
if(!allParamsHaveSameShapeAndStrides)
return false;
// *********************************** //
bool isFormatGood = false;
if(axes.size() == 1)
isFormatGood = mean->lengthOf() == input->sizeAt(1) || mean->lengthOf() == input->sizeAt(-1); // mean [C]
else {
auto inputShapeModif = input->getShapeAsVector(); // [dim0,dim1,dim2,dim3] 4D or [dim0,dim1,dim2,dim3,dim4]
inputShapeModif[0] = 1;
isFormatGood = mean->isSameShape(inputShapeModif); // mean [1,dim1,dim2,dim3] 4D or [1,dim1,dim2,dim3,dim4]
}
if(!isFormatGood)
return false;
return true;
}
}
}
}
| 3803b1a8ff7ebc1274410a2f20dfccb407b82d71.cu | /*******************************************************************************
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include "cudnnUtils.h"
#include <ops/declarable/helpers/convolutions.h>
namespace nd4j {
namespace ops {
namespace platforms {
//////////////////////////////////////////////////////////////////////////
static void batchnormCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* mean, const NDArray* variance,
const NDArray* gamma, const NDArray* beta,
NDArray* output,
const double epsilon, const bool isSpatialMode) {
// input, output -> 4D:nchw, 5D:ncdhw
// mean, variance, gamma, beta -> 1xCx1x1 for 4D and 1xCx1x1x1 for 5D for BATCHNORM_MODE_SPATIAL mode
// -> 1xCxHxW for 4D and 1xCxDxHxW for 5D for BATCHNORM_MODE_PER_ACTIVATION mode
const cudnnDataType_t dataType = cudnnDataType(input->dataType());
const int xRank = input->rankOf();
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw nd4j::cuda_exception::build("conv2dCUDNN: can't set stream for cuDNN", err);
const std::vector<int> xShape = input->getShapeAsVectorInt(); // input and output have same shapes
std::vector<int> paramsShape, paramsStrides; // mean, variance, gamma and beta have same shapes
if(isSpatialMode) { // 1xCx1x1
const int iC = mean->lengthOf();
const int stride0 = mean->strideAt(0);
paramsShape = xRank == 4 ? std::vector<int>({1, iC, 1, 1}) : std::vector<int>({1, iC, 1, 1, 1});
paramsStrides = xRank == 4 ? std::vector<int>({iC*stride0, stride0, 1, 1}) : std::vector<int>({iC*stride0, stride0, 1, 1, 1});
}
else {
paramsShape = mean->getShapeAsVectorInt();
paramsStrides = xRank == 4 ? std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3)}) : std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3), (int)mean->strideAt(4)});
}
std::vector<int> xStrides = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3)};
std::vector<int> zStrides = {(int)output->strideAt(0), (int)output->strideAt(1), (int)output->strideAt(2), (int)output->strideAt(3)};
if(xRank > 4) { // 5D
xStrides.push_back((int)input->strideAt(4));
zStrides.push_back((int)output->strideAt(4));
}
cudnnTensorFormat_t format = CUDNN_TENSOR_NCHW;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(x, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(x, dataType, xRank, xShape.data(), xStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input failed", err);
// output descriptor
cudnnTensorDescriptor_t z;
cudnnCreateTensorDescriptor(&z);
if(output->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(z, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(z, dataType, xRank, xShape.data(), zStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for output failed", err);
// mean, variance, gamma and beta descriptor, the same descriptor for all of them
cudnnTensorDescriptor_t params;
cudnnCreateTensorDescriptor(¶ms);
if(mean->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(params, format, dataType, xRank, paramsShape.data());
else
err = cudnnSetTensorNdDescriptor(params, dataType, xRank, paramsShape.data(), paramsStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for mean/variance/gamma/beta failed", err);
if (err != 0) throw nd4j::cuda_exception::build("batchnormCUDNN: cudnnSetConvolutionNdDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* ptrAlpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* ptrBeta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta});
// calculations
err = cudnnBatchNormalizationForwardInference(*handle, isSpatialMode ? CUDNN_BATCHNORM_SPATIAL : CUDNN_BATCHNORM_PER_ACTIVATION,
ptrAlpha, ptrBeta,
x, input->getSpecialBuffer(),
z, output->getSpecialBuffer(),
params,
gamma ? gamma->getSpecialBuffer(): nullptr,
beta ? beta->getSpecialBuffer() : nullptr,
mean->getSpecialBuffer(), variance->getSpecialBuffer(), epsilon);
if (err != 0) throw nd4j::cuda_exception::build("batchnormCUDNN: cudnnBatchNormalizationForwardInference failed", err);
// cudaErr = cudaStreamSynchronize(*context->getCudaStream());
// if (cudaErr != 0)
// throw cuda_exception::build("batchnormCUDNN: cudaStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta});
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_IMPL(batchnorm, ENGINE_CUDA) {
auto input = INPUT_VARIABLE(0);
auto mean = INPUT_VARIABLE(1);
auto variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
auto output = OUTPUT_VARIABLE(0);
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
const double epsilon = T_ARG(0);
if(applyScale)
gamma = INPUT_VARIABLE(3);
if(applyOffset)
beta = INPUT_VARIABLE(3 + (int)applyScale);
const int numOfIntArgs = block.getIArguments()->size();
const int inRank = input->rankOf();
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(inRank-1); // default dimension to reduce along is last dimension
const int numOfAxes = axes.size();
REQUIRE_TRUE(numOfAxes <= inRank, 0, "BATCHNORM CUDNN op: too big number of input axes to normalize over, expected number should be less or equal to rank of input array, but got %i and %i correspondingly !", numOfAxes, inRank);
// evaluate expected shape for mean, variance and gamma. These 3 arrays should have identical shapes
// for example if input shape is {2,3,4,5,6} and axes = {1,3}, then expected shape would be {1,3,1,5,1}, and if axes = {3}, then expected shape would be {5}
std::vector<Nd4jLong> expShape;
if(numOfAxes == 1)
expShape.push_back(input->sizeAt(axes[0]));
else { // get, for example, something like {1, inputDim1, 1, inputDim3, 1} if axes = {1, 3}
expShape = std::vector<Nd4jLong>(inRank, 1);
for(uint i = 0; i < numOfAxes; ++i)
expShape[axes[i]] = input->sizeAt(axes[i]);
}
REQUIRE_TRUE(mean->isSameShape(expShape) , 0, "BATCHNORM CUDNN op: wrong shape of mean array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(mean).c_str());
REQUIRE_TRUE(variance->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of variance array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(variance).c_str());
if(gamma)
REQUIRE_TRUE(gamma->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of gamma array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(gamma).c_str());
if(beta)
REQUIRE_TRUE(beta->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of beta array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(beta).c_str());
// types of all input arrays should be the same
for(int i = 1; i < block.width(); ++i)
REQUIRE_TRUE(INPUT_VARIABLE(0)->dataType() == INPUT_VARIABLE(i)->dataType(), 0, "BATCHNORM CUDNN op: types of all input arrays should be the same !");
// cudnn supports NCHW format only
const bool needPermut = axes.size() == 1 && mean->lengthOf() == input->sizeAt(-1);
if(needPermut) { // if NHWC
std::vector<int> perm = {0, 3, 1, 2}; // NHWC -> NCHW
input = new NDArray(input->permute(perm));
output = new NDArray(output->permute(perm));
}
// calculations
batchnormCUDNN(block.launchContext(), input, mean, variance, gamma, beta, output, epsilon, axes.size() == 1);
if(needPermut) {
delete input;
delete output;
}
return Status::OK();
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_CHECK(batchnorm, ENGINE_CUDA) {
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = applyScale ? INPUT_VARIABLE(3) : nullptr;
NDArray* beta = applyOffset ? INPUT_VARIABLE(3 + (int)applyScale) : nullptr;
const int numOfIntArgs = block.getIArguments()->size();
const int xRank = input->rankOf();
// disable cudnn batchnorm so far
return false;
// *********************************** //
if(xRank != 4 && xRank != 5)
return false;
// *********************************** //
const bool badType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF;
if(badType)
return false;
// *********************************** //
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(xRank-1); // default dimension to reduce along is last dimension
if(axes.size() != 1 && axes.size() != 3 && axes.size() != 4)
return false;
// *********************************** //
bool allParamsHaveSameShapeAndStrides = shape::haveSameShapeAndStrides(mean->getShapeInfo(), variance->getShapeInfo());
if(gamma)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gamma->getShapeInfo());
if(beta)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), beta->getShapeInfo());
if(!allParamsHaveSameShapeAndStrides)
return false;
// *********************************** //
bool isFormatGood = false;
if(axes.size() == 1)
isFormatGood = mean->lengthOf() == input->sizeAt(1) || mean->lengthOf() == input->sizeAt(-1); // mean [C]
else {
auto inputShapeModif = input->getShapeAsVector(); // [dim0,dim1,dim2,dim3] 4D or [dim0,dim1,dim2,dim3,dim4]
inputShapeModif[0] = 1;
isFormatGood = mean->isSameShape(inputShapeModif); // mean [1,dim1,dim2,dim3] 4D or [1,dim1,dim2,dim3,dim4]
}
if(!isFormatGood)
return false;
return true;
}
}
}
}
|
a52158b0e33be9206300bd4d1ddf0c50bdb63ddd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <time.h>
#define NUM_BLOCKS 65535
using namespace std;
__device__ __host__
int mIndex(int i, int j, int n, int m) {
return i*m + j;
}
int64_t cache[100];
int64_t cached = 0;
int64_t factorial(int64_t n) {
if (n < 2) return 1;
if (n > cached) {
cache[n] = n * factorial(n-1);
cached = n;
// cout << "Calculating factorial " << n <<": " << cache[n] << endl;
}
return cache[n];
}
void printOrders(int* orders, int numCustomers, int numProducts) {
for (int i = 0; i < numCustomers; i++) {
for (int j = 0; j < numProducts; j++) {
cout << orders[mIndex(i, j, numCustomers, numProducts)] << " ";
}
cout << endl;
}
}
void printSet(int set, int numProducts) {
for (int i = 0; i < numProducts; i++) {
cout << set % 2 << " ";
set /= 2;
}
cout << endl;
}
void printOrdersInSequence(int* sequence,
int* orders,
int numCustomers,
int numProducts) {
for (int i = 0; i < numCustomers; i++) {
for (int j = 0; j < numProducts; j++) {
cout << orders[mIndex(i, sequence[j], numCustomers, numProducts)]
<< " ";
}
cout << endl;
}
}
// Calculates the maximum number of open stacks for a given producing sequence
__device__ __host__
int maximumOpenStacks(int* sequence,
int* orders,
int numCustomers,
int numProducts) {
int* toDo = (int*) malloc(numCustomers * sizeof(int));
int* done = (int*) malloc(numCustomers * sizeof(int));
for (int customer = 0; customer < numCustomers; customer++) {
done[customer] = 0;
toDo[customer] = 0;
for (int product = 0; product < numProducts; product++) {
// suppose that orders has only 1's and 0's
toDo[customer] += orders[mIndex(customer, product, numCustomers,
numProducts)];
}
}
int numOpenStacks = 0;
for (int i = 0; i < numProducts; i++) {
int product = sequence[i];
for (int customer = 0; customer < numCustomers; customer++) {
if (orders[mIndex(customer,
product,
numCustomers,
numProducts)] > 0) {
toDo[customer]--;
done[customer]++;
}
}
int currentOpenStacks = 0;
for (int customer = 0; customer < numCustomers; customer++) {
if ((done[customer] > 0 && toDo[customer] > 0) ||
orders[mIndex(customer,
product,
numCustomers,
numProducts)] > 0) {
currentOpenStacks++;
}
}
if (currentOpenStacks > numOpenStacks) {
numOpenStacks = currentOpenStacks;
}
}
free(done);
free(toDo);
return numOpenStacks;
}
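// Maps each k in [0, numProducts!) to a distinct product permutation by
// reading k in factorial base and applying one swap per digit (Fisher-Yates decode).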
__device__ __host__
void generateSequence(int* sequence, int k, int numProducts) {
for (int i = 0; i < numProducts; i++) {
sequence[i] = i;
}
for (int i = 0; i < numProducts; i++) {
int temp = sequence[k % (i + 1)];
sequence[k % (i + 1)] = sequence[i];
sequence[i] = temp;
k = k / (i + 1);
}
}
__global__
void calculateMaximumOpenStacks(int* stackSizes,
int* orders,
int numCustomers,
int numProducts,
int step) {
int* sequence = (int*) malloc(numProducts * sizeof(int));
generateSequence(sequence, step * NUM_BLOCKS + blockIdx.x, numProducts);
stackSizes[blockIdx.x] = maximumOpenStacks(sequence,
orders,
numCustomers,
numProducts);
free(sequence);
}
void checkOk(hipError_t err) {
if (err != hipSuccess) {
cout << hipGetErrorString(err) << " at " << __LINE__ << endl;
exit(EXIT_FAILURE);
}
}
void bruteForceSolve(int* orders,
int numCustomers,
int numProducts) {
// Allocate GPU memory
int* orders_d;
int sizeOrders = numCustomers * numProducts * sizeof(int);
checkOk(hipMalloc((void**) &orders_d, sizeOrders));
checkOk(hipMemcpy(orders_d, orders, sizeOrders, hipMemcpyHostToDevice));
int* stackSizes_d;
int64_t numSequences = factorial(numProducts);
int sizeStacksSizes = NUM_BLOCKS * sizeof(int);
checkOk(hipMalloc((void**) &stackSizes_d, sizeStacksSizes));
cout << "numSequences: " << numSequences << endl;
// Process all sequences
int* stackSizes = (int*) malloc(sizeStacksSizes);
int minStacks = numCustomers + 1;
int bestK = -1;
for (int i = 0; i < ceil(1.0 * numSequences/NUM_BLOCKS); i++) {
int numSequencesToProcess;
if (numSequences - i * NUM_BLOCKS >= NUM_BLOCKS)
numSequencesToProcess = NUM_BLOCKS;
else
numSequencesToProcess = numSequences - i * NUM_BLOCKS;
// Calculating open stacks for each one of them
hipLaunchKernelGGL(( calculateMaximumOpenStacks), dim3(numSequencesToProcess), dim3(1), 0, 0, stackSizes_d,
orders_d,
numCustomers,
numProducts,
i);
checkOk(hipMemcpy(stackSizes,
stackSizes_d,
sizeStacksSizes,
hipMemcpyDeviceToHost));
// Calculate the minimum so far
for (int j = 0; j < numSequencesToProcess; j++) {
if (stackSizes[j] < minStacks) {
minStacks = stackSizes[j];
bestK = j + i * NUM_BLOCKS;
}
}
}
free(stackSizes);
// Deallocate GPU memory
checkOk(hipFree(stackSizes_d));
checkOk(hipFree(orders_d));
// Debugging output
cout << "minStacks: " << minStacks << endl;
// Print sequence
int* sequence = (int*) malloc(numProducts * sizeof(int));
generateSequence(sequence, bestK, numProducts);
cout << "Best sequence:" << endl;
for (int i = 0; i < numProducts; i++) {
cout << sequence[i] << " ";
}
cout << endl;
// See orders being produced
printOrdersInSequence(sequence, orders, numCustomers, numProducts);
cout << "Open stacks: "
<< maximumOpenStacks(sequence, orders, numCustomers, numProducts)
<< endl;
free(sequence);
// End of debugging code
}
int64_t combination(int n, int k) {
return factorial(n)/factorial(k)/factorial(n-k);
}
__device__
bool contains(int set, int p) {
for (int i = 0; i < p; i++) {
set /= 2;
}
return set % 2;
}
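// Returns a copy of the bitset `set` with bit p cleared, i.e. product p removed from the set.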
__device__ __host__
int remove(int set, int p) {
int stack = 0;
int offset = 1;
for (int i = 0; i < 32; i++) {
if (i != p)
stack += (set % 2) * offset;
set /= 2;
offset *= 2;
}
return stack;
}
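// Counts the customer stacks open while product p is produced: `set` holds the
// products still to be produced afterwards, and every other product (besides p)
// is treated as already produced.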
__device__
int a(int p,
int set,
int* orders,
int numCustomers,
int numProducts) {
bool* before = (bool*) malloc(numCustomers * sizeof(bool));
bool* after = (bool*) malloc(numCustomers * sizeof(bool));
bool* now = (bool*) malloc(numCustomers * sizeof(bool));
for (int i = 0; i < numCustomers; i++) {
before[i] = false;
after[i] = false;
now[i] = false;
}
for (int i = 0; i < numCustomers; i++) {
for (int j = 0; j < numProducts; j++) {
if (j == p &&
orders[mIndex(i, j, numCustomers, numProducts)] > 0) {
now[i] = true;
}
if (contains(set, j) == true &&
orders[mIndex(i, j, numCustomers, numProducts)] > 0) {
after[i] = true;
}
if (contains(set, j) == false &&
orders[mIndex(i, j, numCustomers, numProducts)] > 0) {
before[i] = true;
}
}
}
int active_stacks = 0;
for (int i = 0; i < numCustomers; i++) {
if(now[i] || (before[i] && after[i])) {
active_stacks++;
}
}
free(now);
free(after);
free(before);
return active_stacks;
}
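// One block per subset: DP transition
// stacksResults[set] = min over p in set of max(a(p, set\{p}), stacksResults[set\{p}]),
// with bestP[set] recording which product of the subset to produce first.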
__global__
void computeStacks(int offset,
int* sets,
int* stacksResults,
int* bestP,
int* orders,
int numCustomers,
int numProducts) {
int set = sets[offset + blockIdx.x];
if (set == 0) {
stacksResults[set] = 0;
return;
}
int best = -1;
int min_stacks = numCustomers * 10;
for (int p = 0; p < numProducts; p++) {
if (contains(set, p)) {
int newSet = remove(set, p);
int active = a(p, newSet, orders, numCustomers, numProducts);
int after = stacksResults[newSet];
int max = (active > after) ? active : after;
if (max < min_stacks) {
min_stacks = max;
best = p;
}
}
}
stacksResults[set] = min_stacks;
bestP[set] = best;
}
int countOnes(int n) {
int ones = 0;
while (n > 0) {
ones += n % 2;
n /= 2;
}
return ones;
}
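// Exact subset DP over products: subsets are launched in order of increasing
// size (popcount), so each kernel only reads stacksResults entries filled by
// earlier launches; bestP is then walked back to recover the production order.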
void dpSolve(int* orders, int numCustomers, int numProducts) {
int* orders_d;
int sizeOrders = numCustomers * numProducts * sizeof(int);
checkOk(hipMalloc((void**) &orders_d, sizeOrders));
checkOk(hipMemcpy(orders_d, orders, sizeOrders, hipMemcpyHostToDevice));
int** sorted_sets = (int**) malloc((numProducts + 1) * sizeof(int*));
int* combinations = (int*) malloc((numProducts + 1) * sizeof(int));
for (int i = 0; i < (numProducts + 1); i++) {
int numCombinations = combination(numProducts, i);
sorted_sets[i] = (int*) malloc(numCombinations * sizeof(int));
combinations[i] = 0;
}
for (int i = 0; i < pow(2, numProducts); i++) {
int ones = countOnes(i);
sorted_sets[ones][combinations[ones]] = i;
combinations[ones]++;
}
int setsSize = pow(2, numProducts) * sizeof(int);
int* sets = (int*) malloc(setsSize);
int* sets_d;
checkOk(hipMalloc((void**) &sets_d, setsSize));
int i = 0;
for (int j = 0; j < (numProducts + 1); j++) {
for (int k = 0; k < combinations[j]; k++) {
sets[i] = sorted_sets[j][k];
i++;
}
}
checkOk(hipMemcpy(sets_d, sets, setsSize, hipMemcpyHostToDevice));
int stacksResultsSize = pow(2, numProducts) * sizeof(int);
int* stacksResults = (int*) malloc(stacksResultsSize);
int* stacksResults_d;
checkOk(hipMalloc((void**) &stacksResults_d, stacksResultsSize));
int bestPSize = pow(2, numProducts) * sizeof(int);
int* bestP = (int*) malloc(bestPSize);
int* bestP_d;
checkOk(hipMalloc((void**) &bestP_d, bestPSize));
int offset = 0;
for (int setSize = 0; setSize < numProducts + 1; setSize++) {
hipLaunchKernelGGL(( computeStacks), dim3(combinations[setSize]), dim3(1), 0, 0, offset,
sets_d,
stacksResults_d,
bestP_d,
orders_d,
numCustomers,
numProducts);
offset += combinations[setSize];
}
checkOk(hipMemcpy(stacksResults,
stacksResults_d,
stacksResultsSize,
hipMemcpyDeviceToHost));
checkOk(hipMemcpy(bestP, bestP_d, bestPSize, hipMemcpyDeviceToHost));
cout << "Best sequence:" << endl;
int set = pow(2, numProducts) - 1;
int* sequence = (int*) malloc(numProducts * sizeof(int));
for (int i = 0; i < numProducts; i++) {
int best = bestP[set];
set = remove(set, best);
sequence[i] = best;
cout << best << " ";
}
cout << endl;
printOrdersInSequence(sequence, orders, numCustomers, numProducts);
cout << "OpenStacks: " << stacksResults[(int) pow(2, numProducts) - 1]
<< endl;
// Freeing memory
checkOk(hipFree(bestP_d));
checkOk(hipFree(stacksResults_d));
checkOk(hipFree(sets_d));
checkOk(hipFree(orders_d));
free(bestP);
free(stacksResults);
free(sets);
free(combinations);
for (int i = 0; i < (numProducts + 1); i++) free(sorted_sets[i]);
free(sorted_sets);
}
int main(int argc, char** argv) {
bool useBruteForce = false;
if (argc < 2 || (strncmp(argv[1], "bf", 2) != 0 &&
strncmp(argv[1], "dp", 2) != 0)) {
cout << "Specify if should use \"bf\" or \"dp\" as the first argument"
<< endl;
exit(EXIT_FAILURE);
} else {
if (strncmp(argv[1], "bf", 2) == 0) {
cout << "Solving by Brute Force..." << endl;
useBruteForce = true;
} else {
cout << "Solving by Dynamic Programming..." << endl;
useBruteForce = false;
}
}
float totalTime = 0;
float numInstances = 0;
float minTime = 1000000;
float maxTime = 0;
string buffer;
while(getline(cin, buffer)) {
// Read input
cout << "buffer: " << buffer << endl;
getline(cin, buffer);
int numCustomers = 0, numProducts = 0;
istringstream nums(buffer);
nums >> numCustomers;
nums >> numProducts;
int* orders;
orders = (int*) malloc(numCustomers * numProducts * sizeof(int));
for (int i = 0; i < numCustomers; i++) {
getline(cin, buffer);
istringstream customerOrders(buffer);
for (int j = 0; j < numProducts; j++) {
int didOrder;
customerOrders >> didOrder;
orders[mIndex(i, j, numCustomers, numProducts)] = didOrder;
}
}
cout << "numCustomers: " << numCustomers << endl
<< "numProducts: " << numProducts << endl;
printOrders(orders, numCustomers, numProducts);
// Solve
clock_t start = clock();
if (useBruteForce) {
bruteForceSolve(orders, numCustomers, numProducts);
}
else {
dpSolve(orders, numCustomers, numProducts);
}
clock_t end = clock();
float time = (float)(end - start) / CLOCKS_PER_SEC;
cout << "Took " << time << " seconds" << endl << endl;
totalTime += time;
numInstances++;
minTime = (time < minTime) ? time : minTime;
maxTime = (time > maxTime) ? time : maxTime;
free(orders); // release this instance's order matrix before reading the next one
getline(cin, buffer);
}
cout << "Solved: " << numInstances << " instances" << endl;
cout << "totalTime: " << totalTime << " seconds" << endl;
cout << "minTime: " << minTime << " seconds" << endl;
cout << "maxTime: " << maxTime << " seconds" << endl;
cout << "Average: " << totalTime/numInstances << " seconds" << endl;
return 0;
}
| a52158b0e33be9206300bd4d1ddf0c50bdb63ddd.cu | #include <cmath>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <time.h>
#define NUM_BLOCKS 65535
using namespace std;
__device__ __host__
int mIndex(int i, int j, int n, int m) {
return i*m + j;
}
int64_t cache[100];
int64_t cached = 0;
int64_t factorial(int64_t n) {
if (n < 2) return 1;
if (n > cached) {
cache[n] = n * factorial(n-1);
cached = n;
// cout << "Calculating factorial " << n <<": " << cache[n] << endl;
}
return cache[n];
}
void printOrders(int* orders, int numCustomers, int numProducts) {
for (int i = 0; i < numCustomers; i++) {
for (int j = 0; j < numProducts; j++) {
cout << orders[mIndex(i, j, numCustomers, numProducts)] << " ";
}
cout << endl;
}
}
void printSet(int set, int numProducts) {
for (int i = 0; i < numProducts; i++) {
cout << set % 2 << " ";
set /= 2;
}
cout << endl;
}
void printOrdersInSequence(int* sequence,
int* orders,
int numCustomers,
int numProducts) {
for (int i = 0; i < numCustomers; i++) {
for (int j = 0; j < numProducts; j++) {
cout << orders[mIndex(i, sequence[j], numCustomers, numProducts)]
<< " ";
}
cout << endl;
}
}
// Calculates the maximum number of open stacks for a given producing sequence
__device__ __host__
int maximumOpenStacks(int* sequence,
int* orders,
int numCustomers,
int numProducts) {
int* toDo = (int*) malloc(numCustomers * sizeof(int));
int* done = (int*) malloc(numCustomers * sizeof(int));
for (int customer = 0; customer < numCustomers; customer++) {
done[customer] = 0;
toDo[customer] = 0;
for (int product = 0; product < numProducts; product++) {
// suppose that orders has only 1's and 0's
toDo[customer] += orders[mIndex(customer, product, numCustomers,
numProducts)];
}
}
int numOpenStacks = 0;
for (int i = 0; i < numProducts; i++) {
int product = sequence[i];
for (int customer = 0; customer < numCustomers; customer++) {
if (orders[mIndex(customer,
product,
numCustomers,
numProducts)] > 0) {
toDo[customer]--;
done[customer]++;
}
}
int currentOpenStacks = 0;
for (int customer = 0; customer < numCustomers; customer++) {
if ((done[customer] > 0 && toDo[customer] > 0) ||
orders[mIndex(customer,
product,
numCustomers,
numProducts)] > 0) {
currentOpenStacks++;
}
}
if (currentOpenStacks > numOpenStacks) {
numOpenStacks = currentOpenStacks;
}
}
free(done);
free(toDo);
return numOpenStacks;
}
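// Maps each k in [0, numProducts!) to a distinct product permutation by
// reading k in factorial base and applying one swap per digit (Fisher-Yates decode).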
__device__ __host__
void generateSequence(int* sequence, int k, int numProducts) {
for (int i = 0; i < numProducts; i++) {
sequence[i] = i;
}
for (int i = 0; i < numProducts; i++) {
int temp = sequence[k % (i + 1)];
sequence[k % (i + 1)] = sequence[i];
sequence[i] = temp;
k = k / (i + 1);
}
}
__global__
void calculateMaximumOpenStacks(int* stackSizes,
int* orders,
int numCustomers,
int numProducts,
int step) {
int* sequence = (int*) malloc(numProducts * sizeof(int));
generateSequence(sequence, step * NUM_BLOCKS + blockIdx.x, numProducts);
stackSizes[blockIdx.x] = maximumOpenStacks(sequence,
orders,
numCustomers,
numProducts);
free(sequence);
}
void checkOk(cudaError_t err) {
if (err != cudaSuccess) {
cout << cudaGetErrorString(err) << " at " << __LINE__ << endl;
exit(EXIT_FAILURE);
}
}
void bruteForceSolve(int* orders,
int numCustomers,
int numProducts) {
// Allocate GPU memory
int* orders_d;
int sizeOrders = numCustomers * numProducts * sizeof(int);
checkOk(cudaMalloc((void**) &orders_d, sizeOrders));
checkOk(cudaMemcpy(orders_d, orders, sizeOrders, cudaMemcpyHostToDevice));
int* stackSizes_d;
int64_t numSequences = factorial(numProducts);
int sizeStacksSizes = NUM_BLOCKS * sizeof(int);
checkOk(cudaMalloc((void**) &stackSizes_d, sizeStacksSizes));
cout << "numSequences: " << numSequences << endl;
// Process all sequences
int* stackSizes = (int*) malloc(sizeStacksSizes);
int minStacks = numCustomers + 1;
int bestK = -1;
for (int i = 0; i < ceil(1.0 * numSequences/NUM_BLOCKS); i++) {
int numSequencesToProcess;
if (numSequences - i * NUM_BLOCKS >= NUM_BLOCKS)
numSequencesToProcess = NUM_BLOCKS;
else
numSequencesToProcess = numSequences - i * NUM_BLOCKS;
// Calculating open stacks for each one of them
calculateMaximumOpenStacks<<<numSequencesToProcess, 1>>>(stackSizes_d,
orders_d,
numCustomers,
numProducts,
i);
checkOk(cudaMemcpy(stackSizes,
stackSizes_d,
sizeStacksSizes,
cudaMemcpyDeviceToHost));
// Calculate the minimum so far
for (int j = 0; j < numSequencesToProcess; j++) {
if (stackSizes[j] < minStacks) {
minStacks = stackSizes[j];
bestK = j + i * NUM_BLOCKS;
}
}
}
free(stackSizes);
// Deallocate GPU memory
checkOk(cudaFree(stackSizes_d));
checkOk(cudaFree(orders_d));
// Debugging output
cout << "minStacks: " << minStacks << endl;
// Print sequence
int* sequence = (int*) malloc(numProducts * sizeof(int));
generateSequence(sequence, bestK, numProducts);
cout << "Best sequence:" << endl;
for (int i = 0; i < numProducts; i++) {
cout << sequence[i] << " ";
}
cout << endl;
// See orders being produced
printOrdersInSequence(sequence, orders, numCustomers, numProducts);
cout << "Open stacks: "
<< maximumOpenStacks(sequence, orders, numCustomers, numProducts)
<< endl;
free(sequence);
// End of debugging code
}
int64_t combination(int n, int k) {
return factorial(n)/factorial(k)/factorial(n-k);
}
__device__
bool contains(int set, int p) {
for (int i = 0; i < p; i++) {
set /= 2;
}
return set % 2;
}
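// Returns a copy of the bitset `set` with bit p cleared, i.e. product p removed from the set.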
__device__ __host__
int remove(int set, int p) {
int stack = 0;
int offset = 1;
for (int i = 0; i < 32; i++) {
if (i != p)
stack += (set % 2) * offset;
set /= 2;
offset *= 2;
}
return stack;
}
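// Counts the customer stacks open while product p is produced: `set` holds the
// products still to be produced afterwards, and every other product (besides p)
// is treated as already produced.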
__device__
int a(int p,
int set,
int* orders,
int numCustomers,
int numProducts) {
bool* before = (bool*) malloc(numCustomers * sizeof(bool));
bool* after = (bool*) malloc(numCustomers * sizeof(bool));
bool* now = (bool*) malloc(numCustomers * sizeof(bool));
for (int i = 0; i < numCustomers; i++) {
before[i] = false;
after[i] = false;
now[i] = false;
}
for (int i = 0; i < numCustomers; i++) {
for (int j = 0; j < numProducts; j++) {
if (j == p &&
orders[mIndex(i, j, numCustomers, numProducts)] > 0) {
now[i] = true;
}
if (contains(set, j) == true &&
orders[mIndex(i, j, numCustomers, numProducts)] > 0) {
after[i] = true;
}
if (contains(set, j) == false &&
orders[mIndex(i, j, numCustomers, numProducts)] > 0) {
before[i] = true;
}
}
}
int active_stacks = 0;
for (int i = 0; i < numCustomers; i++) {
if(now[i] || (before[i] && after[i])) {
active_stacks++;
}
}
free(now);
free(after);
free(before);
return active_stacks;
}
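// One block per subset: DP transition
// stacksResults[set] = min over p in set of max(a(p, set\{p}), stacksResults[set\{p}]),
// with bestP[set] recording which product of the subset to produce first.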
__global__
void computeStacks(int offset,
int* sets,
int* stacksResults,
int* bestP,
int* orders,
int numCustomers,
int numProducts) {
int set = sets[offset + blockIdx.x];
if (set == 0) {
stacksResults[set] = 0;
return;
}
int best = -1;
int min_stacks = numCustomers * 10;
for (int p = 0; p < numProducts; p++) {
if (contains(set, p)) {
int newSet = remove(set, p);
int active = a(p, newSet, orders, numCustomers, numProducts);
int after = stacksResults[newSet];
int max = (active > after) ? active : after;
if (max < min_stacks) {
min_stacks = max;
best = p;
}
}
}
stacksResults[set] = min_stacks;
bestP[set] = best;
}
int countOnes(int n) {
int ones = 0;
while (n > 0) {
ones += n % 2;
n /= 2;
}
return ones;
}
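// Exact subset DP over products: subsets are launched in order of increasing
// size (popcount), so each kernel only reads stacksResults entries filled by
// earlier launches; bestP is then walked back to recover the production order.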
void dpSolve(int* orders, int numCustomers, int numProducts) {
int* orders_d;
int sizeOrders = numCustomers * numProducts * sizeof(int);
checkOk(cudaMalloc((void**) &orders_d, sizeOrders));
checkOk(cudaMemcpy(orders_d, orders, sizeOrders, cudaMemcpyHostToDevice));
int** sorted_sets = (int**) malloc((numProducts + 1) * sizeof(int*));
int* combinations = (int*) malloc((numProducts + 1) * sizeof(int));
for (int i = 0; i < (numProducts + 1); i++) {
int numCombinations = combination(numProducts, i);
sorted_sets[i] = (int*) malloc(numCombinations * sizeof(int));
combinations[i] = 0;
}
for (int i = 0; i < pow(2, numProducts); i++) {
int ones = countOnes(i);
sorted_sets[ones][combinations[ones]] = i;
combinations[ones]++;
}
int setsSize = pow(2, numProducts) * sizeof(int);
int* sets = (int*) malloc(setsSize);
int* sets_d;
checkOk(cudaMalloc((void**) &sets_d, setsSize));
int i = 0;
for (int j = 0; j < (numProducts + 1); j++) {
for (int k = 0; k < combinations[j]; k++) {
sets[i] = sorted_sets[j][k];
i++;
}
}
checkOk(cudaMemcpy(sets_d, sets, setsSize, cudaMemcpyHostToDevice));
int stacksResultsSize = pow(2, numProducts) * sizeof(int);
int* stacksResults = (int*) malloc(stacksResultsSize);
int* stacksResults_d;
checkOk(cudaMalloc((void**) &stacksResults_d, stacksResultsSize));
int bestPSize = pow(2, numProducts) * sizeof(int);
int* bestP = (int*) malloc(bestPSize);
int* bestP_d;
checkOk(cudaMalloc((void**) &bestP_d, bestPSize));
int offset = 0;
for (int setSize = 0; setSize < numProducts + 1; setSize++) {
computeStacks<<<combinations[setSize], 1>>>(offset,
sets_d,
stacksResults_d,
bestP_d,
orders_d,
numCustomers,
numProducts);
offset += combinations[setSize];
}
checkOk(cudaMemcpy(stacksResults,
stacksResults_d,
stacksResultsSize,
cudaMemcpyDeviceToHost));
checkOk(cudaMemcpy(bestP, bestP_d, bestPSize, cudaMemcpyDeviceToHost));
cout << "Best sequence:" << endl;
int set = pow(2, numProducts) - 1;
int* sequence = (int*) malloc(numProducts * sizeof(int));
for (int i = 0; i < numProducts; i++) {
int best = bestP[set];
set = remove(set, best);
sequence[i] = best;
cout << best << " ";
}
cout << endl;
printOrdersInSequence(sequence, orders, numCustomers, numProducts);
cout << "OpenStacks: " << stacksResults[(int) pow(2, numProducts) - 1]
<< endl;
// Freeing memory
checkOk(cudaFree(bestP_d));
checkOk(cudaFree(stacksResults_d));
checkOk(cudaFree(sets_d));
checkOk(cudaFree(orders_d));
free(bestP);
free(stacksResults);
free(sets);
free(combinations);
for (int i = 0; i < (numProducts + 1); i++) free(sorted_sets[i]);
free(sorted_sets);
}
int main(int argc, char** argv) {
bool useBruteForce = false;
if (argc < 2 || (strncmp(argv[1], "bf", 2) != 0 &&
strncmp(argv[1], "dp", 2) != 0)) {
cout << "Specify if should use \"bf\" or \"dp\" as the first argument"
<< endl;
exit(EXIT_FAILURE);
} else {
if (strncmp(argv[1], "bf", 2) == 0) {
cout << "Solving by Brute Force..." << endl;
useBruteForce = true;
} else {
cout << "Solving by Dynamic Programming..." << endl;
useBruteForce = false;
}
}
float totalTime = 0;
float numInstances = 0;
float minTime = 1000000;
float maxTime = 0;
string buffer;
while(getline(cin, buffer)) {
// Read input
cout << "buffer: " << buffer << endl;
getline(cin, buffer);
int numCustomers = 0, numProducts = 0;
istringstream nums(buffer);
nums >> numCustomers;
nums >> numProducts;
int* orders;
orders = (int*) malloc(numCustomers * numProducts * sizeof(int));
for (int i = 0; i < numCustomers; i++) {
getline(cin, buffer);
istringstream customerOrders(buffer);
for (int j = 0; j < numProducts; j++) {
int didOrder;
customerOrders >> didOrder;
orders[mIndex(i, j, numCustomers, numProducts)] = didOrder;
}
}
cout << "numCustomers: " << numCustomers << endl
<< "numProducts: " << numProducts << endl;
printOrders(orders, numCustomers, numProducts);
// Solve
clock_t start = clock();
if (useBruteForce) {
bruteForceSolve(orders, numCustomers, numProducts);
}
else {
dpSolve(orders, numCustomers, numProducts);
}
clock_t end = clock();
float time = (float)(end - start) / CLOCKS_PER_SEC;
cout << "Took " << time << " seconds" << endl << endl;
totalTime += time;
numInstances++;
minTime = (time < minTime) ? time : minTime;
maxTime = (time > maxTime) ? time : maxTime;
free(orders); // release this instance's order matrix before reading the next one
getline(cin, buffer);
}
cout << "Solved: " << numInstances << " instances" << endl;
cout << "totalTime: " << totalTime << " seconds" << endl;
cout << "minTime: " << minTime << " seconds" << endl;
cout << "maxTime: " << maxTime << " seconds" << endl;
cout << "Average: " << totalTime/numInstances << " seconds" << endl;
return 0;
}
|
bb6fd99402271da5b15453af9db286c30b5d7d68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef NEW_KERN_CU
#define NEW_KERN_CU
#endif
#define PI_F 3.141592653589793f
#define MU_0 4e-7f*PI_F
#ifndef MU_C
#define MU_C 1
#endif
#include <helper_math.h>
#include "new_kern.h"
__constant__ NewParams nparams;
texture<float4, hipTextureType1D, hipReadModeElementType> pos_tex;
texture<float4, hipTextureType1D, hipReadModeElementType> mom_tex;
texture<float4, hipTextureType1D, hipReadModeElementType> vel_tex;
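// Maps a particle position to integer grid-cell coordinates, wrapping
// periodically so positions slightly outside the box land in a valid cell.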
__device__ uint3 calcGPos(float3 p)
{
uint3 gpos;
gpos.x = floorf((p.x - nparams.origin.x)/nparams.cellSize.x);
gpos.y = floorf((p.y - nparams.origin.y)/nparams.cellSize.y);
gpos.z = floorf((p.z - nparams.origin.z)/nparams.cellSize.z);
gpos.x = (nparams.gridSize.x + gpos.x) % nparams.gridSize.x;
gpos.y = (nparams.gridSize.y + gpos.y) % nparams.gridSize.y;
gpos.z = (nparams.gridSize.z + gpos.z) % nparams.gridSize.z;
return gpos;
}
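// Stores each particle's cell hash (looked up through d_CellHash) together with
// its original index, so the pair can be sorted by hash to group particles by cell.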
__global__ void comp_phashK(const float4* d_pos, uint* d_pHash, uint* d_pIndex, const uint* d_CellHash)
{
int idx = blockDim.x*blockIdx.x + threadIdx.x;
if(idx >= nparams.N)
return;
float4 pos = d_pos[idx];
float3 p = make_float3(pos);
//float rad = pos.w;
uint3 gpos = calcGPos(p);
uint cell_id = gpos.x + gpos.y*nparams.gridSize.x +
gpos.z*nparams.gridSize.y*nparams.gridSize.x;
d_pIndex[idx] = idx;
d_pHash[idx] = d_CellHash[cell_id] ;//+ (rad < 3e-6f ? nparams.numCells : 0 );
}
__global__ void findCellStartK(uint* cellStart, //o: cell starts
uint* cellEnd, //o: cell ends
uint* phash) //i: hashes sorted by hash
{
extern __shared__ uint sharedHash[]; //size of blockDim+1
uint index = blockIdx.x*blockDim.x + threadIdx.x;
uint hash;
if(index < nparams.N )
{
hash = phash[index];
//load all neighboring hashes into memory
sharedHash[threadIdx.x+1] = hash;
if(index > 0 && threadIdx.x == 0)
sharedHash[0] = phash[index-1];
}
__syncthreads();
if(index < nparams.N)
{
//once load complete, compare to hash before and if !=, then write starts/ends
if(index == 0 || hash != sharedHash[threadIdx.x])
{
cellStart[hash] = index;
if (index > 0)// if not first cell
cellEnd[sharedHash[threadIdx.x]] = index;
}
if (index == nparams.N - 1){//if the last particle, the cell ends here
cellEnd[hash] = index+1;
}
}
}
__global__ void reorderK(const uint* dSortedIndex, float4* sortedA,
float4* sortedB, const float4* oldA,
const float4* oldB)
{
uint idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= nparams.N)
return;
uint sortedIdx = dSortedIndex[idx];
sortedA[idx] = oldA[sortedIdx];
sortedB[idx] = oldB[sortedIdx];
}
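// Advances one particle with an explicit Euler step under Stokes drag plus a
// linear shear flow; particles within pin_d radii of either y-wall move with
// that wall, and the final y position is clamped inside the box.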
__device__ inline void applyBC(uint idx, float deltaTime, const float radius1,
const float3 p1, float3 force, const float4* integrPos, float4* newPos)
{
float Cd = 6.0f*PI_F*radius1*nparams.visc;
float ybot = p1.y - nparams.origin.y;
force.x += nparams.shear*ybot*Cd;
//apply flow BCs
if(ybot <= nparams.pin_d*radius1)
force = make_float3(0,0,0);
if(ybot >= nparams.L.y - nparams.pin_d*radius1)
force = make_float3(nparams.shear*nparams.L.y*Cd,0,0);
float3 ipos = make_float3(integrPos[idx]);
float3 npos = ipos + force/Cd*deltaTime;
float edge = 0.5f*nparams.L.y - radius1;
npos.y = npos.y > edge ? edge : npos.y;
npos.y = npos.y < -edge ? -edge : npos.y;
newPos[idx] = make_float4(npos, radius1);
}
//assume aligned point dipoles
__global__ void pointDipK( const float4* dSortedPos, //i: pos we use to calculate forces
const float4* integrPos, //i: pos we use as base to integrate from
const uint* nlist, //i: the neighbor list
const uint* num_neigh, //i: the number of inputs
float4* dForce, //o: the magnetic force on a particle
float4* newPos, //o: the integrated position
const float forceFactor, //i: 4/3*pi*mu0*M^2
const float deltaTime) //i: the timestep
{
uint idx = blockDim.x*blockIdx.x + threadIdx.x;
if(idx >= nparams.N)
return;
uint n_neigh = num_neigh[idx];
float4 pos1 = dSortedPos[idx];
//float4 pos1 = tex1Dfetch(pos_tex,idx);
float3 p1 = make_float3(pos1);
float radius1 = pos1.w;
float3 force = make_float3(0,0,0);
for(uint i = 0; i < n_neigh; i++)
{
uint neighbor = nlist[i*nparams.N + idx];
float4 pos2 = tex1Dfetch(pos_tex, neighbor);
float3 p2 = make_float3(pos2);
float radius2 = pos2.w;
float sepdist = radius1 + radius2;
float3 er = p1 - p2;//start it out as dr, then modify to get er
er.x = er.x - nparams.L.x*rintf(er.x*nparams.Linv.x);
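// note: the z wrap below reuses nparams.L.x with Linv.z, which matches a true
// z wrap only when the box is square in x and z (L.x == L.z)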
er.z = er.z - nparams.L.x*rintf(er.z*nparams.Linv.z);
float lsq = er.x*er.x + er.y*er.y + er.z*er.z;
if(lsq <= nparams.forcedist_sq*sepdist*sepdist) {
float inv_dist = rsqrtf(lsq);
er = er*inv_dist;
float3 f_ij = er*(1 - 5*er.y*er.y);
f_ij.y += 2*er.y;
f_ij *= inv_dist*inv_dist*inv_dist*inv_dist;
float inv_sep = 1.0f/sepdist;
f_ij += 2.0f*(inv_sep*inv_sep*inv_sep*inv_sep)*
expf(-nparams.spring*((1.0f/inv_dist)*inv_sep - 1.0f))*er;
//multiply by the "volume" of rad2
f_ij *= (radius2*radius2*radius2);
force += f_ij;
}
}
//convert force into physical units
force *= forceFactor*(radius1*radius1*radius1);
dForce[idx] = make_float4(force,0.0f);
applyBC(idx, deltaTime, radius1, p1, force, integrPos, newPos);
}
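// Dipole-dipole pair forces between neighboring particles using their current
// moments, plus a short-range exponential repulsion that keeps particles from overlapping.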
__global__ void magForcesK( const float4* dSortedPos, //i: pos we use to calculate forces
const float4* dMom, //i: the moment
const float4* integrPos, //i: pos we use as base to integrate from
const uint* nlist, //i: the neighbor list
const uint* num_neigh, //i: the number of inputs
float4* dForce, //o: the magnetic force on a particle
float4* newPos, //o: the integrated position
float deltaTime) //o: the timestep
{
uint idx = blockDim.x*blockIdx.x + threadIdx.x;
if(idx >= nparams.N)
return;
uint n_neigh = num_neigh[idx];
float4 pos1 = dSortedPos[idx];
//float4 pos1 = tex1Dfetch(pos_tex,idx);
float3 p1 = make_float3(pos1);
float radius1 = pos1.w;
float4 mom1 = dMom[idx];
//float4 mom1 = tex1Dfetch(mom_tex,idx);
float3 m1 = make_float3(mom1);
float Cp1 = mom1.w;
float3 force = make_float3(0,0,0);
for(uint i = 0; i < n_neigh; i++)
{
uint neighbor = nlist[i*nparams.N + idx];
float4 pos2 = tex1Dfetch(pos_tex, neighbor);
float3 p2 = make_float3(pos2);
float radius2 = pos2.w;
float sepdist = radius1 + radius2;
float4 mom2 = tex1Dfetch(mom_tex, neighbor);
float3 m2 = make_float3(mom2);
float Cp2 = mom2.w;
float3 er = p1 - p2;//start it out as dr, then modify to get er
er.x = er.x - nparams.L.x*rintf(er.x*nparams.Linv.x);
er.z = er.z - nparams.L.x*rintf(er.z*nparams.Linv.z);
float lsq = er.x*er.x + er.y*er.y + er.z*er.z;
er = er*rsqrtf(lsq);
if(lsq <= nparams.forcedist_sq*sepdist*sepdist) {
float dm1m2 = dot(m1,m2);
float dm1er = dot(m1,er);
float dm2er = dot(m2,er);
force += 3.0f*MU_0*MU_C/(4*PI_F*lsq*lsq) *( dm1m2*er + dm1er*m2
+ dm2er*m1 - 5.0f*dm1er*dm2er*er);
//create a false moment for nonmagnetic particles
//note that here Cp gives the wrong volume, so the magnitude of
//the repulsion strength is wrong
m1 = (Cp1 == 0.0f) ? nparams.Cpol*nparams.extH : m1;
m2 = (Cp2 == 0.0f) ? nparams.Cpol*nparams.extH : m2;
dm1m2 = dot(m1,m2);
force += 3.0f*MU_0*MU_C*length(m1)*length(m2)/(2.0f*PI_F*sepdist*sepdist*sepdist*sepdist)*
expf(-nparams.spring*(sqrtf(lsq)/sepdist - 1))*er;
}
}
dForce[idx] = make_float4(force,0.0f);
applyBC(idx, deltaTime, radius1, p1, force, integrPos, newPos);
}
// for uniform finite dipoles
__global__ void finiteDipK( const float4* dSortedPos, //i: pos we use to calculate forces
const float4* integrPos, //i: pos we use as base to integrate from
const uint* nlist, //i: the neighbor list
const uint* num_neigh, //i: the number of inputs
float4* dForce, //o: the magnetic force on a particle
float4* newPos, //o: the integrated position
const float dipole_d, //i: finite dipole distance in units of diameters
const float F0, //i: point dipole F_0
const float sigma_0, //i: reference diam
float deltaTime) //o: the timestep
{
uint idx = blockDim.x*blockIdx.x + threadIdx.x;
if(idx >= nparams.N)
return;
uint n_neigh = num_neigh[idx];
float4 pos1 = dSortedPos[idx];
//float4 pos1 = tex1Dfetch(pos_tex,idx);
float3 p1 = make_float3(pos1);
float radius1 = pos1.w;
float3 force = make_float3(0,0,0);
const float finite_pre = sigma_0*sigma_0/(3.0f*dipole_d*dipole_d);
for(uint i = 0; i < n_neigh; i++)
{
uint neighbor = nlist[i*nparams.N + idx];
float4 pos2 = tex1Dfetch(pos_tex, neighbor);
float3 p2 = make_float3(pos2);
float radius2 = pos2.w;
float sepdist = radius1 + radius2;
float3 dr = p1 - p2;//start it out as dr, then modify to get er
dr.x = dr.x - nparams.L.x*rintf(dr.x*nparams.Linv.x);
dr.z = dr.z - nparams.L.x*rintf(dr.z*nparams.Linv.z);
float lsq = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
float3 er = dr*rsqrtf(lsq);
if(lsq <= nparams.forcedist_sq*sepdist*sepdist) {
//er_0
//create f_ij to attempt to fix numerical precision problems at small dipole_d
force += 2*expf(-nparams.spring*(sqrtf(lsq)/sepdist - 1))*er;
float3 f_ij = (2.0f/lsq)*er;
//er_+
dr.y += dipole_d*sigma_0;
lsq = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z; //replace with 2d*dr.y + d^2?
er = dr*rsqrtf(lsq);
f_ij += (-1.0f/lsq)*er;
//er_-
dr.y -= 2.0f*dipole_d*sigma_0;
lsq = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
er = dr*rsqrtf(lsq);
f_ij += (-1.0f/lsq)*er;
force += finite_pre*f_ij;
}
}
//convert back to real units
force *= F0;
dForce[idx] = make_float4(force,0.0f);
applyBC(idx, deltaTime, radius1, p1, force, integrPos, newPos);
}
__global__ void vertEdgeK(const uint* nlist,
const uint* num_neigh,
const float4* dPos,
uint* conn,
float maxcosth, //cos(th) above which the counter increments
float maxdistsq)
{
uint idx = blockDim.x*blockIdx.x + threadIdx.x;
if(idx >= nparams.N)
return;
uint n_neigh = num_neigh[idx];
float4 pos1 = dPos[idx];
//float4 pos1 = tex1Dfetch(pos_tex,idx);
float3 p1 = make_float3(pos1);
float radius1 = pos1.w;
uint connections = 0;
for(uint i = 0; i < n_neigh; i++)
{
uint neighbor = nlist[i*nparams.N + idx];
float4 pos2 = dPos[neighbor];
float3 p2 = make_float3(pos2);
float radius2 = pos2.w;
float sepdist = radius1 + radius2;
float3 er = p1 - p2;//start it out as dr, then modify to get er
er.x = er.x - nparams.L.x*rintf(er.x*nparams.Linv.x);
er.z = er.z - nparams.L.x*rintf(er.z*nparams.Linv.z);
float lsq = er.x*er.x + er.y*er.y + er.z*er.z;
er = er*rsqrtf(lsq);
if( (lsq < maxdistsq*sepdist*sepdist) && fabs(er.y) >= maxcosth)
connections++;
}
conn[idx] = connections; //for this to work, nlist must be regenned immediately after calling this
}
//note: gives non-physical results
__global__ void magFricForcesK( const float4* dSortedPos, //i: pos we use to calculate forces
const float4* dMom, //i: the moment
const float4* dForceIn, //i: the old force, used to find velocity
const float4* integrPos, //i: pos we use as base to integrate from
const uint* nlist, //i: the neighbor list
const uint* num_neigh, //i: the number of inputs
float4* dForceOut, //o: the magnetic force on a particle
float4* newPos, //o: the integrated position
float static_fric, //maximum static friction
float deltaTime) //i: the timestep
{
uint idx = blockDim.x*blockIdx.x + threadIdx.x;
if(idx >= nparams.N)
return;
uint n_neigh = num_neigh[idx];
float4 pos1 = dSortedPos[idx];
//float4 pos1 = tex1Dfetch(pos_tex,idx);
float3 p1 = make_float3(pos1);
float radius1 = pos1.w;
float Cd1 = 6.0f*PI_F*radius1*nparams.visc;
float4 mom1 = dMom[idx];
//float4 mom1 = tex1Dfetch(mom_tex,idx);
float3 m1 = make_float3(mom1);
float Cp1 = mom1.w;
float3 f1 = make_float3(dForceIn[idx]);
float3 force = make_float3(0,0,0);
for(uint i = 0; i < n_neigh; i++)
{
uint neighbor = nlist[i*nparams.N + idx];
float4 pos2 = tex1Dfetch(pos_tex, neighbor);
float3 p2 = make_float3(pos2);
float radius2 = pos2.w;
float Cd2 = 6.0f*PI_F*radius1*nparams.visc;
float4 mom2 = tex1Dfetch(mom_tex, neighbor);
float3 m2 = make_float3(mom2);
float Cp2 = mom2.w;
float3 f2 = make_float3(dForceIn[idx]);
float3 er = p1 - p2;//start it out as dr, then modify to get er
er.x = er.x - nparams.L.x*rintf(er.x*nparams.Linv.x);
er.z = er.z - nparams.L.x*rintf(er.z*nparams.Linv.z);
float lsq = er.x*er.x + er.y*er.y + er.z*er.z;
er = er*rsqrtf(lsq);
if(lsq <= nparams.max_fdr_sq){
float dm1m2 = dot(m1,m2);
float dm1er = dot(m1,er);
float dm2er = dot(m2,er);
force += 3.0f*MU_0*MU_C/(4*PI_F*lsq*lsq) *( dm1m2*er + dm1er*m2
+ dm2er*m1 - 5.0f*dm1er*dm2er*er);
//create a false moment for nonmagnetic particles
//note that here Cp gives the wrong volume, so the magnitude of
//the repulsion strength is wrong
m1 = (Cp1 == 0.0f) ? nparams.Cpol*nparams.extH : m1;
m2 = (Cp2 == 0.0f) ? nparams.Cpol*nparams.extH : m2;
dm1m2 = dot(m1,m2);
float sepdist = radius1 + radius2;
float normalforce = 3.0f*MU_0*MU_C*dm1m2/(2.0f*PI_F*sepdist*sepdist*sepdist*sepdist)*
expf(-nparams.spring*(sqrtf(lsq)/sepdist - 1.0f));
force += normalforce*er;
if(lsq <= sepdist*sepdist){
float3 v1 = f1/Cd1 + nparams.shear*p1.y;
v1 = (p1.y >= nparams.L.y - nparams.pin_d*radius1) ?
make_float3(nparams.shear*nparams.L.y,0.0f,0.0f) : v1;
float3 v2 = f2/Cd2 + nparams.shear*p2.y;
v2 = (p2.y >= nparams.L.y - nparams.pin_d*radius2) ?
make_float3(nparams.shear*nparams.L.y,0.0f,0.0f) : v2;
float3 relvel = v1 - v2;
float3 tanvel = relvel - dot(er,relvel)*er;
float tanv_sq = tanvel.x*tanvel.x + tanvel.y*tanvel.y + tanvel.z*tanvel.z;
if(tanv_sq*nparams.tanfric*nparams.tanfric < normalforce*normalforce) {
force -= tanvel*nparams.tanfric;
} else {
tanvel = tanvel*rsqrtf(tanv_sq); //make it a unit vector;
force -= tanvel*static_fric*normalforce;
}
}
}
}
dForceOut[idx] = make_float4(force,0.0f);
float ybot = p1.y - nparams.origin.y;
force.x += nparams.shear*ybot*Cd1;
//apply flow BCs
if(ybot < nparams.pin_d*radius1)
force = make_float3(0,0,0);
if(ybot > nparams.L.y - nparams.pin_d*radius1)
force = make_float3(nparams.shear*nparams.L.y*Cd1,0,0);
float3 ipos = make_float3(integrPos[idx]);
newPos[idx] = make_float4(ipos + force/Cd1*deltaTime, radius1);
}
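// Recomputes each particle's moment as Cp times the local field: the external
// field plus the dipole fields of all neighbors in range; nonmagnetic particles
// (Cp == 0) are assigned a zero moment.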
__global__ void mutualMagnK(const float4* pos,
const float4* oldMag,
float4* newMag,
const uint* nlist,
const uint* numNeigh)
{
uint idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= nparams.N) return;
uint n_neigh = numNeigh[idx];
float4 pos1 = pos[idx];
float3 p1 = make_float3(pos1);
//float radius1 = pos1.w;
float4 omag = oldMag[idx];
float3 mom1 = make_float3(omag);
float Cp1 = omag.w;
if(Cp1 == 0.0f) { //if nonmagnetic
newMag[idx] = make_float4(0.0f,0.0f,0.0f,Cp1);
return;
}
float3 H = nparams.extH;
for(uint i = 0; i < n_neigh; i++) {
uint neighbor = nlist[i*nparams.N + idx];
float4 pos2 = tex1Dfetch(pos_tex, neighbor);
float3 p2 = make_float3(pos2);
//float radius2 = pos2.w;
float4 mom2 = tex1Dfetch(mom_tex, neighbor);
float3 m2 = make_float3(mom2);
//float Cp2 = mom2.w;
float3 er = p1 - p2;//start it out as dr, then modify to get er
er.x = er.x - nparams.L.x*rintf(er.x*nparams.Linv.x);
er.z = er.z - nparams.L.x*rintf(er.z*nparams.Linv.z);
float lsq = er.x*er.x + er.y*er.y + er.z*er.z;
if(lsq <= nparams.max_fdr_sq) {
float invdist = rsqrtf(lsq);
er = er*invdist;
H += 1.0f/(4.0f*PI_F)*(3.0f*dot(m2,er)*er - m2)*invdist*invdist*invdist;
}
}
newMag[idx] = make_float4(Cp1*H, Cp1);
}
__global__ void integrateRK4K(
const float4* oldPos,
float4* PosA,
const float4* PosB,
const float4* PosC,
const float4* PosD,
float4* forceA,
const float4* forceB,
const float4* forceC,
const float4* forceD,
const float deltaTime,
const uint numParticles)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numParticles) return; // handle case when no. of particles not multiple of block size
float4 old = oldPos[index];
float3 oldp = make_float3(old);
float radius = old.w;
//compite k1,k2, we use a factor of 2.0, because they're done with a timestep of 0.5*dt
float3 k1 = 2.0f*(make_float3(PosA[index]) - oldp);
float3 k2 = 2.0f*(make_float3(PosB[index]) - oldp);
float3 k3 = make_float3(PosC[index]) - oldp;
float3 k4 = make_float3(PosD[index]) - oldp;
oldp += (1.0f/6.0f)*(k1 + 2.0f*k2 + 2.0f*k3 + k4);
oldp.x -= nparams.L.x*rintf(oldp.x*nparams.Linv.x);//this runs the risk of floating point errors pushing things outside the box
oldp.z -= nparams.L.z*rintf(oldp.z*nparams.Linv.z);
if (oldp.y > -1.0f*nparams.origin.y - radius ) { oldp.y = -1.0f*nparams.origin.y - radius;}
if (oldp.y < nparams.origin.y + radius ) { oldp.y = nparams.origin.y + radius; }
PosA[index] = make_float4(oldp, radius);
float4 f1 = forceA[index];
float nothin = f1.w;//doesn't actually hold any value, but might someday
float3 force1 = make_float3(f1);
float3 force2 = make_float3(forceB[index]);
float3 force3 = make_float3(forceC[index]);
float3 force4 = make_float3(forceD[index]);
float3 fcomp = (force1 + 2*force2 + 2*force3 + force4)/6.0f;//trapezoid rule
forceA[index] = make_float4(fcomp, nothin);//averaged force
}
__global__ void bogacki_ynp1k(
const float4* d_yn,
const float4* d_ynpk1,
const float4* d_ynpk2,
const float4* d_ynpk3,
float4* d_ynp1,
const float deltaTime,
const uint numParticles)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numParticles) return; // handle case when no. of particles not multiple of block size
float4 old = d_yn[index];
float3 yn = make_float3(old);
float radius = old.w;
float3 k1 = 2.0f*(make_float3(d_ynpk1[index]) - yn);
float3 k2 = 4.0/3.0f*(make_float3(d_ynpk2[index]) - yn);
float3 k3 = make_float3(d_ynpk3[index]) - yn;
//float Cd = 6*PI_F*nparams.visc*radius;
float3 ynp1 = yn + (1.0f/9.0f)*(2.0f*k1 + 3.0f*k2 + 4.0f*k3);
ynp1.x -= nparams.L.x*rintf(ynp1.x*nparams.Linv.x);//this runs the risk of floating point errors pushing things outside the box
ynp1.z -= nparams.L.z*rintf(ynp1.z*nparams.Linv.z);
if (ynp1.y > -1.0f*nparams.origin.y - radius ) { ynp1.y = -1.0f*nparams.origin.y - radius;}
if (ynp1.y < nparams.origin.y + radius ) { ynp1.y = nparams.origin.y + radius; }
d_ynp1[index] = make_float4(ynp1, radius);
}
__global__ void collisionK( const float4* sortedPos, //i: pos we use to calculate forces
const float4* oldVel,
const uint* nlist, //i: the neighbor list
const uint* num_neigh, //i: the number of inputs
float4* newVel, //o: the magnetic force on a particle
float4* newPos, //o: the integrated position
float radExp,
float deltaTime) //i: the timestep
{
uint idx = blockDim.x*blockIdx.x + threadIdx.x;
if(idx >= nparams.N)
return;
uint n_neigh = num_neigh[idx];
float4 pos1 = sortedPos[idx];
float3 p1 = make_float3(pos1);
float radius1 = pos1.w;
float3 v1 = make_float3(oldVel[idx]);
float3 force = make_float3(0,0,0);
for(uint i = 0; i < n_neigh; i++)
{
uint neighbor = nlist[i*nparams.N + idx];
float4 pos2 = tex1Dfetch(pos_tex, neighbor);
float3 p2 = make_float3(pos2);
float radius2 = pos2.w;
float3 v2 = make_float3(tex1Dfetch(vel_tex,neighbor));
float3 er = p1 - p2;//start it out as dr, then modify to get er
er.x = er.x - nparams.L.x*rintf(er.x*nparams.Linv.x);
er.z = er.z - nparams.L.x*rintf(er.z*nparams.Linv.z);
float dist = sqrtf(er.x*er.x + er.y*er.y + er.z*er.z);
float sepdist = radExp*(radius1 + radius2);
//do a quicky spring
if(dist <= sepdist){
er = er/dist;
float3 relVel = v2-v1;
force += -1e7f*sepdist*(dist - sepdist)*er;
force += .08f*relVel;
}
}
//yes this integration is totally busted, but it works, soooo
v1 = (v1 + force*deltaTime/(2e3f*radius1))*.8f;
p1 = p1 + v1*deltaTime;
p1.x -= nparams.L.x * rintf(p1.x*nparams.Linv.x);
p1.z -= nparams.L.x * rintf(p1.z*nparams.Linv.z);
if(p1.y+radius1 > -nparams.origin.y){
p1.y = -nparams.origin.y - radius1;
v1.y*= -.03f;
}
if(p1.y-radius1 < nparams.origin.y){
p1.y = nparams.origin.y + radius1;
v1.y*= -.03f;
}
newVel[idx] = make_float4(v1);
newPos[idx] = make_float4(p1, radius1);
}
| bb6fd99402271da5b15453af9db286c30b5d7d68.cu | #ifndef NEW_KERN_CU
#define NEW_KERN_CU
#endif
#define PI_F 3.141592653589793f
#define MU_0 4e-7f*PI_F
#ifndef MU_C
#define MU_C 1
#endif
#include <helper_math.h>
#include "new_kern.h"
__constant__ NewParams nparams;
texture<float4, cudaTextureType1D, cudaReadModeElementType> pos_tex;
texture<float4, cudaTextureType1D, cudaReadModeElementType> mom_tex;
texture<float4, cudaTextureType1D, cudaReadModeElementType> vel_tex;
__device__ uint3 calcGPos(float3 p)
{
uint3 gpos;
gpos.x = floorf((p.x - nparams.origin.x)/nparams.cellSize.x);
gpos.y = floorf((p.y - nparams.origin.y)/nparams.cellSize.y);
gpos.z = floorf((p.z - nparams.origin.z)/nparams.cellSize.z);
gpos.x = (nparams.gridSize.x + gpos.x) % nparams.gridSize.x;
gpos.y = (nparams.gridSize.y + gpos.y) % nparams.gridSize.y;
gpos.z = (nparams.gridSize.z + gpos.z) % nparams.gridSize.z;
return gpos;
}
__global__ void comp_phashK(const float4* d_pos, uint* d_pHash, uint* d_pIndex, const uint* d_CellHash)
{
int idx = blockDim.x*blockIdx.x + threadIdx.x;
if(idx >= nparams.N)
return;
float4 pos = d_pos[idx];
float3 p = make_float3(pos);
//float rad = pos.w;
uint3 gpos = calcGPos(p);
uint cell_id = gpos.x + gpos.y*nparams.gridSize.x +
gpos.z*nparams.gridSize.y*nparams.gridSize.x;
d_pIndex[idx] = idx;
d_pHash[idx] = d_CellHash[cell_id] ;//+ (rad < 3e-6f ? nparams.numCells : 0 );
}
__global__ void findCellStartK(uint* cellStart, //o: cell starts
uint* cellEnd, //o: cell ends
uint* phash) //i: hashes sorted by hash
{
extern __shared__ uint sharedHash[]; //size of blockDim+1
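	// sharedHash needs blockDim.x+1 entries: slot 0 caches the hash of the particle just
	// before this block, so every thread can compare its hash with its predecessor's.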
uint index = blockIdx.x*blockDim.x + threadIdx.x;
uint hash;
if(index < nparams.N )
{
hash = phash[index];
//load all neighboring hashes into memory
sharedHash[threadIdx.x+1] = hash;
if(index > 0 && threadIdx.x == 0)
sharedHash[0] = phash[index-1];
}
__syncthreads();
if(index < nparams.N)
{
//once load complete, compare to hash before and if !=, then write starts/ends
if(index == 0 || hash != sharedHash[threadIdx.x])
{
cellStart[hash] = index;
if (index > 0)// if not first cell
cellEnd[sharedHash[threadIdx.x]] = index;
}
if (index == nparams.N - 1){//if the last particle, the cell ends here
cellEnd[hash] = index+1;
}
}
}
__global__ void reorderK(const uint* dSortedIndex, float4* sortedA,
float4* sortedB, const float4* oldA,
const float4* oldB)
{
uint idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= nparams.N)
return;
uint sortedIdx = dSortedIndex[idx];
sortedA[idx] = oldA[sortedIdx];
sortedB[idx] = oldB[sortedIdx];
}
__device__ inline void applyBC(uint idx, float deltaTime, const float radius1,
const float3 p1, float3 force, const float4* integrPos, float4* newPos)
{
float Cd = 6.0f*PI_F*radius1*nparams.visc;
float ybot = p1.y - nparams.origin.y;
force.x += nparams.shear*ybot*Cd;
//apply flow BCs
if(ybot <= nparams.pin_d*radius1)
force = make_float3(0,0,0);
if(ybot >= nparams.L.y - nparams.pin_d*radius1)
force = make_float3(nparams.shear*nparams.L.y*Cd,0,0);
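	// Overdamped (Stokes-drag) update: the particle velocity is force/Cd, so the position
	// advances by (force/Cd)*deltaTime; y is then clamped so the particle stays inside the box.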
float3 ipos = make_float3(integrPos[idx]);
float3 npos = ipos + force/Cd*deltaTime;
float edge = 0.5f*nparams.L.y - radius1;
npos.y = npos.y > edge ? edge : npos.y;
npos.y = npos.y < -edge ? -edge : npos.y;
newPos[idx] = make_float4(npos, radius1);
}
//assume aligned point dipoles
__global__ void pointDipK( const float4* dSortedPos, //i: pos we use to calculate forces
const float4* integrPos, //i: pos we use as base to integrate from
const uint* nlist, //i: the neighbor list
const uint* num_neigh, //i: the number of inputs
float4* dForce, //o: the magnetic force on a particle
float4* newPos, //o: the integrated position
const float forceFactor, //i: 4/3*pi*mu0*M^2
const float deltaTime) //i: the timestep
{
uint idx = blockDim.x*blockIdx.x + threadIdx.x;
if(idx >= nparams.N)
return;
uint n_neigh = num_neigh[idx];
float4 pos1 = dSortedPos[idx];
//float4 pos1 = tex1Dfetch(pos_tex,idx);
float3 p1 = make_float3(pos1);
float radius1 = pos1.w;
float3 force = make_float3(0,0,0);
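	// Dipole-dipole force for moments aligned along y: per pair this evaluates
	// f ~ [(1 - 5*er.y^2)*er + 2*er.y*yhat]/r^4 plus a short-range exponential contact
	// repulsion, scaled by the neighbor's volume (radius2^3).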
for(uint i = 0; i < n_neigh; i++)
{
uint neighbor = nlist[i*nparams.N + idx];
float4 pos2 = tex1Dfetch(pos_tex, neighbor);
float3 p2 = make_float3(pos2);
float radius2 = pos2.w;
float sepdist = radius1 + radius2;
float3 er = p1 - p2;//start it out as dr, then modify to get er
er.x = er.x - nparams.L.x*rintf(er.x*nparams.Linv.x);
er.z = er.z - nparams.L.x*rintf(er.z*nparams.Linv.z);
float lsq = er.x*er.x + er.y*er.y + er.z*er.z;
if(lsq <= nparams.forcedist_sq*sepdist*sepdist) {
float inv_dist = rsqrtf(lsq);
er = er*inv_dist;
float3 f_ij = er*(1 - 5*er.y*er.y);
f_ij.y += 2*er.y;
f_ij *= inv_dist*inv_dist*inv_dist*inv_dist;
float inv_sep = 1.0f/sepdist;
f_ij += 2.0f*(inv_sep*inv_sep*inv_sep*inv_sep)*
expf(-nparams.spring*((1.0f/inv_dist)*inv_sep - 1.0f))*er;
//multiply by the "volume" of rad2
f_ij *= (radius2*radius2*radius2);
force += f_ij;
}
}
//convert force into physical units
force *= forceFactor*(radius1*radius1*radius1);
dForce[idx] = make_float4(force,0.0f);
applyBC(idx, deltaTime, radius1, p1, force, integrPos, newPos);
}
__global__ void magForcesK( const float4* dSortedPos, //i: pos we use to calculate forces
const float4* dMom, //i: the moment
const float4* integrPos, //i: pos we use as base to integrate from
const uint* nlist, //i: the neighbor list
const uint* num_neigh, //i: the number of inputs
float4* dForce, //o: the magnetic force on a particle
float4* newPos, //o: the integrated position
float deltaTime) //o: the timestep
{
uint idx = blockDim.x*blockIdx.x + threadIdx.x;
if(idx >= nparams.N)
return;
uint n_neigh = num_neigh[idx];
float4 pos1 = dSortedPos[idx];
//float4 pos1 = tex1Dfetch(pos_tex,idx);
float3 p1 = make_float3(pos1);
float radius1 = pos1.w;
float4 mom1 = dMom[idx];
//float4 mom1 = tex1Dfetch(mom_tex,idx);
float3 m1 = make_float3(mom1);
float Cp1 = mom1.w;
float3 force = make_float3(0,0,0);
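	// Accumulate the full dipole-dipole force over the neighbor list (minimum-image wrap in
	// x and z) plus an exponentially decaying contact repulsion; nonmagnetic particles
	// (Cp == 0) are given a fictitious moment for the repulsion term only.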
for(uint i = 0; i < n_neigh; i++)
{
uint neighbor = nlist[i*nparams.N + idx];
float4 pos2 = tex1Dfetch(pos_tex, neighbor);
float3 p2 = make_float3(pos2);
float radius2 = pos2.w;
float sepdist = radius1 + radius2;
float4 mom2 = tex1Dfetch(mom_tex, neighbor);
float3 m2 = make_float3(mom2);
float Cp2 = mom2.w;
float3 er = p1 - p2;//start it out as dr, then modify to get er
er.x = er.x - nparams.L.x*rintf(er.x*nparams.Linv.x);
er.z = er.z - nparams.L.x*rintf(er.z*nparams.Linv.z);
float lsq = er.x*er.x + er.y*er.y + er.z*er.z;
er = er*rsqrtf(lsq);
if(lsq <= nparams.forcedist_sq*sepdist*sepdist) {
float dm1m2 = dot(m1,m2);
float dm1er = dot(m1,er);
float dm2er = dot(m2,er);
force += 3.0f*MU_0*MU_C/(4*PI_F*lsq*lsq) *( dm1m2*er + dm1er*m2
+ dm2er*m1 - 5.0f*dm1er*dm2er*er);
//create a false moment for nonmagnetic particles
//note that here Cp gives the wrong volume, so the magnitude of
//the repulsion strength is wrong
m1 = (Cp1 == 0.0f) ? nparams.Cpol*nparams.extH : m1;
m2 = (Cp2 == 0.0f) ? nparams.Cpol*nparams.extH : m2;
dm1m2 = dot(m1,m2);
force += 3.0f*MU_0*MU_C*length(m1)*length(m2)/(2.0f*PI_F*sepdist*sepdist*sepdist*sepdist)*
expf(-nparams.spring*(sqrtf(lsq)/sepdist - 1))*er;
}
}
dForce[idx] = make_float4(force,0.0f);
applyBC(idx, deltaTime, radius1, p1, force, integrPos, newPos);
}
// for uniform finite dipoles
__global__ void finiteDipK( const float4* dSortedPos, //i: pos we use to calculate forces
const float4* integrPos, //i: pos we use as base to integrate from
const uint* nlist, //i: the neighbor list
const uint* num_neigh, //i: the number of inputs
float4* dForce, //o: the magnetic force on a particle
float4* newPos, //o: the integrated position
const float dipole_d, //i: finite dipole distance in units of diameters
const float F0, //i: point dipole F_0
const float sigma_0, //i: reference diam
float deltaTime) //o: the timestep
{
uint idx = blockDim.x*blockIdx.x + threadIdx.x;
if(idx >= nparams.N)
return;
uint n_neigh = num_neigh[idx];
float4 pos1 = dSortedPos[idx];
//float4 pos1 = tex1Dfetch(pos_tex,idx);
float3 p1 = make_float3(pos1);
float radius1 = pos1.w;
float3 force = make_float3(0,0,0);
const float finite_pre = sigma_0*sigma_0/(3.0f*dipole_d*dipole_d);
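	// Finite-dipole model: each moment is represented by two poles displaced by
	// +/- dipole_d*sigma_0 along y, so the pair force is built from the centre term (er_0)
	// and the two displaced terms (er_+, er_-), prefactored by sigma_0^2/(3*dipole_d^2).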
for(uint i = 0; i < n_neigh; i++)
{
uint neighbor = nlist[i*nparams.N + idx];
float4 pos2 = tex1Dfetch(pos_tex, neighbor);
float3 p2 = make_float3(pos2);
float radius2 = pos2.w;
float sepdist = radius1 + radius2;
float3 dr = p1 - p2;//start it out as dr, then modify to get er
dr.x = dr.x - nparams.L.x*rintf(dr.x*nparams.Linv.x);
dr.z = dr.z - nparams.L.x*rintf(dr.z*nparams.Linv.z);
float lsq = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
float3 er = dr*rsqrtf(lsq);
if(lsq <= nparams.forcedist_sq*sepdist*sepdist) {
//er_0
//create f_ij to attempt to fix numerical precision problems at small dipole_d
force += 2*expf(-nparams.spring*(sqrtf(lsq)/sepdist - 1))*er;
float3 f_ij = (2.0f/lsq)*er;
//er_+
dr.y += dipole_d*sigma_0;
lsq = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z; //replace with 2d*dr.y + d^2?
er = dr*rsqrtf(lsq);
f_ij += (-1.0f/lsq)*er;
//er_-
dr.y -= 2.0f*dipole_d*sigma_0;
lsq = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
er = dr*rsqrtf(lsq);
f_ij += (-1.0f/lsq)*er;
force += finite_pre*f_ij;
}
}
//convert back to real units
force *= F0;
dForce[idx] = make_float4(force,0.0f);
applyBC(idx, deltaTime, radius1, p1, force, integrPos, newPos);
}
__global__ void vertEdgeK(const uint* nlist,
const uint* num_neigh,
const float4* dPos,
uint* conn,
float maxcosth, //cos(th) above which the counter increments
float maxdistsq)
{
uint idx = blockDim.x*blockIdx.x + threadIdx.x;
if(idx >= nparams.N)
return;
uint n_neigh = num_neigh[idx];
float4 pos1 = dPos[idx];
//float4 pos1 = tex1Dfetch(pos_tex,idx);
float3 p1 = make_float3(pos1);
float radius1 = pos1.w;
uint connections = 0;
for(uint i = 0; i < n_neigh; i++)
{
uint neighbor = nlist[i*nparams.N + idx];
float4 pos2 = dPos[neighbor];
float3 p2 = make_float3(pos2);
float radius2 = pos2.w;
float sepdist = radius1 + radius2;
float3 er = p1 - p2;//start it out as dr, then modify to get er
er.x = er.x - nparams.L.x*rintf(er.x*nparams.Linv.x);
er.z = er.z - nparams.L.x*rintf(er.z*nparams.Linv.z);
float lsq = er.x*er.x + er.y*er.y + er.z*er.z;
er = er*rsqrtf(lsq);
if( (lsq < maxdistsq*sepdist*sepdist) && fabs(er.y) >= maxcosth)
connections++;
}
conn[idx] = connections; //for this to work, nlist must be regenned immediately after calling this
}
//note: gives non-physical results
__global__ void magFricForcesK( const float4* dSortedPos, //i: pos we use to calculate forces
const float4* dMom, //i: the moment
const float4* dForceIn, //i: the old force, used to find velocity
const float4* integrPos, //i: pos we use as base to integrate from
const uint* nlist, //i: the neighbor list
const uint* num_neigh, //i: the number of inputs
float4* dForceOut, //o: the magnetic force on a particle
float4* newPos, //o: the integrated position
float static_fric, //maximum static friction
float deltaTime) //i: the timestep
{
uint idx = blockDim.x*blockIdx.x + threadIdx.x;
if(idx >= nparams.N)
return;
uint n_neigh = num_neigh[idx];
float4 pos1 = dSortedPos[idx];
//float4 pos1 = tex1Dfetch(pos_tex,idx);
float3 p1 = make_float3(pos1);
float radius1 = pos1.w;
float Cd1 = 6.0f*PI_F*radius1*nparams.visc;
float4 mom1 = dMom[idx];
//float4 mom1 = tex1Dfetch(mom_tex,idx);
float3 m1 = make_float3(mom1);
float Cp1 = mom1.w;
float3 f1 = make_float3(dForceIn[idx]);
float3 force = make_float3(0,0,0);
for(uint i = 0; i < n_neigh; i++)
{
uint neighbor = nlist[i*nparams.N + idx];
float4 pos2 = tex1Dfetch(pos_tex, neighbor);
float3 p2 = make_float3(pos2);
float radius2 = pos2.w;
float Cd2 = 6.0f*PI_F*radius1*nparams.visc;
float4 mom2 = tex1Dfetch(mom_tex, neighbor);
float3 m2 = make_float3(mom2);
float Cp2 = mom2.w;
float3 f2 = make_float3(dForceIn[idx]);
float3 er = p1 - p2;//start it out as dr, then modify to get er
er.x = er.x - nparams.L.x*rintf(er.x*nparams.Linv.x);
er.z = er.z - nparams.L.x*rintf(er.z*nparams.Linv.z);
float lsq = er.x*er.x + er.y*er.y + er.z*er.z;
er = er*rsqrtf(lsq);
if(lsq <= nparams.max_fdr_sq){
float dm1m2 = dot(m1,m2);
float dm1er = dot(m1,er);
float dm2er = dot(m2,er);
force += 3.0f*MU_0*MU_C/(4*PI_F*lsq*lsq) *( dm1m2*er + dm1er*m2
+ dm2er*m1 - 5.0f*dm1er*dm2er*er);
//create a false moment for nonmagnetic particles
//note that here Cp gives the wrong volume, so the magnitude of
//the repulsion strength is wrong
m1 = (Cp1 == 0.0f) ? nparams.Cpol*nparams.extH : m1;
m2 = (Cp2 == 0.0f) ? nparams.Cpol*nparams.extH : m2;
dm1m2 = dot(m1,m2);
float sepdist = radius1 + radius2;
float normalforce = 3.0f*MU_0*MU_C*dm1m2/(2.0f*PI_F*sepdist*sepdist*sepdist*sepdist)*
expf(-nparams.spring*(sqrtf(lsq)/sepdist - 1.0f));
force += normalforce*er;
if(lsq <= sepdist*sepdist){
float3 v1 = f1/Cd1 + nparams.shear*p1.y;
v1 = (p1.y >= nparams.L.y - nparams.pin_d*radius1) ?
make_float3(nparams.shear*nparams.L.y,0.0f,0.0f) : v1;
float3 v2 = f2/Cd2 + nparams.shear*p2.y;
v2 = (p2.y >= nparams.L.y - nparams.pin_d*radius2) ?
make_float3(nparams.shear*nparams.L.y,0.0f,0.0f) : v2;
float3 relvel = v1 - v2;
float3 tanvel = relvel - dot(er,relvel)*er;
float tanv_sq = tanvel.x*tanvel.x + tanvel.y*tanvel.y + tanvel.z*tanvel.z;
if(tanv_sq*nparams.tanfric*nparams.tanfric < normalforce*normalforce) {
force -= tanvel*nparams.tanfric;
} else {
tanvel = tanvel*rsqrtf(tanv_sq); //make it a unit vector;
force -= tanvel*static_fric*normalforce;
}
}
}
}
dForceOut[idx] = make_float4(force,0.0f);
float ybot = p1.y - nparams.origin.y;
force.x += nparams.shear*ybot*Cd1;
//apply flow BCs
if(ybot < nparams.pin_d*radius1)
force = make_float3(0,0,0);
if(ybot > nparams.L.y - nparams.pin_d*radius1)
force = make_float3(nparams.shear*nparams.L.y*Cd1,0,0);
float3 ipos = make_float3(integrPos[idx]);
newPos[idx] = make_float4(ipos + force/Cd1*deltaTime, radius1);
}
__global__ void mutualMagnK(const float4* pos,
const float4* oldMag,
float4* newMag,
const uint* nlist,
const uint* numNeigh)
{
uint idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= nparams.N) return;
uint n_neigh = numNeigh[idx];
float4 pos1 = pos[idx];
float3 p1 = make_float3(pos1);
//float radius1 = pos1.w;
float4 omag = oldMag[idx];
float3 mom1 = make_float3(omag);
float Cp1 = omag.w;
if(Cp1 == 0.0f) { //if nonmagnetic
newMag[idx] = make_float4(0.0f,0.0f,0.0f,Cp1);
return;
}
float3 H = nparams.extH;
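	// Local field = external field plus the dipolar fields of all neighbors within range;
	// the new moment is then Cp1*H (linear-response update toward self-consistency).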
for(uint i = 0; i < n_neigh; i++) {
uint neighbor = nlist[i*nparams.N + idx];
float4 pos2 = tex1Dfetch(pos_tex, neighbor);
float3 p2 = make_float3(pos2);
//float radius2 = pos2.w;
float4 mom2 = tex1Dfetch(mom_tex, neighbor);
float3 m2 = make_float3(mom2);
//float Cp2 = mom2.w;
float3 er = p1 - p2;//start it out as dr, then modify to get er
er.x = er.x - nparams.L.x*rintf(er.x*nparams.Linv.x);
er.z = er.z - nparams.L.x*rintf(er.z*nparams.Linv.z);
float lsq = er.x*er.x + er.y*er.y + er.z*er.z;
if(lsq <= nparams.max_fdr_sq) {
float invdist = rsqrtf(lsq);
er = er*invdist;
H += 1.0f/(4.0f*PI_F)*(3.0f*dot(m2,er)*er - m2)*invdist*invdist*invdist;
}
}
newMag[idx] = make_float4(Cp1*H, Cp1);
}
__global__ void integrateRK4K(
const float4* oldPos,
float4* PosA,
const float4* PosB,
const float4* PosC,
const float4* PosD,
float4* forceA,
const float4* forceB,
const float4* forceC,
const float4* forceD,
const float deltaTime,
const uint numParticles)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numParticles) return; // handle case when no. of particles not multiple of block size
float4 old = oldPos[index];
float3 oldp = make_float3(old);
float radius = old.w;
	//compute k1, k2; the factor of 2.0 is used because these stages were advanced with a timestep of 0.5*dt
float3 k1 = 2.0f*(make_float3(PosA[index]) - oldp);
float3 k2 = 2.0f*(make_float3(PosB[index]) - oldp);
float3 k3 = make_float3(PosC[index]) - oldp;
float3 k4 = make_float3(PosD[index]) - oldp;
oldp += (1.0f/6.0f)*(k1 + 2.0f*k2 + 2.0f*k3 + k4);
oldp.x -= nparams.L.x*rintf(oldp.x*nparams.Linv.x);//this runs the risk of floating point errors pushing things outside the box
oldp.z -= nparams.L.z*rintf(oldp.z*nparams.Linv.z);
if (oldp.y > -1.0f*nparams.origin.y - radius ) { oldp.y = -1.0f*nparams.origin.y - radius;}
if (oldp.y < nparams.origin.y + radius ) { oldp.y = nparams.origin.y + radius; }
PosA[index] = make_float4(oldp, radius);
float4 f1 = forceA[index];
float nothin = f1.w;//doesn't actually hold any value, but might someday
float3 force1 = make_float3(f1);
float3 force2 = make_float3(forceB[index]);
float3 force3 = make_float3(forceC[index]);
float3 force4 = make_float3(forceD[index]);
	float3 fcomp = (force1 + 2*force2 + 2*force3 + force4)/6.0f;//RK4 (Simpson-type) weighted average of the stage forces
forceA[index] = make_float4(fcomp, nothin);//averaged force
}
__global__ void bogacki_ynp1k(
const float4* d_yn,
const float4* d_ynpk1,
const float4* d_ynpk2,
const float4* d_ynpk3,
float4* d_ynp1,
const float deltaTime,
const uint numParticles)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numParticles) return; // handle case when no. of particles not multiple of block size
float4 old = d_yn[index];
float3 yn = make_float3(old);
float radius = old.w;
float3 k1 = 2.0f*(make_float3(d_ynpk1[index]) - yn);
float3 k2 = 4.0/3.0f*(make_float3(d_ynpk2[index]) - yn);
float3 k3 = make_float3(d_ynpk3[index]) - yn;
//float Cd = 6*PI_F*nparams.visc*radius;
float3 ynp1 = yn + (1.0f/9.0f)*(2.0f*k1 + 3.0f*k2 + 4.0f*k3);
ynp1.x -= nparams.L.x*rintf(ynp1.x*nparams.Linv.x);//this runs the risk of floating point errors pushing things outside the box
ynp1.z -= nparams.L.z*rintf(ynp1.z*nparams.Linv.z);
if (ynp1.y > -1.0f*nparams.origin.y - radius ) { ynp1.y = -1.0f*nparams.origin.y - radius;}
if (ynp1.y < nparams.origin.y + radius ) { ynp1.y = nparams.origin.y + radius; }
d_ynp1[index] = make_float4(ynp1, radius);
}
__global__ void collisionK( const float4* sortedPos, //i: pos we use to calculate forces
const float4* oldVel,
const uint* nlist, //i: the neighbor list
const uint* num_neigh, //i: the number of inputs
float4* newVel, //o: the magnetic force on a particle
float4* newPos, //o: the integrated position
float radExp,
float deltaTime) //i: the timestep
{
uint idx = blockDim.x*blockIdx.x + threadIdx.x;
if(idx >= nparams.N)
return;
uint n_neigh = num_neigh[idx];
float4 pos1 = sortedPos[idx];
float3 p1 = make_float3(pos1);
float radius1 = pos1.w;
float3 v1 = make_float3(oldVel[idx]);
float3 force = make_float3(0,0,0);
for(uint i = 0; i < n_neigh; i++)
{
uint neighbor = nlist[i*nparams.N + idx];
float4 pos2 = tex1Dfetch(pos_tex, neighbor);
float3 p2 = make_float3(pos2);
float radius2 = pos2.w;
float3 v2 = make_float3(tex1Dfetch(vel_tex,neighbor));
float3 er = p1 - p2;//start it out as dr, then modify to get er
er.x = er.x - nparams.L.x*rintf(er.x*nparams.Linv.x);
er.z = er.z - nparams.L.x*rintf(er.z*nparams.Linv.z);
float dist = sqrtf(er.x*er.x + er.y*er.y + er.z*er.z);
float sepdist = radExp*(radius1 + radius2);
//do a quicky spring
if(dist <= sepdist){
er = er/dist;
float3 relVel = v2-v1;
force += -1e7f*sepdist*(dist - sepdist)*er;
force += .08f*relVel;
}
}
//yes this integration is totally busted, but it works, soooo
v1 = (v1 + force*deltaTime/(2e3f*radius1))*.8f;
p1 = p1 + v1*deltaTime;
p1.x -= nparams.L.x * rintf(p1.x*nparams.Linv.x);
p1.z -= nparams.L.x * rintf(p1.z*nparams.Linv.z);
if(p1.y+radius1 > -nparams.origin.y){
p1.y = -nparams.origin.y - radius1;
v1.y*= -.03f;
}
if(p1.y-radius1 < nparams.origin.y){
p1.y = nparams.origin.y + radius1;
v1.y*= -.03f;
}
newVel[idx] = make_float4(v1);
newPos[idx] = make_float4(p1, radius1);
}
|
765a712bec9755beb561ced3cae77f0b059b6c91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "cblas.h"
#include "SparsePSA_cuda_kernel.h"
#define CUDA_1D_KERNEL_LOOP(i, n)\
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;\
i += blockDim.x * gridDim.x)
/******************************************ForwardFunction*******************************/
//DownSample
__global__ void DownSampleForward_gpu(int nthreads, int full_feature_channels, int full_feature_h, int full_feature_w,
				int down_feature_h, int down_feature_w, int kernel, float *bottom1, float *pool1){//pool1 is all zeros on entry
CUDA_1D_KERNEL_LOOP(index, nthreads){
int c = index %full_feature_channels;
int n = index /full_feature_channels;
for(int h=0; h<down_feature_h; h++ ){
for(int w=0; w<down_feature_w; w++){
int h_index=min(h*kernel + kernel/2,full_feature_h-1);
int w_index=min(w*kernel + kernel/2,full_feature_w-1);
        pool1[(n*full_feature_channels + c)*down_feature_h*down_feature_w + h*down_feature_w + w]=//!!!channel
bottom1[(n*full_feature_channels + c)*full_feature_h*full_feature_w + h_index*full_feature_w + w_index];
}
}
}
}
//UpSample
__global__ void UpSampleForward_gpu(int nthreads, int full_feature_channels, int full_feature_h, int full_feature_w,
				int down_feature_h, int down_feature_w, int kernel, float *Re1, float *top){//top is all zeros on entry
CUDA_1D_KERNEL_LOOP(index, nthreads){
int c = index %full_feature_channels;
int n = index /full_feature_channels;
for(int h=0; h<down_feature_h; h++ ){
for(int w=0; w<down_feature_w; w++){
int h_index=min(h*kernel + kernel/2,full_feature_h-1);
int w_index=min(w*kernel + kernel/2,full_feature_w-1);
top[(n*full_feature_channels + c)*full_feature_h*full_feature_w + h_index*full_feature_w + w_index]=
          Re1[(n*full_feature_channels + c)*down_feature_h*down_feature_w + h*down_feature_w + w];
}
}
}
}
//Distribute
__global__ void DistributeForward_gpu(int nthreads, int down_feature_h, int down_feature_w, float *bottom2, float *atten1){
int feature_channels = down_feature_h*down_feature_w;
CUDA_1D_KERNEL_LOOP(index, nthreads){//channel loop // index??? nthreads???
int w = index % down_feature_w;
int h = (index / down_feature_w) % down_feature_h;//location in map, h&w choose channel
int n = index /down_feature_h/down_feature_w;
    for(int hindx=0; hindx<down_feature_h; hindx++ ){
      for(int windx=0; windx<down_feature_w; windx++){
atten1[(n*feature_channels + (h*down_feature_w + w))*down_feature_h*down_feature_w + hindx*down_feature_w +windx]=
bottom2[(n*feature_channels + hindx*down_feature_w +windx)*down_feature_h*down_feature_w + h*down_feature_w + w];
}
}
}
}
//main function
void SparsePSAForward_gpu_kernel(int type, int kernel, int batch, int full_feature_channels, int full_feature_h, int full_feature_w,
float *pool, float *Re, float *atten,
float *bottom1, float *bottom2, float *top, hipStream_t stream){
int down_feature_h=(full_feature_h-(kernel-1))/kernel + 1;
int down_feature_w=(full_feature_w-(kernel-1))/kernel + 1;
pool1 = pool;
Re1 = Re;
atten1 = atten;
int nthreads_DownUp = batch*full_feature_channels;
int nthreads_DistriCollect = batch*down_feature_h*down_feature_w;
int kThreadsPerBlock = 1024;
float normalization_factor_ = float(down_feature_h * down_feature_w);
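  // Pipeline: (1) downsample bottom1 -> pool1 on the coarse grid; (2) build the attention map
  // atten1 from bottom2 (collect: used as-is, distribute: transposed by DistributeForward);
  // (3) batched sgemm pool1 x atten1 -> Re1, normalised by the number of coarse positions;
  // (4) upsample Re1 back to the full-resolution output top.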
//bottom1 -> pool1
hipLaunchKernelGGL(( DownSampleForward_gpu), dim3((nthreads_DownUp + kThreadsPerBlock-1)/kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream,
nthreads_DownUp, full_feature_channels, full_feature_h, full_feature_w, down_feature_h, down_feature_w, kernel, bottom1, pool1);//output pool1
//bottom2 -> atten1
switch(type){//0:collect 1:distribute
case 0:
      atten1 = bottom2;
      break;
case 1:
hipLaunchKernelGGL(( DistributeForward_gpu), dim3((nthreads_DistriCollect + kThreadsPerBlock-1)/kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream,
nthreads_DistriCollect, down_feature_h, down_feature_w, bottom2, atten1);
}
//pool1&atten1 -> Re1
for(int n = 0; n < batch; n++) {
float *this_pool1 = pool1 + n*full_feature_channels*down_feature_h*down_feature_w;
float *this_atten1 = atten1 + n*(down_feature_h*down_feature_w)*(down_feature_h*down_feature_w);
float *this_Re1 = Re1 + n*full_feature_channels*down_feature_h*down_feature_w;
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, full_feature_channels, down_feature_h*down_feature_w, down_feature_h*down_feature_w,
float(1.0/normalization_factor_), this_pool1, down_feature_h*down_feature_w,
this_atten1, down_feature_h*down_feature_w,
float(0), this_Re1, down_feature_h*down_feature_w);
}
//Re1 -> top
hipLaunchKernelGGL(( UpSampleForward_gpu), dim3((nthreads_DownUp + kThreadsPerBlock-1)/kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream,
nthreads_DownUp, full_feature_channels, full_feature_h,full_feature_w ,down_feature_h, down_feature_w, kernel, Re1, top);//output top
}
/**************************************Backward Function*****************************/
//DownSample
__global__ void DownSampleBackward_gpu (int nthreads, int full_feature_channels, int full_feature_h, int full_feature_w,
int down_feature_h, int down_feature_w, int kernel, float *pool1_diff, float *bottom1_diff){
CUDA_1D_KERNEL_LOOP(index, nthreads){
int c = index %full_feature_channels;
int n = index /full_feature_channels;
for(int h=0; h<down_feature_h; h++ ){
for(int w=0; w<down_feature_w; w++){
int h_index=min(h*kernel + kernel/2,full_feature_h-1);
int w_index=min(w*kernel + kernel/2,full_feature_w-1);
        bottom1_diff[(n*full_feature_channels+c)*full_feature_h*full_feature_w + h_index*full_feature_w + w_index]=
          pool1_diff[(n*full_feature_channels+c)*down_feature_h*down_feature_w + h*down_feature_w + w];
}
}
}
}
//Upsample
__global__ void UpSampleBackward_gpu(int nthreads, int full_feature_channels, int full_feature_h, int full_feature_w,
int down_feature_h, int down_feature_w, int kernel, float *top_diff, float *Re1_diff){//top0
CUDA_1D_KERNEL_LOOP(index, nthreads){
int c = index %full_feature_channels;
int n = index /full_feature_channels;
for(int h=0; h<down_feature_h; h++ ){
for(int w=0; w<down_feature_w; w++){
int h_index=min(h*kernel + kernel/2,full_feature_h-1);
int w_index=min(w*kernel + kernel/2,full_feature_w-1);
        Re1_diff[(n*full_feature_channels+c)*down_feature_h*down_feature_w + h*down_feature_w + w] =
          top_diff[(n*full_feature_channels+c)*full_feature_h*full_feature_w + h_index*full_feature_w + w_index];
}
}
}
}
//Distribute
__global__ void DistributeBackward_gpu(int nthreads, int down_feature_h, int down_feature_w, float *atten1_diff, float *bottom2_diff){
int feature_channels = down_feature_h*down_feature_w;
CUDA_1D_KERNEL_LOOP(index, nthreads){//channel loop // index??? nthreads???
int w = index % down_feature_w;
int h = (index / down_feature_w) % down_feature_h;//location in map, h&w choose channel
int n = index /down_feature_h/down_feature_w;
    for(int hindx=0; hindx<down_feature_h; hindx++ ){
      for(int windx=0; windx<down_feature_w; windx++){
bottom2_diff[(n*feature_channels + hindx*down_feature_w +windx)*down_feature_h*down_feature_w + h*down_feature_w + w]=
atten1_diff[(n*feature_channels + (h*down_feature_w + w))*down_feature_h*down_feature_w + hindx*down_feature_w +windx];
}
}
}
}
//main function
void SparsePSABackward_gpu_kernel(int type, int kernel, int batch, int full_feature_channels, int full_feature_h, int full_feature_w,
float *bottom1, float *bottom2, float *top,
float *bottom1_diff, float *bottom2_diff, float *top_diff, hipStream_t stream){
int down_feature_h=(full_feature_h-(kernel-1))/kernel + 1;
int down_feature_w=(full_feature_w-(kernel-1))/kernel + 1;
float *pool1_diff, *Re1_diff, *atten1_diff;
hipMallocManaged((void**)&pool1_diff, sizeof(float)*batch*full_feature_channels*down_feature_h*down_feature_w);
hipMallocManaged((void**)&Re1_diff, sizeof(float)*batch*full_feature_channels*down_feature_h*down_feature_w);
hipMallocManaged((void**)&atten1_diff, sizeof(float)*batch*down_feature_h*down_feature_w*down_feature_h*down_feature_w);
  hipMemset(pool1_diff, 0, sizeof(float)*batch*full_feature_channels*down_feature_h*down_feature_w);
  hipMemset(Re1_diff, 0, sizeof(float)*batch*full_feature_channels*down_feature_h*down_feature_w);
  hipMemset(atten1_diff, 0, sizeof(float)*batch*down_feature_h*down_feature_w*down_feature_h*down_feature_w);
int nthreads_DownUp = batch*full_feature_channels;
int nthreads_DistriCollect = batch*down_feature_h*down_feature_w;
int kThreadsPerBlock = 1024;
float normalization_factor_ = float(down_feature_h * down_feature_w);
//BP top_diff -> Re1_diff
hipLaunchKernelGGL(( UpSampleBackward_gpu), dim3((nthreads_DownUp + kThreadsPerBlock-1)/kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream,
nthreads_DownUp, full_feature_channels, full_feature_h, full_feature_w, down_feature_h, down_feature_w, kernel, top_diff, Re1_diff);
//BP Re1_diff -> atten1_diff
for (int n=0; n<batch; n++){
float *this_Re1_diff= Re1_diff + n*full_feature_channels*down_feature_h*down_feature_w;
    float *this_pool1= pool1 + n*full_feature_channels*down_feature_h*down_feature_w;
float *this_atten1_diff= atten1_diff + n*down_feature_h*down_feature_w*down_feature_h*down_feature_w;
cblas_sgemm(CblasRowMajor, CblasTrans, CblasNoTrans, down_feature_h*down_feature_w, down_feature_h*down_feature_w, full_feature_channels,
float(1.0/normalization_factor_), this_pool1, down_feature_h*down_feature_w,
this_Re1_diff, down_feature_h*down_feature_w,
float(0), this_atten1_diff, down_feature_h*down_feature_w);
}
//BP Re1_diff -> pool1_diff
for (int n=0; n<batch; n++){
float *this_Re1_diff= Re1_diff + n*full_feature_channels*down_feature_h*down_feature_w;
float *this_atten1= atten1 + n*down_feature_h*down_feature_w*down_feature_h*down_feature_w;
float *this_pool1_diff= pool1_diff + n*full_feature_channels*down_feature_h*down_feature_w;
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, full_feature_channels, down_feature_h * down_feature_w, down_feature_h*down_feature_w,
float(1.0/normalization_factor_), this_Re1_diff, down_feature_h * down_feature_w,
this_atten1, down_feature_h * down_feature_w,
float(0), this_pool1_diff, down_feature_h * down_feature_w);
}
//BP pool1_diff -> bottom1_diff
hipLaunchKernelGGL(( DownSampleBackward_gpu), dim3((nthreads_DownUp + kThreadsPerBlock-1)/kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream,
nthreads_DownUp, full_feature_channels, full_feature_h, full_feature_w, down_feature_h,down_feature_w, kernel, pool1_diff,bottom1_diff);
//BP atten1_diff -> bottom2_diff
switch(type){//0:collect 1:distribute
case 0:
      hipMemcpy(bottom2_diff, atten1_diff, sizeof(float)*batch*down_feature_h*down_feature_w*down_feature_h*down_feature_w, hipMemcpyDeviceToDevice);
      break;
case 1:
hipLaunchKernelGGL(( DistributeBackward_gpu), dim3((nthreads_DistriCollect+kThreadsPerBlock-1)/kThreadsPerBlock), dim3(kThreadsPerBlock),0, stream,
nthreads_DistriCollect, down_feature_h, down_feature_w, atten1_diff, bottom2_diff);
}
hipFree(pool1_diff);
hipFree(Re1_diff);
hipFree(atten1_diff);
}
| 765a712bec9755beb561ced3cae77f0b059b6c91.cu | #include <stdio.h>
#include <math.h>
#include <float.h>
#include "cblas.h"
#include "SparsePSA_cuda_kernel.h"
#define CUDA_1D_KERNEL_LOOP(i, n)\
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;\
i += blockDim.x * gridDim.x)
/******************************************ForwardFunction*******************************/
//DownSample
__global__ void DownSampleForward_gpu(int nthreads, int full_feature_channels, int full_feature_h, int full_feature_w,
				int down_feature_h, int down_feature_w, int kernel, float *bottom1, float *pool1){//pool1 is all zeros on entry
CUDA_1D_KERNEL_LOOP(index, nthreads){
int c = index %full_feature_channels;
int n = index /full_feature_channels;
for(int h=0; h<down_feature_h; h++ ){
for(int w=0; w<down_feature_w; w++){
int h_index=min(h*kernel + kernel/2,full_feature_h-1);
int w_index=min(w*kernel + kernel/2,full_feature_w-1);
        pool1[(n*full_feature_channels + c)*down_feature_h*down_feature_w + h*down_feature_w + w]=//!!!channel
bottom1[(n*full_feature_channels + c)*full_feature_h*full_feature_w + h_index*full_feature_w + w_index];
}
}
}
}
//UpSample
__global__ void UpSampleForward_gpu(int nthreads, int full_feature_channels, int full_feature_h, int full_feature_w,
				int down_feature_h, int down_feature_w, int kernel, float *Re1, float *top){//top is all zeros on entry
CUDA_1D_KERNEL_LOOP(index, nthreads){
int c = index %full_feature_channels;
int n = index /full_feature_channels;
for(int h=0; h<down_feature_h; h++ ){
for(int w=0; w<down_feature_w; w++){
int h_index=min(h*kernel + kernel/2,full_feature_h-1);
int w_index=min(w*kernel + kernel/2,full_feature_w-1);
top[(n*full_feature_channels + c)*full_feature_h*full_feature_w + h_index*full_feature_w + w_index]=
          Re1[(n*full_feature_channels + c)*down_feature_h*down_feature_w + h*down_feature_w + w];
}
}
}
}
//Distribute
__global__ void DistributeForward_gpu(int nthreads, int down_feature_h, int down_feature_w, float *bottom2, float *atten1){
int feature_channels = down_feature_h*down_feature_w;
CUDA_1D_KERNEL_LOOP(index, nthreads){//channel loop // index??? nthreads???
int w = index % down_feature_w;
int h = (index / down_feature_w) % down_feature_h;//location in map, h&w choose channel
int n = index /down_feature_h/down_feature_w;
    for(int hindx=0; hindx<down_feature_h; hindx++ ){
      for(int windx=0; windx<down_feature_w; windx++){
atten1[(n*feature_channels + (h*down_feature_w + w))*down_feature_h*down_feature_w + hindx*down_feature_w +windx]=
bottom2[(n*feature_channels + hindx*down_feature_w +windx)*down_feature_h*down_feature_w + h*down_feature_w + w];
}
}
}
}
//main function
void SparsePSAForward_gpu_kernel(int type, int kernel, int batch, int full_feature_channels, int full_feature_h, int full_feature_w,
float *pool, float *Re, float *atten,
float *bottom1, float *bottom2, float *top, cudaStream_t stream){
int down_feature_h=(full_feature_h-(kernel-1))/kernel + 1;
int down_feature_w=(full_feature_w-(kernel-1))/kernel + 1;
pool1 = pool;
Re1 = Re;
atten1 = atten;
int nthreads_DownUp = batch*full_feature_channels;
int nthreads_DistriCollect = batch*down_feature_h*down_feature_w;
int kThreadsPerBlock = 1024;
float normalization_factor_ = float(down_feature_h * down_feature_w);
//bottom1 -> pool1
DownSampleForward_gpu<<<(nthreads_DownUp + kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>
(nthreads_DownUp, full_feature_channels, full_feature_h, full_feature_w, down_feature_h, down_feature_w, kernel, bottom1, pool1);//output pool1
//bottom2 -> atten1
switch(type){//0:collect 1:distribute
case 0:
      atten1 = bottom2;
      break;
case 1:
DistributeForward_gpu<<<(nthreads_DistriCollect + kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>
(nthreads_DistriCollect, down_feature_h, down_feature_w, bottom2, atten1);
}
//pool1&atten1 -> Re1
for(int n = 0; n < batch; n++) {
float *this_pool1 = pool1 + n*full_feature_channels*down_feature_h*down_feature_w;
float *this_atten1 = atten1 + n*(down_feature_h*down_feature_w)*(down_feature_h*down_feature_w);
float *this_Re1 = Re1 + n*full_feature_channels*down_feature_h*down_feature_w;
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, full_feature_channels, down_feature_h*down_feature_w, down_feature_h*down_feature_w,
float(1.0/normalization_factor_), this_pool1, down_feature_h*down_feature_w,
this_atten1, down_feature_h*down_feature_w,
float(0), this_Re1, down_feature_h*down_feature_w);
}
//Re1 -> top
UpSampleForward_gpu<<<(nthreads_DownUp + kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>
(nthreads_DownUp, full_feature_channels, full_feature_h,full_feature_w ,down_feature_h, down_feature_w, kernel, Re1, top);//output top
}
/**************************************Backward Function*****************************/
//DownSample
__global__ void DownSampleBackward_gpu (int nthreads, int full_feature_channels, int full_feature_h, int full_feature_w,
int down_feature_h, int down_feature_w, int kernel, float *pool1_diff, float *bottom1_diff){
CUDA_1D_KERNEL_LOOP(index, nthreads){
int c = index %full_feature_channels;
int n = index /full_feature_channels;
for(int h=0; h<down_feature_h; h++ ){
for(int w=0; w<down_feature_w; w++){
int h_index=min(h*kernel + kernel/2,full_feature_h-1);
int w_index=min(w*kernel + kernel/2,full_feature_w-1);
        bottom1_diff[(n*full_feature_channels+c)*full_feature_h*full_feature_w + h_index*full_feature_w + w_index]=
          pool1_diff[(n*full_feature_channels+c)*down_feature_h*down_feature_w + h*down_feature_w + w];
}
}
}
}
//Upsample
__global__ void UpSampleBackward_gpu(int nthreads, int full_feature_channels, int full_feature_h, int full_feature_w,
int down_feature_h, int down_feature_w, int kernel, float *top_diff, float *Re1_diff){//top为全0
CUDA_1D_KERNEL_LOOP(index, nthreads){
int c = index %full_feature_channels;
int n = index /full_feature_channels;
for(int h=0; h<down_feature_h; h++ ){
for(int w=0; w<down_feature_w; w++){
int h_index=min(h*kernel + kernel/2,full_feature_h-1);
int w_index=min(w*kernel + kernel/2,full_feature_w-1);
        Re1_diff[(n*full_feature_channels+c)*down_feature_h*down_feature_w + h*down_feature_w + w] =
          top_diff[(n*full_feature_channels+c)*full_feature_h*full_feature_w + h_index*full_feature_w + w_index];
}
}
}
}
//Distribute
__global__ void DistributeBackward_gpu(int nthreads, int down_feature_h, int down_feature_w, float *atten1_diff, float *bottom2_diff){
int feature_channels = down_feature_h*down_feature_w;
CUDA_1D_KERNEL_LOOP(index, nthreads){//channel loop // index??? nthreads???
int w = index % down_feature_w;
int h = (index / down_feature_w) % down_feature_h;//location in map, h&w choose channel
int n = index /down_feature_h/down_feature_w;
    for(int hindx=0; hindx<down_feature_h; hindx++ ){
      for(int windx=0; windx<down_feature_w; windx++){
bottom2_diff[(n*feature_channels + hindx*down_feature_w +windx)*down_feature_h*down_feature_w + h*down_feature_w + w]=
atten1_diff[(n*feature_channels + (h*down_feature_w + w))*down_feature_h*down_feature_w + hindx*down_feature_w +windx];
}
}
}
}
//main function
void SparsePSABackward_gpu_kernel(int type, int kernel, int batch, int full_feature_channels, int full_feature_h, int full_feature_w,
float *bottom1, float *bottom2, float *top,
float *bottom1_diff, float *bottom2_diff, float *top_diff, cudaStream_t stream){
int down_feature_h=(full_feature_h-(kernel-1))/kernel + 1;
int down_feature_w=(full_feature_w-(kernel-1))/kernel + 1;
float *pool1_diff, *Re1_diff, *atten1_diff;
cudaMallocManaged((void**)&pool1_diff, sizeof(float)*batch*full_feature_channels*down_feature_h*down_feature_w);
cudaMallocManaged((void**)&Re1_diff, sizeof(float)*batch*full_feature_channels*down_feature_h*down_feature_w);
cudaMallocManaged((void**)&atten1_diff, sizeof(float)*batch*down_feature_h*down_feature_w*down_feature_h*down_feature_w);
  cudaMemset(pool1_diff, 0, sizeof(float)*batch*full_feature_channels*down_feature_h*down_feature_w);
  cudaMemset(Re1_diff, 0, sizeof(float)*batch*full_feature_channels*down_feature_h*down_feature_w);
  cudaMemset(atten1_diff, 0, sizeof(float)*batch*down_feature_h*down_feature_w*down_feature_h*down_feature_w);
int nthreads_DownUp = batch*full_feature_channels;
int nthreads_DistriCollect = batch*down_feature_h*down_feature_w;
int kThreadsPerBlock = 1024;
float normalization_factor_ = float(down_feature_h * down_feature_w);
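  // Backward pipeline: top_diff is first restricted to the coarse grid (Re1_diff); two batched
  // sgemms then produce atten1_diff (pool1^T x Re1_diff) and pool1_diff (Re1_diff x atten1^T);
  // finally pool1_diff is scattered back to bottom1_diff and the attention gradient is routed
  // to bottom2_diff (directly for collect, transposed for distribute).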
//BP top_diff -> Re1_diff
UpSampleBackward_gpu<<<(nthreads_DownUp + kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>
(nthreads_DownUp, full_feature_channels, full_feature_h, full_feature_w, down_feature_h, down_feature_w, kernel, top_diff, Re1_diff);
//BP Re1_diff -> atten1_diff
for (int n=0; n<batch; n++){
float *this_Re1_diff= Re1_diff + n*full_feature_channels*down_feature_h*down_feature_w;
    float *this_pool1= pool1 + n*full_feature_channels*down_feature_h*down_feature_w;
float *this_atten1_diff= atten1_diff + n*down_feature_h*down_feature_w*down_feature_h*down_feature_w;
cblas_sgemm(CblasRowMajor, CblasTrans, CblasNoTrans, down_feature_h*down_feature_w, down_feature_h*down_feature_w, full_feature_channels,
float(1.0/normalization_factor_), this_pool1, down_feature_h*down_feature_w,
this_Re1_diff, down_feature_h*down_feature_w,
float(0), this_atten1_diff, down_feature_h*down_feature_w);
}
//BP Re1_diff -> pool1_diff
for (int n=0; n<batch; n++){
float *this_Re1_diff= Re1_diff + n*full_feature_channels*down_feature_h*down_feature_w;
float *this_atten1= atten1 + n*down_feature_h*down_feature_w*down_feature_h*down_feature_w;
float *this_pool1_diff= pool1_diff + n*full_feature_channels*down_feature_h*down_feature_w;
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, full_feature_channels, down_feature_h * down_feature_w, down_feature_h*down_feature_w,
float(1.0/normalization_factor_), this_Re1_diff, down_feature_h * down_feature_w,
this_atten1, down_feature_h * down_feature_w,
float(0), this_pool1_diff, down_feature_h * down_feature_w);
}
//BP pool1_diff -> bottom1_diff
DownSampleBackward_gpu<<<(nthreads_DownUp + kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>
(nthreads_DownUp, full_feature_channels, full_feature_h, full_feature_w, down_feature_h,down_feature_w, kernel, pool1_diff,bottom1_diff);
//BP atten1_diff -> bottom2_diff
switch(type){//0:collect 1:distribute
case 0:
      cudaMemcpy(bottom2_diff, atten1_diff, sizeof(float)*batch*down_feature_h*down_feature_w*down_feature_h*down_feature_w, cudaMemcpyDeviceToDevice);
      break;
case 1:
DistributeBackward_gpu<<<(nthreads_DistriCollect+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock,0, stream>>>
(nthreads_DistriCollect, down_feature_h, down_feature_w, atten1_diff, bottom2_diff);
}
cudaFree(pool1_diff);
cudaFree(Re1_diff);
cudaFree(atten1_diff);
}
|
83b996d48fc862963626ee18e3235c2c37cabe39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "fourier.cuda.h"
#include "constants.h"
#define TH_PER_BLOCK 512
#define ffloat double
// temperature
ffloat T = 1;
// number of harmonics
int host_N = 20;
// grid along phi_y consists of 2*M+1 elements
const int host_M = 2559; // 2900; // 2559; // 2175;
//const int host_center_M = host_M/2;
//int host_M = 767;
// time step
//ffloat host_dt = 0.000005;
//ffloat host_dt = 0.000003;
//ffloat host_dt = 0.000025;
ffloat host_dt = 0.0001; //0.0001;
//ffloat host_dt = 0.0005; //0.0001;
// grid step along phi_y
ffloat host_dPhi = 0;
ffloat PhiYmax;
ffloat t_max = 5;
#define phi_y(m) (host_dPhi*((m)-host_M-1))
#define dev_phi_y(m) (dPhi*((m)-M-1))
#define nm(pointer, n, m) (*((pointer)+(n)*MSIZE+(m)))
#define dnm(pointer, n, m) (*((pointer)+(n)*dev_MSIZE+(m)))
__device__ ffloat E_dc, E_omega, omega, B, dt, dPhi, nu, nu_tilde, bdt, mu, alpha;
__device__ int M, N, dev_MSIZE, TMSIZE;//, center_M;
__global__ void solve(ffloat *a0, ffloat *a_current, ffloat *b_current,
ffloat *a_next, ffloat *b_next,
ffloat *a_trial, ffloat *b_trial,
ffloat t,
int action);
__global__ void av(ffloat *a, ffloat *b, ffloat *av_data, ffloat t);
ffloat eval_norm(ffloat *host_a, ffloat host_alpha, int MSIZE);
void print_time_evolution_of_parameters(FILE *out, ffloat norm, ffloat *host_a, ffloat *host_b, int MSIZE,
ffloat host_mu, ffloat host_alpha, ffloat host_E_dc, ffloat host_E_omega, ffloat host_omega,
ffloat *host_av_data, ffloat t);
void print_2d_data(FILE *out, int MSIZE, ffloat *host_a0, ffloat *host_a, ffloat *host_b, ffloat host_alpha);
int main(int argc, char *argv[]) {
int display = atoi(argv[1]);
ffloat host_E_dc = strtod(argv[2], NULL);
ffloat host_E_omega = strtod(argv[3], NULL);
ffloat host_omega = strtod(argv[4], NULL);
ffloat T=host_omega>0?(2*PI/host_omega):0;
// sample parameters
ffloat host_mu = strtod(argv[5], NULL);
ffloat host_alpha = strtod(argv[6], NULL);
host_N = atoi(argv[7]);
PhiYmax = strtod(argv[8], NULL);
ffloat host_B = strtod(argv[9], NULL);
ffloat t_start = strtod(argv[10], NULL);
t_max = t_start + T; printf("# t_max = %0.20f\n", t_max);
FILE *out = stdout;
  if( argc > 11 ) {
out = fopen(argv[11], "a");
}
host_dPhi = PhiYmax/host_M;
HANDLE_ERROR(hipMemcpyToSymbol(E_dc, &host_E_dc, sizeof(ffloat)));
HANDLE_ERROR(hipMemcpyToSymbol(E_omega, &host_E_omega, sizeof(ffloat)));
HANDLE_ERROR(hipMemcpyToSymbol(omega, &host_omega, sizeof(ffloat)));
HANDLE_ERROR(hipMemcpyToSymbol(B, &host_B, sizeof(ffloat)));
HANDLE_ERROR(hipMemcpyToSymbol(dt, &host_dt, sizeof(ffloat)));
HANDLE_ERROR(hipMemcpyToSymbol(M, &host_M, sizeof(int)));
HANDLE_ERROR(hipMemcpyToSymbol(N, &host_N, sizeof(int)));
HANDLE_ERROR(hipMemcpyToSymbol(dPhi, &host_dPhi, sizeof(ffloat)));
HANDLE_ERROR(hipMemcpyToSymbol(mu, &host_mu, sizeof(ffloat)));
HANDLE_ERROR(hipMemcpyToSymbol(alpha, &host_alpha, sizeof(ffloat)));
const int NSIZE = host_N+1;
const int MSIZE = 2*host_M+3;
const int SIZE_2D = NSIZE*MSIZE;
const int SIZE_2Df = SIZE_2D*sizeof(ffloat);
HANDLE_ERROR(hipMemcpyToSymbol(dev_MSIZE, &MSIZE, sizeof(int)));
// create a0 and populate it with f0
ffloat *host_a0; host_a0 = (ffloat *)calloc(SIZE_2D, sizeof(ffloat));
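  // Populate the harmonics of f0: the n-th cosine coefficient carries a Bessel weight
  // I_n(mu)/I_0(mu) (halved for n==0, the usual Fourier-cosine convention) times a Gaussian
  // exp(-mu*phi_y^2/2) in phi_y -- presumably the equilibrium distribution expanded in cos(n*phi_x).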
for( int n=0; n<host_N+1; n++ ) {
ffloat a = gsl_sf_bessel_In(n, host_mu)*(n==0?0.5:1)/(PI*gsl_sf_bessel_In(0, host_mu))*sqrt(host_mu/(2*PI*host_alpha));
for( int m = 0; m < 2*host_M+3; m++ ) {
nm(host_a0, n, m) = a*expl(-host_mu*pow(phi_y(m),2)/2);
}
}
// create device_a0 and transfer data from host_a0 to device_a0
ffloat *a0;
HANDLE_ERROR(hipMalloc((void **)&a0, SIZE_2Df));
HANDLE_ERROR(hipMemcpy(a0, host_a0, SIZE_2Df, hipMemcpyHostToDevice));
  // create a and b 2D vectors, three of each: one for the current step, one for the next step, and one for the trial step
ffloat *host_a = (ffloat *)calloc(SIZE_2D, sizeof(ffloat));
ffloat *host_b = (ffloat *)calloc(SIZE_2D, sizeof(ffloat));
ffloat *a[3];
ffloat *b[3];
for( int i = 0; i < 3; i++ ) {
HANDLE_ERROR(hipMalloc((void **)&a[i], SIZE_2Df));
HANDLE_ERROR(hipMalloc((void **)&b[i], SIZE_2Df));
// zero vector b[i]
HANDLE_ERROR(hipMemset((void *)b[i], 0, SIZE_2Df));
// init vectors a[i]
HANDLE_ERROR(hipMemcpy(a[i], host_a0, SIZE_2Df, hipMemcpyHostToDevice));
}
ffloat host_nu = 1+host_dt/2;
HANDLE_ERROR(hipMemcpyToSymbol(nu, &host_nu, sizeof(ffloat)));
ffloat host_nu_tilde = 1-host_dt/2;
HANDLE_ERROR(hipMemcpyToSymbol(nu_tilde, &host_nu_tilde, sizeof(ffloat)));
ffloat host_bdt = host_B*host_dt/(4*host_dPhi);
HANDLE_ERROR(hipMemcpyToSymbol(bdt, &host_bdt, sizeof(ffloat)));
int current = 0; int next = 1;
int host_TMSIZE=2*host_M+1;
HANDLE_ERROR(hipMemcpyToSymbol(TMSIZE, &host_TMSIZE, sizeof(int)));
char *file_name_buf = (char *)calloc(128, sizeof(char));
char buf[16384]; // output buffer
int step = 0;
int blocks = (2*host_M+3)/TH_PER_BLOCK;
ffloat frame_time = 0; int frame_number = 1;
ffloat *host_av_data; host_av_data = (ffloat *)calloc(5, sizeof(ffloat));
ffloat *av_data;
HANDLE_ERROR(hipMalloc((void **)&av_data, 5*sizeof(ffloat)));
HANDLE_ERROR(hipMemset((void *)av_data, 0, 5*sizeof(ffloat)));
for( ffloat t = 0; t < t_max; t += host_dt ) {
// first trial step from 'current' to '2'
hipLaunchKernelGGL(( solve), dim3(blocks),dim3(TH_PER_BLOCK), 0, 0, a0, a[current], b[current], a[next], b[next], a[2], b[2], t, 1);
// then using trial values step forward from 'current' to 'next'
hipLaunchKernelGGL(( solve), dim3(blocks),dim3(TH_PER_BLOCK), 0, 0, a0, a[current], b[current], a[next], b[next], a[2], b[2], t, 2);
if( host_E_omega > 0 && display == 77 && frame_time >= 0.01) {
// we need to perform averaging of v_dr, m_x and A
hipLaunchKernelGGL(( av), dim3(1),dim3(1), 0, 0, a[next], b[next], av_data, t);
HANDLE_ERROR(hipMemcpy(host_a, a[current], SIZE_2Df, hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(host_b, b[current], SIZE_2Df, hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(host_av_data, av_data, 5*sizeof(ffloat), hipMemcpyDeviceToHost));
ffloat norm = eval_norm(host_a, host_alpha, MSIZE);
print_time_evolution_of_parameters(out, norm, host_a, host_b, MSIZE,
host_mu, host_alpha, host_E_dc, host_E_omega, host_omega,
host_av_data, t);
frame_time = 0;
}
if( host_E_omega > 0 && display != 7 && display != 77 && t >= t_start ) {
// we need to perform averaging of v_dr, m_x and A
hipLaunchKernelGGL(( av), dim3(1),dim3(1), 0, 0, a[next], b[next], av_data, t);
}
if( current == 0 ) { current = 1; next = 0; } else { current = 0; next = 1; }
if( display == 7 && frame_time >= 0.01 ) { // we are making movie
HANDLE_ERROR(hipMemcpy(host_a, a[current], SIZE_2Df, hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(host_b, b[current], SIZE_2Df, hipMemcpyDeviceToHost));
sprintf(file_name_buf, "frame%08d.data", frame_number++);
FILE *frame_file_stream = fopen(file_name_buf, "w");
setvbuf(frame_file_stream, buf, _IOFBF, sizeof(buf));
printf("\nWriting frame %s\n", file_name_buf);
print_2d_data(frame_file_stream, MSIZE, host_a0, host_a, host_b, host_alpha);
fclose(frame_file_stream);
frame_time=0;
}
if( out != stdout && display != 7 ) {
step++;
if( step == 100 ) {
printf("\r");
printf("t=%0.9f %0.2f%%", t, t/t_max*100);
sync();
step = 0;
}
}
frame_time += host_dt;
}
HANDLE_ERROR(hipMemcpy(host_a, a[current], SIZE_2Df, hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(host_b, b[current], SIZE_2Df, hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(host_av_data, av_data, 5*sizeof(ffloat), hipMemcpyDeviceToHost));
ffloat norm = 0;
for( int m = 1; m < 2*host_M+2; m++ ) {
norm += nm(host_a,0,m)*host_dPhi;
}
norm *= 2*PI*sqrt(host_alpha);
if( display == 3 ) {
for( ffloat phi_x = -PI; phi_x < PI; phi_x += 0.04 ) {
for( int m = 1; m < 2*host_M+2; m++ ) {
ffloat value = 0;
ffloat value0 = 0;
for( int n = 0; n < host_N+1; n++ ) {
value += nm(host_a,n,m)*cos(n*phi_x) + nm(host_b,n,m)*sin(n*phi_x);
value0 += nm(host_a0,n,m)*cos(n*phi_x);
}
fprintf(out, "%0.5f %0.5f %0.20f %0.20f\n", phi_x, phi_y(m), value<0?0:value, value0<0?0:value0);
}
}
fprintf(out, "# norm=%0.20f\n", norm);
printf("# norm=%0.20f\n", norm);
//if( out != stdout ) { fclose(out); }
return 0;
}
if( display == 4 ) {
printf("\n# norm=%0.20f\n", norm);
ffloat v_dr_inst = 0 ;
ffloat v_y_inst = 0;
ffloat m_over_m_x_inst = 0;
for( int m = 1; m < 2*host_M+2; m++ ) {
v_dr_inst += nm(host_b,1,m)*host_dPhi;
v_y_inst += nm(host_a,0,m)*phi_y(m)*host_dPhi;
m_over_m_x_inst += nm(host_a,1,m)*host_dPhi;
}
ffloat v_dr_multiplier = 2*gsl_sf_bessel_I0(host_mu)*PI*sqrt(host_alpha)/gsl_sf_bessel_In(1, host_mu);
ffloat v_y_multiplier = 4*PI*gsl_sf_bessel_I0(host_mu)/gsl_sf_bessel_In(1, host_mu);
ffloat m_over_multiplier = PI*host_alpha*sqrt(host_alpha);
v_dr_inst *= v_dr_multiplier;
v_y_inst *= v_y_multiplier;
m_over_m_x_inst *= m_over_multiplier;
host_av_data[1] *= v_dr_multiplier;
host_av_data[2] *= v_y_multiplier;
host_av_data[3] *= m_over_multiplier;
host_av_data[4] *= v_dr_multiplier;
host_av_data[4] /= T;
fprintf(out, "#E_{dc} \\tilde{E}_{\\omega} \\tilde{\\omega} mu v_{dr}/v_{p} A(\\omega) NORM v_{y}/v_{p} m/m_{x,k} <v_{dr}/v_{p}> <v_{y}/v_{p}> <m/m_{x,k}>\n");
fprintf(out, "%0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f\n", host_E_dc, host_E_omega, host_omega, host_mu, v_dr_inst, host_av_data[4],
norm, v_y_inst, m_over_m_x_inst, host_av_data[1], host_av_data[2], host_av_data[3]);
//if( out != stdout ) {
// fclose(out);
//}
return 0;
}
return 0;
} // end of main(...)
__global__ void solve(ffloat *a0, ffloat *a_current, ffloat *b_current,
ffloat *a_next, ffloat *b_next,
ffloat *a_trial_next, ffloat *b_trial_next,
ffloat t,
int action)
{
const int m = threadIdx.x+blockDim.x*blockIdx.x;
if( m==0 || m > TMSIZE ) { return; } // TMSIZE should be 2*M+1
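  // Two-pass predictor-corrector: action==1 computes a trial (predictor) step using the
  // current harmonics in the B-coupling terms; action==2 repeats the update with those trial
  // values substituted, giving the corrected a/b coefficients at t+dt.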
if( action == 1 ) { // trial step
ffloat mu_t_part = (E_dc + E_omega*cos(omega*t)+B*dev_phi_y(m))*dt/2;
ffloat mu_t_plus_1_part = (E_dc + E_omega*cos(omega*(t+dt))+B*dev_phi_y(m))*dt/2;
for( int n = 0; n < N; n++ ) {
ffloat mu_t = n*mu_t_part;
ffloat mu_t_plus_1 = n*mu_t_plus_1_part;
ffloat g = dt*dnm(a0,n,m)+dnm(a_current,n,m)*nu_tilde-dnm(b_current,n,m)*mu_t +
bdt*( dnm(b_current,n+1,m+1) - dnm(b_current,n+1,m-1) - (n < 2 ? 0 : (dnm(b_current,n-1,m+1) - dnm(b_current,n-1,m-1))) );
ffloat h = dnm(b_current,n,m)*nu_tilde+dnm(a_current,n,m)*mu_t +
bdt*( (n==1?2:1)*(n==0?0:(dnm(a_current,n-1,m+1)-dnm(a_current,n-1,m-1))) - dnm(a_current,n+1,m+1) + dnm(a_current,n+1,m-1) );
ffloat xi = nu*nu + mu_t_plus_1*mu_t_plus_1;
dnm(a_trial_next,n,m) = (g*nu - h*mu_t_plus_1)/xi;
if( n > 0 ) {
dnm(b_trial_next,n,m) = (g*mu_t_plus_1 + h*nu)/xi;
}
}
} else if( action == 2 ) { // real step forward
ffloat mu_t_part = (E_dc + E_omega*cos(omega*t)+B*dev_phi_y(m))*dt/2;
ffloat mu_t_plus_1_part = (E_dc + E_omega*cos(omega*(t+dt))+B*dev_phi_y(m))*dt/2;
for( int n = 0; n < N; n++ ) {
ffloat mu_t = n*mu_t_part;
ffloat mu_t_plus_1 = n*mu_t_plus_1_part;
ffloat g = dt*dnm(a0,n,m)+dnm(a_current,n,m)*nu_tilde-dnm(b_current,n,m)*mu_t +
bdt*( dnm(b_trial_next,n+1,m+1) - dnm(b_trial_next,n+1,m-1) - (n < 2 ? 0 : (dnm(b_trial_next,n-1,m+1) - dnm(b_trial_next,n-1,m-1))) );
ffloat h = dnm(b_current,n,m)*nu_tilde+dnm(a_current,n,m)*mu_t +
bdt*( (n==1?2:1)*(n==0?0:(dnm(a_trial_next,n-1,m+1)-dnm(a_trial_next,n-1,m-1))) - dnm(a_trial_next,n+1,m+1) + dnm(a_trial_next,n+1,m-1) );
ffloat xi = nu*nu + mu_t_plus_1*mu_t_plus_1;
dnm(a_next,n,m) = (g*nu - h*mu_t_plus_1)/xi;
if( n > 0 ) {
dnm(b_next,n,m) = (g*mu_t_plus_1 + h*nu)/xi;
}
}
}
} // end of solve(...)
void print_2d_data(FILE *out, int MSIZE, ffloat *host_a0, ffloat *host_a, ffloat *host_b, ffloat host_alpha) {
ffloat norm = 0;
for( int m = 1; m < 2*host_M+2; m++ ) {
norm += nm(host_a,0,m)*host_dPhi;
}
norm *= 2*PI*sqrt(host_alpha);
for( ffloat phi_x = -PI; phi_x < PI; phi_x += 0.01 ) {
for( int m = 1; m < 2*host_M+2; m++ ) {
ffloat value = 0;
//ffloat value0 = 0;
for( int n = 0; n < host_N+1; n++ ) {
value += nm(host_a,n,m)*cos(n*phi_x) + nm(host_b,n,m)*sin(n*phi_x);
//value0 += nm(host_a0,n,m)*cos(n*phi_x);
}
//fprintf(out, "%0.5f %0.5f %0.20f %0.20f\n", phi_x, phi_y(m), value<0?0:value, value0<0?0:value0);
fprintf(out, "%0.5f %0.5f %0.20f\n", phi_x, phi_y(m), value<0?0:value);
}
}
fprintf(out, "# norm=%0.20f\n", norm);
printf("# norm=%0.20f\n", norm);
} // end of print_2d_data(...)
__global__ void av(ffloat *a, ffloat *b, ffloat *av_data, ffloat t) {
int av_count = av_data[0] + 1;
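  // av_data layout: [0] sample count, [1] <v_dr>, [2] <v_y>, [3] <m/m_x>, [4] absorption integral;
  // each average is updated as a running mean, av += (inst - av)/count.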
ffloat v_dr_inst = 0; ffloat v_y_inst = 0; ffloat m_over_m_x_inst = 0;
for( int m = 1; m < TMSIZE; m++ ) {
v_dr_inst += dnm(b,1,m)*dPhi;
v_y_inst += dnm(a,0,m)*dev_phi_y(m)*dPhi;
m_over_m_x_inst += dnm(a,1,m)*dPhi;
}
//v_dr_av = v_dr_av+(v_dr_inst-v_dr_av)/av_count;
av_data[1] += (v_dr_inst-av_data[1])/av_count; // av_data[1] holds v_dr_av
//v_y_av = v_y_av+(v_y_inst-v_y_av)/av_count;
av_data[2] += (v_y_inst-av_data[2])/av_count; // av_data[2] holds v_y_av
//m_over_m_x_av = m_over_m_x_av+(m_over_m_x_inst-m_over_m_x_av)/av_count;
av_data[3] += (m_over_m_x_inst-av_data[3])/av_count; // av_data[3] holds m_over_m_x_av
//A += cos(omega*t)*v_dr_inst*dt;
av_data[4] += cos(omega*t)*v_dr_inst*dt; // av_data[4] holds absorption A
av_data[0] += 1;
} // end of av(...)
void print_time_evolution_of_parameters(FILE *out, ffloat norm, ffloat *host_a, ffloat *host_b, int MSIZE,
ffloat host_mu, ffloat host_alpha, ffloat host_E_dc, ffloat host_E_omega, ffloat host_omega,
ffloat *host_av_data, ffloat t)
{
printf("\n# t=%0.20f norm=%0.20f\n", t, norm);
ffloat v_dr_inst = 0 ;
ffloat v_y_inst = 0;
ffloat m_over_m_x_inst = 0;
for( int m = 1; m < 2*host_M+2; m++ ) {
v_dr_inst += nm(host_b,1,m)*host_dPhi;
v_y_inst += nm(host_a,0,m)*phi_y(m)*host_dPhi;
m_over_m_x_inst += nm(host_a,1,m)*host_dPhi;
}
ffloat v_dr_multiplier = 2*gsl_sf_bessel_I0(host_mu)*PI*sqrt(host_alpha)/gsl_sf_bessel_In(1, host_mu);
ffloat v_y_multiplier = 4*PI*gsl_sf_bessel_I0(host_mu)/gsl_sf_bessel_In(1, host_mu);
ffloat m_over_multiplier = PI*host_alpha*sqrt(host_alpha);
v_dr_inst *= v_dr_multiplier;
v_y_inst *= v_y_multiplier;
m_over_m_x_inst *= m_over_multiplier;
host_av_data[1] *= v_dr_multiplier;
host_av_data[2] *= v_y_multiplier;
host_av_data[3] *= m_over_multiplier;
host_av_data[4] *= v_dr_multiplier;
host_av_data[4] /= t;
fprintf(out, "#E_{dc} \\tilde{E}_{\\omega} \\tilde{\\omega} mu v_{dr}/v_{p} A(\\omega) NORM v_{y}/v_{p} m/m_{x,k} <v_{dr}/v_{p}> <v_{y}/v_{p}> <m/m_{x,k}> A_{inst} t\n");
fprintf(out, "%0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f\n",
host_E_dc, host_E_omega, host_omega, host_mu, v_dr_inst, host_av_data[4], norm, v_y_inst,
m_over_m_x_inst, host_av_data[1], host_av_data[2], host_av_data[3], cos(host_omega*t)*v_dr_inst, t);
} // end of print_time_evolution_of_parameters(...)
ffloat eval_norm(ffloat *host_a, ffloat host_alpha, int MSIZE) {
ffloat norm = 0;
for( int m = 1; m < 2*host_M+2; m++ ) {
norm += nm(host_a,0,m)*host_dPhi;
}
norm *= 2*PI*sqrt(host_alpha);
return norm;
} // end of eval_norm(...)
| 83b996d48fc862963626ee18e3235c2c37cabe39.cu | #include "fourier.cuda.h"
#include "constants.h"
#define TH_PER_BLOCK 512
#define ffloat double
// temperature
ffloat T = 1;
// number of harmonics
int host_N = 20;
// grid along phi_y consists of 2*M+1 elements
const int host_M = 2559; // 2900; // 2559; // 2175;
//const int host_center_M = host_M/2;
//int host_M = 767;
// time step
//ffloat host_dt = 0.000005;
//ffloat host_dt = 0.000003;
//ffloat host_dt = 0.000025;
ffloat host_dt = 0.0001; //0.0001;
//ffloat host_dt = 0.0005; //0.0001;
// grid step along phi_y
ffloat host_dPhi = 0;
ffloat PhiYmax;
ffloat t_max = 5;
#define phi_y(m) (host_dPhi*((m)-host_M-1))
#define dev_phi_y(m) (dPhi*((m)-M-1))
#define nm(pointer, n, m) (*((pointer)+(n)*MSIZE+(m)))
#define dnm(pointer, n, m) (*((pointer)+(n)*dev_MSIZE+(m)))
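// nm/dnm index the flattened 2-D coefficient arrays: harmonic n selects the row
// (stride MSIZE on the host, dev_MSIZE on the device) and m selects the phi_y grid point.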
__device__ ffloat E_dc, E_omega, omega, B, dt, dPhi, nu, nu_tilde, bdt, mu, alpha;
__device__ int M, N, dev_MSIZE, TMSIZE;//, center_M;
__global__ void solve(ffloat *a0, ffloat *a_current, ffloat *b_current,
ffloat *a_next, ffloat *b_next,
ffloat *a_trial, ffloat *b_trial,
ffloat t,
int action);
__global__ void av(ffloat *a, ffloat *b, ffloat *av_data, ffloat t);
ffloat eval_norm(ffloat *host_a, ffloat host_alpha, int MSIZE);
void print_time_evolution_of_parameters(FILE *out, ffloat norm, ffloat *host_a, ffloat *host_b, int MSIZE,
ffloat host_mu, ffloat host_alpha, ffloat host_E_dc, ffloat host_E_omega, ffloat host_omega,
ffloat *host_av_data, ffloat t);
void print_2d_data(FILE *out, int MSIZE, ffloat *host_a0, ffloat *host_a, ffloat *host_b, ffloat host_alpha);
int main(int argc, char *argv[]) {
int display = atoi(argv[1]);
ffloat host_E_dc = strtod(argv[2], NULL);
ffloat host_E_omega = strtod(argv[3], NULL);
ffloat host_omega = strtod(argv[4], NULL);
ffloat T=host_omega>0?(2*PI/host_omega):0;
// sample parameters
ffloat host_mu = strtod(argv[5], NULL);
ffloat host_alpha = strtod(argv[6], NULL);
host_N = atoi(argv[7]);
PhiYmax = strtod(argv[8], NULL);
ffloat host_B = strtod(argv[9], NULL);
ffloat t_start = strtod(argv[10], NULL);
t_max = t_start + T; printf("# t_max = %0.20f\n", t_max);
FILE *out = stdout;
if( argc > 9 ) {
out = fopen(argv[11], "a");
}
host_dPhi = PhiYmax/host_M;
HANDLE_ERROR(cudaMemcpyToSymbol(E_dc, &host_E_dc, sizeof(ffloat)));
HANDLE_ERROR(cudaMemcpyToSymbol(E_omega, &host_E_omega, sizeof(ffloat)));
HANDLE_ERROR(cudaMemcpyToSymbol(omega, &host_omega, sizeof(ffloat)));
HANDLE_ERROR(cudaMemcpyToSymbol(B, &host_B, sizeof(ffloat)));
HANDLE_ERROR(cudaMemcpyToSymbol(dt, &host_dt, sizeof(ffloat)));
HANDLE_ERROR(cudaMemcpyToSymbol(M, &host_M, sizeof(int)));
HANDLE_ERROR(cudaMemcpyToSymbol(N, &host_N, sizeof(int)));
HANDLE_ERROR(cudaMemcpyToSymbol(dPhi, &host_dPhi, sizeof(ffloat)));
HANDLE_ERROR(cudaMemcpyToSymbol(mu, &host_mu, sizeof(ffloat)));
HANDLE_ERROR(cudaMemcpyToSymbol(alpha, &host_alpha, sizeof(ffloat)));
const int NSIZE = host_N+1;
const int MSIZE = 2*host_M+3;
const int SIZE_2D = NSIZE*MSIZE;
const int SIZE_2Df = SIZE_2D*sizeof(ffloat);
HANDLE_ERROR(cudaMemcpyToSymbol(dev_MSIZE, &MSIZE, sizeof(int)));
// create a0 and populate it with f0
ffloat *host_a0; host_a0 = (ffloat *)calloc(SIZE_2D, sizeof(ffloat));
for( int n=0; n<host_N+1; n++ ) {
ffloat a = gsl_sf_bessel_In(n, host_mu)*(n==0?0.5:1)/(PI*gsl_sf_bessel_In(0, host_mu))*sqrt(host_mu/(2*PI*host_alpha));
for( int m = 0; m < 2*host_M+3; m++ ) {
nm(host_a0, n, m) = a*expl(-host_mu*pow(phi_y(m),2)/2);
}
}
// create device_a0 and transfer data from host_a0 to device_a0
ffloat *a0;
HANDLE_ERROR(cudaMalloc((void **)&a0, SIZE_2Df));
HANDLE_ERROR(cudaMemcpy(a0, host_a0, SIZE_2Df, cudaMemcpyHostToDevice));
// create a and b 2D vectors, three of each one for current, another for next pointer and third one for trial step
ffloat *host_a = (ffloat *)calloc(SIZE_2D, sizeof(ffloat));
ffloat *host_b = (ffloat *)calloc(SIZE_2D, sizeof(ffloat));
ffloat *a[3];
ffloat *b[3];
for( int i = 0; i < 3; i++ ) {
HANDLE_ERROR(cudaMalloc((void **)&a[i], SIZE_2Df));
HANDLE_ERROR(cudaMalloc((void **)&b[i], SIZE_2Df));
// zero vector b[i]
HANDLE_ERROR(cudaMemset((void *)b[i], 0, SIZE_2Df));
// init vectors a[i]
HANDLE_ERROR(cudaMemcpy(a[i], host_a0, SIZE_2Df, cudaMemcpyHostToDevice));
}
ffloat host_nu = 1+host_dt/2;
HANDLE_ERROR(cudaMemcpyToSymbol(nu, &host_nu, sizeof(ffloat)));
ffloat host_nu_tilde = 1-host_dt/2;
HANDLE_ERROR(cudaMemcpyToSymbol(nu_tilde, &host_nu_tilde, sizeof(ffloat)));
ffloat host_bdt = host_B*host_dt/(4*host_dPhi);
HANDLE_ERROR(cudaMemcpyToSymbol(bdt, &host_bdt, sizeof(ffloat)));
int current = 0; int next = 1;
int host_TMSIZE=2*host_M+1;
HANDLE_ERROR(cudaMemcpyToSymbol(TMSIZE, &host_TMSIZE, sizeof(int)));
char *file_name_buf = (char *)calloc(128, sizeof(char));
char buf[16384]; // output buffer
int step = 0;
int blocks = (2*host_M+3)/TH_PER_BLOCK;
ffloat frame_time = 0; int frame_number = 1;
ffloat *host_av_data; host_av_data = (ffloat *)calloc(5, sizeof(ffloat));
ffloat *av_data;
HANDLE_ERROR(cudaMalloc((void **)&av_data, 5*sizeof(ffloat)));
HANDLE_ERROR(cudaMemset((void *)av_data, 0, 5*sizeof(ffloat)));
for( ffloat t = 0; t < t_max; t += host_dt ) {
// first trial step from 'current' to '2'
solve<<<blocks,TH_PER_BLOCK>>>(a0, a[current], b[current], a[next], b[next], a[2], b[2], t, 1);
// then using trial values step forward from 'current' to 'next'
solve<<<blocks,TH_PER_BLOCK>>>(a0, a[current], b[current], a[next], b[next], a[2], b[2], t, 2);
if( host_E_omega > 0 && display == 77 && frame_time >= 0.01) {
// we need to perform averaging of v_dr, m_x and A
av<<<1,1>>>(a[next], b[next], av_data, t);
HANDLE_ERROR(cudaMemcpy(host_a, a[current], SIZE_2Df, cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(host_b, b[current], SIZE_2Df, cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(host_av_data, av_data, 5*sizeof(ffloat), cudaMemcpyDeviceToHost));
ffloat norm = eval_norm(host_a, host_alpha, MSIZE);
print_time_evolution_of_parameters(out, norm, host_a, host_b, MSIZE,
host_mu, host_alpha, host_E_dc, host_E_omega, host_omega,
host_av_data, t);
frame_time = 0;
}
if( host_E_omega > 0 && display != 7 && display != 77 && t >= t_start ) {
// we need to perform averaging of v_dr, m_x and A
av<<<1,1>>>(a[next], b[next], av_data, t);
}
if( current == 0 ) { current = 1; next = 0; } else { current = 0; next = 1; }
if( display == 7 && frame_time >= 0.01 ) { // we are making movie
HANDLE_ERROR(cudaMemcpy(host_a, a[current], SIZE_2Df, cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(host_b, b[current], SIZE_2Df, cudaMemcpyDeviceToHost));
sprintf(file_name_buf, "frame%08d.data", frame_number++);
FILE *frame_file_stream = fopen(file_name_buf, "w");
setvbuf(frame_file_stream, buf, _IOFBF, sizeof(buf));
printf("\nWriting frame %s\n", file_name_buf);
print_2d_data(frame_file_stream, MSIZE, host_a0, host_a, host_b, host_alpha);
fclose(frame_file_stream);
frame_time=0;
}
if( out != stdout && display != 7 ) {
step++;
if( step == 100 ) {
printf("\r");
printf("t=%0.9f %0.2f%%", t, t/t_max*100);
sync();
step = 0;
}
}
frame_time += host_dt;
}
HANDLE_ERROR(cudaMemcpy(host_a, a[current], SIZE_2Df, cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(host_b, b[current], SIZE_2Df, cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(host_av_data, av_data, 5*sizeof(ffloat), cudaMemcpyDeviceToHost));
ffloat norm = 0;
for( int m = 1; m < 2*host_M+2; m++ ) {
norm += nm(host_a,0,m)*host_dPhi;
}
norm *= 2*PI*sqrt(host_alpha);
if( display == 3 ) {
for( ffloat phi_x = -PI; phi_x < PI; phi_x += 0.04 ) {
for( int m = 1; m < 2*host_M+2; m++ ) {
ffloat value = 0;
ffloat value0 = 0;
for( int n = 0; n < host_N+1; n++ ) {
value += nm(host_a,n,m)*cos(n*phi_x) + nm(host_b,n,m)*sin(n*phi_x);
value0 += nm(host_a0,n,m)*cos(n*phi_x);
}
fprintf(out, "%0.5f %0.5f %0.20f %0.20f\n", phi_x, phi_y(m), value<0?0:value, value0<0?0:value0);
}
}
fprintf(out, "# norm=%0.20f\n", norm);
printf("# norm=%0.20f\n", norm);
//if( out != stdout ) { fclose(out); }
return 0;
}
if( display == 4 ) {
printf("\n# norm=%0.20f\n", norm);
ffloat v_dr_inst = 0 ;
ffloat v_y_inst = 0;
ffloat m_over_m_x_inst = 0;
for( int m = 1; m < 2*host_M+2; m++ ) {
v_dr_inst += nm(host_b,1,m)*host_dPhi;
v_y_inst += nm(host_a,0,m)*phi_y(m)*host_dPhi;
m_over_m_x_inst += nm(host_a,1,m)*host_dPhi;
}
ffloat v_dr_multiplier = 2*gsl_sf_bessel_I0(host_mu)*PI*sqrt(host_alpha)/gsl_sf_bessel_In(1, host_mu);
ffloat v_y_multiplier = 4*PI*gsl_sf_bessel_I0(host_mu)/gsl_sf_bessel_In(1, host_mu);
ffloat m_over_multiplier = PI*host_alpha*sqrt(host_alpha);
v_dr_inst *= v_dr_multiplier;
v_y_inst *= v_y_multiplier;
m_over_m_x_inst *= m_over_multiplier;
host_av_data[1] *= v_dr_multiplier;
host_av_data[2] *= v_y_multiplier;
host_av_data[3] *= m_over_multiplier;
host_av_data[4] *= v_dr_multiplier;
host_av_data[4] /= T;
fprintf(out, "#E_{dc} \\tilde{E}_{\\omega} \\tilde{\\omega} mu v_{dr}/v_{p} A(\\omega) NORM v_{y}/v_{p} m/m_{x,k} <v_{dr}/v_{p}> <v_{y}/v_{p}> <m/m_{x,k}>\n");
fprintf(out, "%0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f\n", host_E_dc, host_E_omega, host_omega, host_mu, v_dr_inst, host_av_data[4],
norm, v_y_inst, m_over_m_x_inst, host_av_data[1], host_av_data[2], host_av_data[3]);
//if( out != stdout ) {
// fclose(out);
//}
return 0;
}
return 0;
} // end of main(...)
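// One integrator step for the Fourier coefficients a,b: action==1 computes a
// trial (predictor) step into a_trial_next/b_trial_next, action==2 recomputes
// the B-coupling terms with those trial values and writes the corrected step
// into a_next/b_next.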
__global__ void solve(ffloat *a0, ffloat *a_current, ffloat *b_current,
ffloat *a_next, ffloat *b_next,
ffloat *a_trial_next, ffloat *b_trial_next,
ffloat t,
int action)
{
const int m = threadIdx.x+blockDim.x*blockIdx.x;
if( m==0 || m > TMSIZE ) { return; } // TMSIZE should be 2*M+1
if( action == 1 ) { // trial step
ffloat mu_t_part = (E_dc + E_omega*cos(omega*t)+B*dev_phi_y(m))*dt/2;
ffloat mu_t_plus_1_part = (E_dc + E_omega*cos(omega*(t+dt))+B*dev_phi_y(m))*dt/2;
for( int n = 0; n < N; n++ ) {
ffloat mu_t = n*mu_t_part;
ffloat mu_t_plus_1 = n*mu_t_plus_1_part;
ffloat g = dt*dnm(a0,n,m)+dnm(a_current,n,m)*nu_tilde-dnm(b_current,n,m)*mu_t +
bdt*( dnm(b_current,n+1,m+1) - dnm(b_current,n+1,m-1) - (n < 2 ? 0 : (dnm(b_current,n-1,m+1) - dnm(b_current,n-1,m-1))) );
ffloat h = dnm(b_current,n,m)*nu_tilde+dnm(a_current,n,m)*mu_t +
bdt*( (n==1?2:1)*(n==0?0:(dnm(a_current,n-1,m+1)-dnm(a_current,n-1,m-1))) - dnm(a_current,n+1,m+1) + dnm(a_current,n+1,m-1) );
ffloat xi = nu*nu + mu_t_plus_1*mu_t_plus_1;
dnm(a_trial_next,n,m) = (g*nu - h*mu_t_plus_1)/xi;
if( n > 0 ) {
dnm(b_trial_next,n,m) = (g*mu_t_plus_1 + h*nu)/xi;
}
}
} else if( action == 2 ) { // real step forward
ffloat mu_t_part = (E_dc + E_omega*cos(omega*t)+B*dev_phi_y(m))*dt/2;
ffloat mu_t_plus_1_part = (E_dc + E_omega*cos(omega*(t+dt))+B*dev_phi_y(m))*dt/2;
for( int n = 0; n < N; n++ ) {
ffloat mu_t = n*mu_t_part;
ffloat mu_t_plus_1 = n*mu_t_plus_1_part;
ffloat g = dt*dnm(a0,n,m)+dnm(a_current,n,m)*nu_tilde-dnm(b_current,n,m)*mu_t +
bdt*( dnm(b_trial_next,n+1,m+1) - dnm(b_trial_next,n+1,m-1) - (n < 2 ? 0 : (dnm(b_trial_next,n-1,m+1) - dnm(b_trial_next,n-1,m-1))) );
ffloat h = dnm(b_current,n,m)*nu_tilde+dnm(a_current,n,m)*mu_t +
bdt*( (n==1?2:1)*(n==0?0:(dnm(a_trial_next,n-1,m+1)-dnm(a_trial_next,n-1,m-1))) - dnm(a_trial_next,n+1,m+1) + dnm(a_trial_next,n+1,m-1) );
ffloat xi = nu*nu + mu_t_plus_1*mu_t_plus_1;
dnm(a_next,n,m) = (g*nu - h*mu_t_plus_1)/xi;
if( n > 0 ) {
dnm(b_next,n,m) = (g*mu_t_plus_1 + h*nu)/xi;
}
}
}
} // end of solve(...)
void print_2d_data(FILE *out, int MSIZE, ffloat *host_a0, ffloat *host_a, ffloat *host_b, ffloat host_alpha) {
ffloat norm = 0;
for( int m = 1; m < 2*host_M+2; m++ ) {
norm += nm(host_a,0,m)*host_dPhi;
}
norm *= 2*PI*sqrt(host_alpha);
for( ffloat phi_x = -PI; phi_x < PI; phi_x += 0.01 ) {
for( int m = 1; m < 2*host_M+2; m++ ) {
ffloat value = 0;
//ffloat value0 = 0;
for( int n = 0; n < host_N+1; n++ ) {
value += nm(host_a,n,m)*cos(n*phi_x) + nm(host_b,n,m)*sin(n*phi_x);
//value0 += nm(host_a0,n,m)*cos(n*phi_x);
}
//fprintf(out, "%0.5f %0.5f %0.20f %0.20f\n", phi_x, phi_y(m), value<0?0:value, value0<0?0:value0);
fprintf(out, "%0.5f %0.5f %0.20f\n", phi_x, phi_y(m), value<0?0:value);
}
}
fprintf(out, "# norm=%0.20f\n", norm);
printf("# norm=%0.20f\n", norm);
} // end of print_2d_data(...)
__global__ void av(ffloat *a, ffloat *b, ffloat *av_data, ffloat t) {
int av_count = av_data[0] + 1;
ffloat v_dr_inst = 0; ffloat v_y_inst = 0; ffloat m_over_m_x_inst = 0;
for( int m = 1; m < TMSIZE; m++ ) {
v_dr_inst += dnm(b,1,m)*dPhi;
v_y_inst += dnm(a,0,m)*dev_phi_y(m)*dPhi;
m_over_m_x_inst += dnm(a,1,m)*dPhi;
}
//v_dr_av = v_dr_av+(v_dr_inst-v_dr_av)/av_count;
av_data[1] += (v_dr_inst-av_data[1])/av_count; // av_data[1] holds v_dr_av
//v_y_av = v_y_av+(v_y_inst-v_y_av)/av_count;
av_data[2] += (v_y_inst-av_data[2])/av_count; // av_data[2] holds v_y_av
//m_over_m_x_av = m_over_m_x_av+(m_over_m_x_inst-m_over_m_x_av)/av_count;
av_data[3] += (m_over_m_x_inst-av_data[3])/av_count; // av_data[3] holds m_over_m_x_av
//A += cos(omega*t)*v_dr_inst*dt;
av_data[4] += cos(omega*t)*v_dr_inst*dt; // av_data[4] holds absorption A
av_data[0] += 1;
} // end of av(...)
void print_time_evolution_of_parameters(FILE *out, ffloat norm, ffloat *host_a, ffloat *host_b, int MSIZE,
ffloat host_mu, ffloat host_alpha, ffloat host_E_dc, ffloat host_E_omega, ffloat host_omega,
ffloat *host_av_data, ffloat t)
{
printf("\n# t=%0.20f norm=%0.20f\n", t, norm);
ffloat v_dr_inst = 0 ;
ffloat v_y_inst = 0;
ffloat m_over_m_x_inst = 0;
for( int m = 1; m < 2*host_M+2; m++ ) {
v_dr_inst += nm(host_b,1,m)*host_dPhi;
v_y_inst += nm(host_a,0,m)*phi_y(m)*host_dPhi;
m_over_m_x_inst += nm(host_a,1,m)*host_dPhi;
}
ffloat v_dr_multiplier = 2*gsl_sf_bessel_I0(host_mu)*PI*sqrt(host_alpha)/gsl_sf_bessel_In(1, host_mu);
ffloat v_y_multiplier = 4*PI*gsl_sf_bessel_I0(host_mu)/gsl_sf_bessel_In(1, host_mu);
ffloat m_over_multiplier = PI*host_alpha*sqrt(host_alpha);
v_dr_inst *= v_dr_multiplier;
v_y_inst *= v_y_multiplier;
m_over_m_x_inst *= m_over_multiplier;
host_av_data[1] *= v_dr_multiplier;
host_av_data[2] *= v_y_multiplier;
host_av_data[3] *= m_over_multiplier;
host_av_data[4] *= v_dr_multiplier;
host_av_data[4] /= t;
fprintf(out, "#E_{dc} \\tilde{E}_{\\omega} \\tilde{\\omega} mu v_{dr}/v_{p} A(\\omega) NORM v_{y}/v_{p} m/m_{x,k} <v_{dr}/v_{p}> <v_{y}/v_{p}> <m/m_{x,k}> A_{inst} t\n");
fprintf(out, "%0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f %0.20f\n",
host_E_dc, host_E_omega, host_omega, host_mu, v_dr_inst, host_av_data[4], norm, v_y_inst,
m_over_m_x_inst, host_av_data[1], host_av_data[2], host_av_data[3], cos(host_omega*t)*v_dr_inst, t);
} // end of print_time_evolution_of_parameters(...)
ffloat eval_norm(ffloat *host_a, ffloat host_alpha, int MSIZE) {
ffloat norm = 0;
for( int m = 1; m < 2*host_M+2; m++ ) {
norm += nm(host_a,0,m)*host_dPhi;
}
norm *= 2*PI*sqrt(host_alpha);
return norm;
} // end of eval_norm(...)
|
c8a7650d6d7ce32cd497482da7b5218586e03391.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../../../C/dballoc.h"
#include "../../../C/dbmem.h"
#include "../../../C/dbquery.h"
#include "../../../C/dbdata.h"
#include "stdio.h"
#define MAX 10
#define R 1000
#define C 5
#define FIELD 2
__global__
void initialize_db_on_cuda(void *d_db, gint *d_size) {
printf("\nInit");
if (threadIdx.x == 0) {
d_db = wg_attach_local_cuda_database(*d_size);
}
}
__global__
void delete_db_on_cuda(void *d_db) {
if (threadIdx.x == 0) {
wg_delete_local_cuda_database(d_db);
}
}
__global__
void GpuCount(void *d_db, int *d_count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
wg_int target = 10;
if (id < R) {
void *rec = wg_cuda_find_record_int(d_db, 0, WG_COND_EQUAL, id, NULL);
wg_int value = wg_cuda_decode_int(d_db,
wg_cuda_get_field(d_db, rec, 2));
if (value == target) {
atomicAdd(d_count, 1);
}
}
}
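// createTable: each thread with id < R creates one record with C fields and
// stores the encoded integer *d_value into field 2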
__global__ void createTable(void *d_db, int *d_value) {
wg_int enc;
void *rec;
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < R) {
rec = wg_cuda_create_record(d_db, C);
if (!rec) {
printf("\nFailed to create database - not enough memory");
wg_delete_local_cuda_database(d_db);
}
enc = wg_cuda_encode_int(d_db, *d_value);
wg_cuda_set_field(d_db, rec, 2, enc);
}
}
int main() {
void *d_db;
gint size = 2000000;
gint *h_size = &size;
void *h_db = malloc(sizeof(db_handle));
int value = 1;
int count = 0;
int *h_count = &count;
int *h_value = &value;
int *d_value;
gint *d_size;
int *d_count;
if (hipMalloc((void **) &d_db, sizeof(db_handle)) != hipSuccess) {
printf("Failed to allocate memory for db_handle to the GPU");
return 0;
}
if (hipMalloc((void **) &d_size, sizeof(gint)) != hipSuccess) {
printf("Failed to allocate memory for d_size to the GPU");
return 0;
}
if (hipMemcpy(d_size, h_size, sizeof(gint), hipMemcpyHostToDevice)
!= hipSuccess) {
printf("\nFailed to copy size pointer to GPU");
return 0;
}
printf("\nWe are here before init");
hipLaunchKernelGGL(( initialize_db_on_cuda), dim3(1), dim3(32), 0, 0, d_db, d_size);
hipDeviceSynchronize();
if (hipMalloc((void **) &d_value, sizeof(int)) != hipSuccess) {
printf("Failed to allocate memory for d_value to the GPU");
return 0;
}
if (hipMemcpy(d_value, h_value, sizeof(int), hipMemcpyHostToDevice)
!= hipSuccess) {
printf("\nFailed to copy d_value pointer to GPU");
return 0;
}
hipLaunchKernelGGL(( createTable), dim3(1), dim3(32), 0, 0, d_db, d_value);
hipDeviceSynchronize();
// d_count must point to device memory before GpuCount atomically increments it
if (hipMalloc((void **) &d_count, sizeof(int)) != hipSuccess) {
printf("Failed to allocate memory for d_count to the GPU");
return 0;
}
if (hipMemcpy(d_count, h_count, sizeof(int), hipMemcpyHostToDevice)
!= hipSuccess) {
printf("\nFailed to copy d_count to GPU");
return 0;
}
hipLaunchKernelGGL(( GpuCount), dim3(1),dim3(32), 0, 0, d_db, d_count);
hipDeviceSynchronize();
printf("\nThe end");
return 0;
}
| c8a7650d6d7ce32cd497482da7b5218586e03391.cu | #include "../../../C/dballoc.h"
#include "../../../C/dbmem.h"
#include "../../../C/dbquery.h"
#include "../../../C/dbdata.h"
#include "stdio.h"
#define MAX 10
#define R 1000
#define C 5
#define FIELD 2
__global__
void initialize_db_on_cuda(void *d_db, gint *d_size) {
printf("\nInit");
if (threadIdx.x == 0) {
d_db = wg_attach_local_cuda_database(*d_size);
}
}
__global__
void delete_db_on_cuda(void *d_db) {
if (threadIdx.x == 0) {
wg_delete_local_cuda_database(d_db);
}
}
__global__
void GpuCount(void *d_db, int *d_count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
wg_int target = 10;
if (id < R) {
void *rec = wg_cuda_find_record_int(d_db, 0, WG_COND_EQUAL, id, NULL);
wg_int value = wg_cuda_decode_int(d_db,
wg_cuda_get_field(d_db, rec, 2));
if (value == target) {
atomicAdd(d_count, 1);
}
}
}
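// createTable: each thread with id < R creates one record with C fields and
// stores the encoded integer *d_value into field 2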
__global__ void createTable(void *d_db, int *d_value) {
wg_int enc;
void *rec;
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < R) {
rec = wg_cuda_create_record(d_db, C);
if (!rec) {
printf("\nFailed to create database - not enough memory");
wg_delete_local_cuda_database(d_db);
}
enc = wg_cuda_encode_int(d_db, *d_value);
wg_cuda_set_field(d_db, rec, 2, enc);
}
}
int main() {
void *d_db;
gint size = 2000000;
gint *h_size = &size;
void *h_db = malloc(sizeof(db_handle));
int value = 1;
int count = 0;
int *h_count = &count;
int *h_value = &value;
int *d_value;
gint *d_size;
int *d_count;
if (cudaMalloc((void **) &d_db, sizeof(db_handle)) != cudaSuccess) {
printf("Failed to allocate memory for db_handle to the GPU");
return 0;
}
if (cudaMalloc((void **) &d_size, sizeof(gint)) != cudaSuccess) {
printf("Failed to allocate memory for d_size to the GPU");
return 0;
}
if (cudaMemcpy(d_size, h_size, sizeof(gint), cudaMemcpyHostToDevice)
!= cudaSuccess) {
printf("\nFailed to copy size pointer to GPU");
return 0;
}
printf("\nWe are here before init");
initialize_db_on_cuda<<<1, 32>>>(d_db, d_size);
cudaDeviceSynchronize();
if (cudaMalloc((void **) &d_value, sizeof(int)) != cudaSuccess) {
printf("Failed to allocate memory for d_value to the GPU");
return 0;
}
if (cudaMemcpy(d_value, h_value, sizeof(int), cudaMemcpyHostToDevice)
!= cudaSuccess) {
printf("\nFailed to copy d_value pointer to GPU");
return 0;
}
createTable<<<1, 32>>>(d_db, d_value);
cudaDeviceSynchronize();
// d_count must point to device memory before GpuCount atomically increments it
if (cudaMalloc((void **) &d_count, sizeof(int)) != cudaSuccess) {
printf("Failed to allocate memory for d_count to the GPU");
return 0;
}
if (cudaMemcpy(d_count, h_count, sizeof(int), cudaMemcpyHostToDevice)
!= cudaSuccess) {
printf("\nFailed to copy d_count to GPU");
return 0;
}
GpuCount<<<1,32>>>(d_db, d_count);
cudaDeviceSynchronize();
printf("\nThe end");
return 0;
}
|
814f6f2aee911e10f479f50f0144cd68603fbf43.hip | // !!! This is a file automatically generated by hipify!!!
#include "misc.h"
#include "gpusearch.h"
#include "hip/hip_runtime.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
static hipError_t crc;
int nblock_size = 512;
int maxgsx = 65535;
__device__ char *kmbwt, *kmsr;
__device__ unsigned int *kmocc, *kmpsa;
__device__ unsigned int *kmsml;
// given a letter, return its rank in the alphabet
__device__ inline
unsigned int alpha_rank(char l)
{
// rank alphabetically $<a<c<g<t
switch(l)
{
case '$': return 0;
case 'a': return 1;
case 'c': return 2;
case 'g': return 3;
case 't': return 4;
}
return 5;
}
// get the bp at pos
__device__ inline
char gpu_get_bp_2bit(char* genome, unsigned int pos)
{
unsigned long long int bit_pos = pos * ENCODE_SIZE_2BIT;
unsigned long long int byte_pos = bit_pos / BYTE_SIZE;
unsigned long long int byte_off = bit_pos - byte_pos * BYTE_SIZE;
unsigned char dna = genome[byte_pos];
dna = dna << byte_off;
dna = dna >> (BYTE_SIZE - ENCODE_SIZE_2BIT);
switch(dna)
{
case 0: return 'a';
case 1: return 'c';
case 2: return 'g';
case 3: return 't';
}
return '*';
}
// write a bp at a position
__device__ inline
void gpu_write_bp_2bit(char* genome, unsigned int pos, char val)
{
unsigned long long int bit_pos = pos * ENCODE_SIZE_2BIT;
unsigned long long int byte_pos = bit_pos / BYTE_SIZE;
unsigned long long int byte_off = bit_pos - byte_pos * BYTE_SIZE;
switch(val)
{
case 'a':
{
char mask = 0 << (BYTE_SIZE - byte_off - ENCODE_SIZE_2BIT);
genome[byte_pos] |= mask;
break;
}
case 'c':
{
char mask = 1 << (BYTE_SIZE - byte_off - ENCODE_SIZE_2BIT);
genome[byte_pos] |= mask;
break;
}
case 'g':
{
char mask = 2 << (BYTE_SIZE - byte_off - ENCODE_SIZE_2BIT);
genome[byte_pos] |= mask;
break;
}
case 't':
{
char mask = 3 << (BYTE_SIZE - byte_off - ENCODE_SIZE_2BIT);
genome[byte_pos] |= mask;
break;
}
}
}
// given a bwt and a partial occ array, find the actual occ value
__device__ inline
unsigned int gpu_get_occ(long long int pos, char alpha)
{
if(pos < 0)
return 0;
unsigned int occ_off = pos / SAMPLE_SIZE_TEST;
long long int occi = 0;
switch(alpha)
{
case 'a': occi = kmocc[occ_off*ALPHA_SIZE+0]; break;
case 'c': occi = kmocc[occ_off*ALPHA_SIZE+1]; break;
case 'g': occi = kmocc[occ_off*ALPHA_SIZE+2]; break;
case 't': occi = kmocc[occ_off*ALPHA_SIZE+3]; break;
}
unsigned int ext_cnt = 0;
for(unsigned int i = occ_off * SAMPLE_SIZE_TEST + 1;
i < pos && i < (unsigned int)GENOME_SIZE_TEST; i++)
{
char bp = '*';
if(i < BWT_DPOS)
bp = gpu_get_bp_2bit(kmbwt, i);
else if(i == BWT_DPOS)
bp = '$';
else
bp = gpu_get_bp_2bit(kmbwt, i-1);
if(bp == alpha)
ext_cnt++;
}
occi += ext_cnt;
if(pos % SAMPLE_SIZE_TEST == 0)
{
char bp = '*';
if(pos < BWT_DPOS)
bp = gpu_get_bp_2bit(kmbwt, pos);
else if(pos == BWT_DPOS)
bp = '$';
else
bp = gpu_get_bp_2bit(kmbwt, pos-1);
if(bp == alpha)
occi--;
if(occi < 0)
occi = 0;
}
return (unsigned int)occi;
}
// get suffix array value
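// (walks backwards with the LF-mapping pos -> kmsml[c] + Occ(c, pos) until a
// sampled entry in kmpsa is reached; the result is the sampled value plus the
// number of steps taken)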
__device__ inline
unsigned int gpu_get_sa_val(unsigned int pos)
{
unsigned int nmov = 0;
while(pos != BWT_DPOS)
{
if(pos % SAMPLE_SIZE_TEST == 0)
{
nmov += kmpsa[pos/SAMPLE_SIZE_TEST];
break;
}
char bp = '*';
if(pos < BWT_DPOS)
bp = gpu_get_bp_2bit(kmbwt, pos);
else if(pos == BWT_DPOS)
bp = '$';
else
bp = gpu_get_bp_2bit(kmbwt, pos-1);
pos = kmsml[alpha_rank(bp)] +
gpu_get_occ((long long int)pos,bp);
nmov++;
}
return nmov;
}
// k-mismatch search
__device__ inline
void gpu_kmismatch(int kerr,unsigned int re,
unsigned int sp,unsigned int ep,unsigned int* ans)
{
/*
for(long long int i = re; i >= (long long int)rs && ep >= sp; i--)
{
char bp = gpu_get_bp_2bit(sr, i);
sp = sml[alpha_rank(bp)] + 1 +
gpu_get_occ(bwt,occ,genome_size,sample_size,(long long int)sp-1,bp);
ep = sml[alpha_rank(bp)] +
gpu_get_occ(bwt,occ,genome_size,sample_size,(long long int)ep,bp);
}
if(sp <= ep)
ans[0] = psa[sp-1];
return 0;
*/
if(sp > ep)
return;
if(re % READ_SIZE == 0)
{
ans[0] = gpu_get_sa_val(sp-1)-1;
return;
}
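// otherwise extend the match one base backwards: for each base c the interval
// is narrowed with the FM-index rule sp' = kmsml[c] + Occ(c, sp-1) + 1,
// ep' = kmsml[c] + Occ(c, ep); a base that differs from the read at position re
// consumes one of the kerr allowed mismatches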
unsigned int sp2 = kmsml[alpha_rank('a')] + 1 +
gpu_get_occ((long long int)sp-1,'a');
unsigned int ep2 = kmsml[alpha_rank('a')] +
gpu_get_occ((long long int)ep,'a');
int kerr2 = 0;
if(gpu_get_bp_2bit(kmsr, re) != 'a')
kerr2 = kerr - 1;
else
kerr2 = kerr;
if(kerr2 >= 0)
gpu_kmismatch(kerr2,re-1,sp2,ep2,ans);
sp2 = kmsml[alpha_rank('c')] + 1 +
gpu_get_occ((long long int)sp-1,'c');
ep2 = kmsml[alpha_rank('c')] +
gpu_get_occ((long long int)ep,'c');
if(gpu_get_bp_2bit(kmsr, re) != 'c')
kerr2 = kerr - 1;
else
kerr2 = kerr;
if(kerr2 >= 0)
gpu_kmismatch(kerr2,re-1,sp2,ep2,ans);
sp2 = kmsml[alpha_rank('g')] + 1 +
gpu_get_occ((long long int)sp-1,'g');
ep2 = kmsml[alpha_rank('g')] +
gpu_get_occ((long long int)ep,'g');
if(gpu_get_bp_2bit(kmsr, re) != 'g')
kerr2 = kerr - 1;
else
kerr2 = kerr;
if(kerr2 >= 0)
gpu_kmismatch(kerr2,re-1,sp2,ep2,ans);
sp2 = kmsml[alpha_rank('t')] + 1 +
gpu_get_occ((long long int)sp-1,'t');
ep2 = kmsml[alpha_rank('t')] +
gpu_get_occ((long long int)ep,'t');
if(gpu_get_bp_2bit(kmsr, re) != 't')
kerr2 = kerr - 1;
else
kerr2 = kerr;
if(kerr2 >= 0)
gpu_kmismatch(kerr2,re-1,sp2,ep2,ans);
return;
}
// given a file of short reads, search it in genome, store result in all_ans
// maximum result is limited by ans_size
// maximum mismatch is kerr
__global__
void search_kernel(char* bwt, char* sr, unsigned int* psa,
unsigned int read_num, unsigned int read_size, unsigned int* sml,
unsigned int* occ, unsigned int* all_ans, int kerr)
{
unsigned int i = (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(i < 100000)
{
kmbwt = bwt;
kmocc = occ;
kmpsa = psa;
kmsr = sr;
kmsml = sml;
unsigned int* ans_ptr = all_ans + i;
unsigned int re = read_size*(i+1)-1;
unsigned int sp = 0;
unsigned int ep = (unsigned int)GENOME_SIZE_TEST - 1;
gpu_kmismatch(kerr,re,sp,ep,ans_ptr);
}
}
// c interface
extern "C++" void gpu_search(char* bwt, char* sr, unsigned int* psa,
unsigned int read_num, unsigned int read_size, unsigned int* sml,
unsigned int* occ, unsigned int* all_ans, int kerr)
{
unsigned int x_size, y_size;
dim3 dimBlock(nblock_size);
x_size = (100000 - 1)/nblock_size + 1;
y_size = (x_size - 1)/maxgsx + 1;
x_size = x_size < maxgsx ? x_size : maxgsx;
dim3 dimGrid(x_size, y_size);
crc = hipGetLastError();
hipLaunchKernelGGL(( search_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
bwt,sr,psa,read_num,read_size,sml,occ,all_ans,kerr);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc)
{
printf("gpusearch error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
}
// functions for memory copy allocation and deallocation
extern "C++" void gmalloc(char** gptr, size_t size)
{
hipMalloc(gptr, size);
return;
}
extern "C++" void gmemcpy_htod(char* dst, char* src, size_t size)
{
hipMemcpy(dst, src, size, hipMemcpyHostToDevice);
return;
}
extern "C++" void gmemcpy_dtoh(char* dst, char* src, size_t size)
{
hipMemcpy(dst, src, size, hipMemcpyDeviceToHost);
return;
}
extern "C++" void gfree(char* src)
{
hipFree(src);
return;
}
extern "C++" void set_stack_size(size_t size)
{
hipThreadSetLimit(hipLimitStackSize, size);
return;
}
extern "C++" void set_cache_size(int nscache)
{
hipFuncCache_t cpref;
if ((nscache < 0) || (nscache > 2))
return;
if (nscache==0)
cpref = hipFuncCachePreferNone;
else if (nscache==1)
cpref = hipFuncCachePreferShared;
else if (nscache==2)
cpref = hipFuncCachePreferL1;
hipDeviceSetCacheConfig(cpref);
return;
}
| 814f6f2aee911e10f479f50f0144cd68603fbf43.cu |
#include "misc.h"
#include "gpusearch.h"
#include "cuda.h"
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
static cudaError_t crc;
int nblock_size = 512;
int maxgsx = 65535;
__device__ char *kmbwt, *kmsr;
__device__ unsigned int *kmocc, *kmpsa;
__device__ unsigned int *kmsml;
// given a letter, return its rank in the alphabet
__device__ inline
unsigned int alpha_rank(char l)
{
// rank alphabetically $<a<c<g<t
switch(l)
{
case '$': return 0;
case 'a': return 1;
case 'c': return 2;
case 'g': return 3;
case 't': return 4;
}
return 5;
}
// get the bp at pos
__device__ inline
char gpu_get_bp_2bit(char* genome, unsigned int pos)
{
unsigned long long int bit_pos = pos * ENCODE_SIZE_2BIT;
unsigned long long int byte_pos = bit_pos / BYTE_SIZE;
unsigned long long int byte_off = bit_pos - byte_pos * BYTE_SIZE;
unsigned char dna = genome[byte_pos];
dna = dna << byte_off;
dna = dna >> (BYTE_SIZE - ENCODE_SIZE_2BIT);
switch(dna)
{
case 0: return 'a';
case 1: return 'c';
case 2: return 'g';
case 3: return 't';
}
return '*';
}
// write a bp at a position
__device__ inline
void gpu_write_bp_2bit(char* genome, unsigned int pos, char val)
{
unsigned long long int bit_pos = pos * ENCODE_SIZE_2BIT;
unsigned long long int byte_pos = bit_pos / BYTE_SIZE;
unsigned long long int byte_off = bit_pos - byte_pos * BYTE_SIZE;
switch(val)
{
case 'a':
{
char mask = 0 << (BYTE_SIZE - byte_off - ENCODE_SIZE_2BIT);
genome[byte_pos] |= mask;
break;
}
case 'c':
{
char mask = 1 << (BYTE_SIZE - byte_off - ENCODE_SIZE_2BIT);
genome[byte_pos] |= mask;
break;
}
case 'g':
{
char mask = 2 << (BYTE_SIZE - byte_off - ENCODE_SIZE_2BIT);
genome[byte_pos] |= mask;
break;
}
case 't':
{
char mask = 3 << (BYTE_SIZE - byte_off - ENCODE_SIZE_2BIT);
genome[byte_pos] |= mask;
break;
}
}
}
// given a bwt and a partial occ array, find the actual occ value
__device__ inline
unsigned int gpu_get_occ(long long int pos, char alpha)
{
if(pos < 0)
return 0;
unsigned int occ_off = pos / SAMPLE_SIZE_TEST;
long long int occi = 0;
switch(alpha)
{
case 'a': occi = kmocc[occ_off*ALPHA_SIZE+0]; break;
case 'c': occi = kmocc[occ_off*ALPHA_SIZE+1]; break;
case 'g': occi = kmocc[occ_off*ALPHA_SIZE+2]; break;
case 't': occi = kmocc[occ_off*ALPHA_SIZE+3]; break;
}
unsigned int ext_cnt = 0;
for(unsigned int i = occ_off * SAMPLE_SIZE_TEST + 1;
i < pos && i < (unsigned int)GENOME_SIZE_TEST; i++)
{
char bp = '*';
if(i < BWT_DPOS)
bp = gpu_get_bp_2bit(kmbwt, i);
else if(i == BWT_DPOS)
bp = '$';
else
bp = gpu_get_bp_2bit(kmbwt, i-1);
if(bp == alpha)
ext_cnt++;
}
occi += ext_cnt;
if(pos % SAMPLE_SIZE_TEST == 0)
{
char bp = '*';
if(pos < BWT_DPOS)
bp = gpu_get_bp_2bit(kmbwt, pos);
else if(pos == BWT_DPOS)
bp = '$';
else
bp = gpu_get_bp_2bit(kmbwt, pos-1);
if(bp == alpha)
occi--;
if(occi < 0)
occi = 0;
}
return (unsigned int)occi;
}
// get suffix array value
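// (walks backwards with the LF-mapping pos -> kmsml[c] + Occ(c, pos) until a
// sampled entry in kmpsa is reached; the result is the sampled value plus the
// number of steps taken)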
__device__ inline
unsigned int gpu_get_sa_val(unsigned int pos)
{
unsigned int nmov = 0;
while(pos != BWT_DPOS)
{
if(pos % SAMPLE_SIZE_TEST == 0)
{
nmov += kmpsa[pos/SAMPLE_SIZE_TEST];
break;
}
char bp = '*';
if(pos < BWT_DPOS)
bp = gpu_get_bp_2bit(kmbwt, pos);
else if(pos == BWT_DPOS)
bp = '$';
else
bp = gpu_get_bp_2bit(kmbwt, pos-1);
pos = kmsml[alpha_rank(bp)] +
gpu_get_occ((long long int)pos,bp);
nmov++;
}
return nmov;
}
// k-mismatch search
__device__ inline
void gpu_kmismatch(int kerr,unsigned int re,
unsigned int sp,unsigned int ep,unsigned int* ans)
{
/*
for(long long int i = re; i >= (long long int)rs && ep >= sp; i--)
{
char bp = gpu_get_bp_2bit(sr, i);
sp = sml[alpha_rank(bp)] + 1 +
gpu_get_occ(bwt,occ,genome_size,sample_size,(long long int)sp-1,bp);
ep = sml[alpha_rank(bp)] +
gpu_get_occ(bwt,occ,genome_size,sample_size,(long long int)ep,bp);
}
if(sp <= ep)
ans[0] = psa[sp-1];
return 0;
*/
if(sp > ep)
return;
if(re % READ_SIZE == 0)
{
ans[0] = gpu_get_sa_val(sp-1)-1;
return;
}
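// otherwise extend the match one base backwards: for each base c the interval
// is narrowed with the FM-index rule sp' = kmsml[c] + Occ(c, sp-1) + 1,
// ep' = kmsml[c] + Occ(c, ep); a base that differs from the read at position re
// consumes one of the kerr allowed mismatches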
unsigned int sp2 = kmsml[alpha_rank('a')] + 1 +
gpu_get_occ((long long int)sp-1,'a');
unsigned int ep2 = kmsml[alpha_rank('a')] +
gpu_get_occ((long long int)ep,'a');
int kerr2 = 0;
if(gpu_get_bp_2bit(kmsr, re) != 'a')
kerr2 = kerr - 1;
else
kerr2 = kerr;
if(kerr2 >= 0)
gpu_kmismatch(kerr2,re-1,sp2,ep2,ans);
sp2 = kmsml[alpha_rank('c')] + 1 +
gpu_get_occ((long long int)sp-1,'c');
ep2 = kmsml[alpha_rank('c')] +
gpu_get_occ((long long int)ep,'c');
if(gpu_get_bp_2bit(kmsr, re) != 'c')
kerr2 = kerr - 1;
else
kerr2 = kerr;
if(kerr2 >= 0)
gpu_kmismatch(kerr2,re-1,sp2,ep2,ans);
sp2 = kmsml[alpha_rank('g')] + 1 +
gpu_get_occ((long long int)sp-1,'g');
ep2 = kmsml[alpha_rank('g')] +
gpu_get_occ((long long int)ep,'g');
if(gpu_get_bp_2bit(kmsr, re) != 'g')
kerr2 = kerr - 1;
else
kerr2 = kerr;
if(kerr2 >= 0)
gpu_kmismatch(kerr2,re-1,sp2,ep2,ans);
sp2 = kmsml[alpha_rank('t')] + 1 +
gpu_get_occ((long long int)sp-1,'t');
ep2 = kmsml[alpha_rank('t')] +
gpu_get_occ((long long int)ep,'t');
if(gpu_get_bp_2bit(kmsr, re) != 't')
kerr2 = kerr - 1;
else
kerr2 = kerr;
if(kerr2 >= 0)
gpu_kmismatch(kerr2,re-1,sp2,ep2,ans);
return;
}
// given a file of short reads, search it in genome, store result in all_ans
// maximum result is limited by ans_size
// maximum mismatch is kerr
__global__
void search_kernel(char* bwt, char* sr, unsigned int* psa,
unsigned int read_num, unsigned int read_size, unsigned int* sml,
unsigned int* occ, unsigned int* all_ans, int kerr)
{
unsigned int i = (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(i < 100000)
{
kmbwt = bwt;
kmocc = occ;
kmpsa = psa;
kmsr = sr;
kmsml = sml;
unsigned int* ans_ptr = all_ans + i;
unsigned int re = read_size*(i+1)-1;
unsigned int sp = 0;
unsigned int ep = (unsigned int)GENOME_SIZE_TEST - 1;
gpu_kmismatch(kerr,re,sp,ep,ans_ptr);
}
}
// c interface
extern "C++" void gpu_search(char* bwt, char* sr, unsigned int* psa,
unsigned int read_num, unsigned int read_size, unsigned int* sml,
unsigned int* occ, unsigned int* all_ans, int kerr)
{
unsigned int x_size, y_size;
dim3 dimBlock(nblock_size);
x_size = (100000 - 1)/nblock_size + 1;
y_size = (x_size - 1)/maxgsx + 1;
x_size = x_size < maxgsx ? x_size : maxgsx;
dim3 dimGrid(x_size, y_size);
crc = cudaGetLastError();
search_kernel<<<dimGrid, dimBlock>>>
(bwt,sr,psa,read_num,read_size,sml,occ,all_ans,kerr);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc)
{
printf("gpusearch error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
}
// functions for memory copy allocation and deallocation
extern "C++" void gmalloc(char** gptr, size_t size)
{
cudaMalloc(gptr, size);
return;
}
extern "C++" void gmemcpy_htod(char* dst, char* src, size_t size)
{
cudaMemcpy(dst, src, size, cudaMemcpyHostToDevice);
return;
}
extern "C++" void gmemcpy_dtoh(char* dst, char* src, size_t size)
{
cudaMemcpy(dst, src, size, cudaMemcpyDeviceToHost);
return;
}
extern "C++" void gfree(char* src)
{
cudaFree(src);
return;
}
extern "C++" void set_stack_size(size_t size)
{
cudaThreadSetLimit(cudaLimitStackSize, size);
return;
}
extern "C++" void set_cache_size(int nscache)
{
cudaFuncCache cpref;
if ((nscache < 0) || (nscache > 2))
return;
if (nscache==0)
cpref = cudaFuncCachePreferNone;
else if (nscache==1)
cpref = cudaFuncCachePreferShared;
else if (nscache==2)
cpref = cudaFuncCachePreferL1;
cudaThreadSetCacheConfig(cpref);
return;
}
|
bd46fa57a65f220ae254bff08cd53539d56df0ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/operators/histogram_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/gpu_launch_config.h"
#include "paddle/fluid/platform/hostdevice.h"
namespace paddle {
namespace operators {
using IndexType = int64_t;
using Tensor = framework::Tensor;
using platform::PADDLE_CUDA_NUM_THREADS;
inline int GET_BLOCKS(const int N) {
return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS;
}
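// map input_value to a bin index in [0, nbins-1] over [min_value, max_value];
// the upper edge is clamped into the last bin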
template <typename T, typename IndexType>
__device__ static IndexType GetBin(T input_value, T min_value, T max_value,
int64_t nbins) {
IndexType bin = static_cast<int>((input_value - min_value) * nbins /
(max_value - min_value));
IndexType output_index = bin < nbins - 1 ? bin : nbins - 1;
return output_index;
}
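// each block accumulates a private histogram in shared memory (buf_hist) and
// then merges it into the global output with atomic adds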
template <typename T, typename IndexType>
__global__ void KernelHistogram(const T* input, const int total_elements,
const int64_t nbins, const T min_value,
const T max_value, int64_t* output) {
extern __shared__ int64_t buf_hist[];
for (int i = threadIdx.x; i < nbins; i += blockDim.x) {
buf_hist[i] = 0;
}
__syncthreads();
CUDA_KERNEL_LOOP(input_index, total_elements) {
// const IndexType input_index = threadIdx.x + blockIdx.x * blockDim.x;
const auto input_value = input[input_index];
if (input_value >= min_value && input_value <= max_value) {
const IndexType output_index =
GetBin<T, IndexType>(input_value, min_value, max_value, nbins);
paddle::platform::CudaAtomicAdd(&buf_hist[output_index], 1);
}
}
__syncthreads();
for (int i = threadIdx.x; i < nbins; i += blockDim.x) {
paddle::platform::CudaAtomicAdd(&output[i], buf_hist[i]);
}
}
template <typename DeviceContext, typename T>
class HistogramCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(context.GetPlace()), true,
platform::errors::InvalidArgument("It must use CUDAPlace."));
const Tensor* input = context.Input<framework::Tensor>("X");
Tensor* output = context.Output<framework::Tensor>("Out");
auto& nbins = context.Attr<int64_t>("bins");
auto& minval = context.Attr<int>("min");
auto& maxval = context.Attr<int>("max");
const T* input_data = input->data<T>();
const int input_numel = input->numel();
int64_t* out_data = output->mutable_data<int64_t>(context.GetPlace());
math::SetConstant<platform::CUDADeviceContext, int64_t>()(
context.template device_context<platform::CUDADeviceContext>(), output,
static_cast<int64_t>(0));
if (input_data == nullptr) return;
T output_min = static_cast<T>(minval);
T output_max = static_cast<T>(maxval);
if (output_min == output_max) {
auto input_x = framework::EigenVector<T>::Flatten(*input);
framework::Tensor input_min_t, input_max_t;
auto* input_min_data =
input_min_t.mutable_data<T>({1}, context.GetPlace());
auto* input_max_data =
input_max_t.mutable_data<T>({1}, context.GetPlace());
auto input_min_scala = framework::EigenScalar<T>::From(input_min_t);
auto input_max_scala = framework::EigenScalar<T>::From(input_max_t);
auto* place =
context.template device_context<DeviceContext>().eigen_device();
input_min_scala.device(*place) = input_x.minimum();
input_max_scala.device(*place) = input_x.maximum();
Tensor input_min_cpu, input_max_cpu;
TensorCopySync(input_min_t, platform::CPUPlace(), &input_min_cpu);
TensorCopySync(input_max_t, platform::CPUPlace(), &input_max_cpu);
output_min = input_min_cpu.data<T>()[0];
output_max = input_max_cpu.data<T>()[0];
}
if (output_min == output_max) {
output_min = output_min - 1;
output_max = output_max + 1;
}
PADDLE_ENFORCE_EQ(
(std::isinf(static_cast<float>(output_min)) ||
std::isnan(static_cast<float>(output_min)) ||
std::isinf(static_cast<float>(output_max)) ||
std::isnan(static_cast<float>(output_max))),
false, platform::errors::OutOfRange("range of min, max is not finite"));
PADDLE_ENFORCE_GE(
output_max, output_min,
platform::errors::InvalidArgument(
"max must be larger or equal to min. If min and max are both zero, "
"the minimum and maximum values of the data are used. "
"But received max is %d, min is %d",
maxval, minval));
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
hipLaunchKernelGGL(( KernelHistogram<
T, IndexType>), dim3(GET_BLOCKS(input_numel)), dim3(PADDLE_CUDA_NUM_THREADS),
nbins * sizeof(int64_t), stream,
input_data, input_numel, nbins, output_min, output_max, out_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
histogram,
ops::HistogramCUDAKernel<paddle::platform::CUDADeviceContext, int>,
ops::HistogramCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>,
ops::HistogramCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::HistogramCUDAKernel<paddle::platform::CUDADeviceContext, double>);
| bd46fa57a65f220ae254bff08cd53539d56df0ee.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/operators/histogram_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/gpu_launch_config.h"
#include "paddle/fluid/platform/hostdevice.h"
namespace paddle {
namespace operators {
using IndexType = int64_t;
using Tensor = framework::Tensor;
using platform::PADDLE_CUDA_NUM_THREADS;
inline int GET_BLOCKS(const int N) {
return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS;
}
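// map input_value to a bin index in [0, nbins-1] over [min_value, max_value];
// the upper edge is clamped into the last bin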
template <typename T, typename IndexType>
__device__ static IndexType GetBin(T input_value, T min_value, T max_value,
int64_t nbins) {
IndexType bin = static_cast<int>((input_value - min_value) * nbins /
(max_value - min_value));
IndexType output_index = bin < nbins - 1 ? bin : nbins - 1;
return output_index;
}
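// each block accumulates a private histogram in shared memory (buf_hist) and
// then merges it into the global output with atomic adds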
template <typename T, typename IndexType>
__global__ void KernelHistogram(const T* input, const int total_elements,
const int64_t nbins, const T min_value,
const T max_value, int64_t* output) {
extern __shared__ int64_t buf_hist[];
for (int i = threadIdx.x; i < nbins; i += blockDim.x) {
buf_hist[i] = 0;
}
__syncthreads();
CUDA_KERNEL_LOOP(input_index, total_elements) {
// const IndexType input_index = threadIdx.x + blockIdx.x * blockDim.x;
const auto input_value = input[input_index];
if (input_value >= min_value && input_value <= max_value) {
const IndexType output_index =
GetBin<T, IndexType>(input_value, min_value, max_value, nbins);
paddle::platform::CudaAtomicAdd(&buf_hist[output_index], 1);
}
}
__syncthreads();
for (int i = threadIdx.x; i < nbins; i += blockDim.x) {
paddle::platform::CudaAtomicAdd(&output[i], buf_hist[i]);
}
}
template <typename DeviceContext, typename T>
class HistogramCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(context.GetPlace()), true,
platform::errors::InvalidArgument("It must use CUDAPlace."));
const Tensor* input = context.Input<framework::Tensor>("X");
Tensor* output = context.Output<framework::Tensor>("Out");
auto& nbins = context.Attr<int64_t>("bins");
auto& minval = context.Attr<int>("min");
auto& maxval = context.Attr<int>("max");
const T* input_data = input->data<T>();
const int input_numel = input->numel();
int64_t* out_data = output->mutable_data<int64_t>(context.GetPlace());
math::SetConstant<platform::CUDADeviceContext, int64_t>()(
context.template device_context<platform::CUDADeviceContext>(), output,
static_cast<int64_t>(0));
if (input_data == nullptr) return;
T output_min = static_cast<T>(minval);
T output_max = static_cast<T>(maxval);
if (output_min == output_max) {
auto input_x = framework::EigenVector<T>::Flatten(*input);
framework::Tensor input_min_t, input_max_t;
auto* input_min_data =
input_min_t.mutable_data<T>({1}, context.GetPlace());
auto* input_max_data =
input_max_t.mutable_data<T>({1}, context.GetPlace());
auto input_min_scala = framework::EigenScalar<T>::From(input_min_t);
auto input_max_scala = framework::EigenScalar<T>::From(input_max_t);
auto* place =
context.template device_context<DeviceContext>().eigen_device();
input_min_scala.device(*place) = input_x.minimum();
input_max_scala.device(*place) = input_x.maximum();
Tensor input_min_cpu, input_max_cpu;
TensorCopySync(input_min_t, platform::CPUPlace(), &input_min_cpu);
TensorCopySync(input_max_t, platform::CPUPlace(), &input_max_cpu);
output_min = input_min_cpu.data<T>()[0];
output_max = input_max_cpu.data<T>()[0];
}
if (output_min == output_max) {
output_min = output_min - 1;
output_max = output_max + 1;
}
PADDLE_ENFORCE_EQ(
(std::isinf(static_cast<float>(output_min)) ||
std::isnan(static_cast<float>(output_min)) ||
std::isinf(static_cast<float>(output_max)) ||
std::isnan(static_cast<float>(output_max))),
false, platform::errors::OutOfRange("range of min, max is not finite"));
PADDLE_ENFORCE_GE(
output_max, output_min,
platform::errors::InvalidArgument(
"max must be larger or equal to min. If min and max are both zero, "
"the minimum and maximum values of the data are used. "
"But received max is %d, min is %d",
maxval, minval));
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
KernelHistogram<
T, IndexType><<<GET_BLOCKS(input_numel), PADDLE_CUDA_NUM_THREADS,
nbins * sizeof(int64_t), stream>>>(
input_data, input_numel, nbins, output_min, output_max, out_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
histogram,
ops::HistogramCUDAKernel<paddle::platform::CUDADeviceContext, int>,
ops::HistogramCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>,
ops::HistogramCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::HistogramCUDAKernel<paddle::platform::CUDADeviceContext, double>);
|
310b01113d5dab5bf1da9ce75ca687d025bd4cb0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 1.1
* copyright (c) 2022, Universitat Politècnica de València (UPV), PRHLT Research Centre
* Date: March 2022
* Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected])
* All rights reserved
*/
#include <cstdio>
#include <string>
#include <stdexcept>
#include <iostream>
#include "eddl/hardware/gpu/gpu_tensor.h"
#include "eddl/hardware/gpu/gpu_kernels.h"
// CUDA, NVIDIA compute capabilities:
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities
// -----------------------------------------------------------------
// GRID
// Maximum dimensionality of grid of thread blocks: 3
// Maximum x-dimension of a grid of thread blocks (2^31)-1
// Maximum y- or z-dimension of a grid of thread blocks: 65535
// THREAD BLOCK
// Maximum dimensionality of thread block: 3
// Maximum x- or y-dimension of a block: 1024
// Maximum z-dimension of a block: 64
//
// Maximum number of threads per block: 1024
// -----------------------------------------------------------------
hipblasHandle_t hcublas[64];
hiprandGenerator_t random_generator[64];
hipblasStatus_t bstatus;
hiprandStatus_t rstatus;
#ifdef cCUDNN
cudnnStatus_t dstatus;
cudnnHandle_t hdnn[64];
#endif
static const char *_curandGetErrorEnum(hiprandStatus_t error){
switch (error)
{
case HIPRAND_STATUS_ALLOCATION_FAILED:
return "HIPRAND_STATUS_ALLOCATION_FAILED";
case HIPRAND_STATUS_INITIALIZATION_FAILED:
return "HIPRAND_STATUS_INITIALIZATION_FAILED";
case HIPRAND_STATUS_VERSION_MISMATCH:
return "HIPRAND_STATUS_VERSION_MISMATCH";
case HIPRAND_STATUS_TYPE_ERROR:
return "HIPRAND_STATUS_TYPE_ERROR";
case HIPRAND_STATUS_OUT_OF_RANGE:
return "HIPRAND_STATUS_OUT_OF_RANGE";
case HIPRAND_STATUS_PREEXISTING_FAILURE:
return "HIPRAND_STATUS_PREEXISTING_FAILURE";
case HIPRAND_STATUS_NOT_INITIALIZED:
return "HIPRAND_STATUS_NOT_INITIALIZED";
case HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED:
return "HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case HIPRAND_STATUS_LENGTH_NOT_MULTIPLE:
return "HIPRAND_STATUS_LENGTH_NOT_MULTIPLE";
default:
std::string text = "unknown hiprand error: " + std::to_string(error) + " | (_curandGetErrorEnum)";
throw std::invalid_argument(text);
}
}
void check_cublas(hipblasStatus_t status, const char *f)
{
if ( status!= HIPBLAS_STATUS_SUCCESS)
{
std::string text = "error in cublas execution in " + std::string(f) + " | (check_cublas)";
throw std::runtime_error(text);
}
}
void check_curand(hiprandStatus_t status, const char *f)
{
if ( status!= HIPRAND_STATUS_SUCCESS)
{
std::string text = "error in hiprand execution in " + std::string(_curandGetErrorEnum(status)) + " | (check_curand)";
throw std::runtime_error(text);
}
}
void check_cuda(hipError_t err,const char *msg)
{
if(err!=hipSuccess)
{
std::string error_type = hipGetErrorString(err);
std::string text = "[CUDA ERROR]: " + error_type + " ("+ std::to_string(err) + ") raised in " + std::string(msg) + " | (check_cuda)";
throw std::runtime_error(text);
}
}
#ifdef cCUDNN
void check_cudnn(cudnnStatus_t status, const char *msg, const char *file, int line)
{
if (status != CUDNN_STATUS_SUCCESS)
{
std::string error_type = cudnnGetErrorString(status);
std::string text = "[CUDNN ERROR]: " + error_type + " ("+ std::to_string(status) + ") raised in " + std::string(msg) + " at " + std::string(file) + " file: " +std::to_string(line) +" line. | (check_cudnn)" ;
throw std::runtime_error(text);
}
}
#endif
void gpu_set_device(int device)
{
hipSetDevice(device);
}
void gpu_init(int device)
{
int nDevices;
hipGetDeviceCount(&nDevices);
if (device>nDevices)
{
std::string text = "GPU " + std::to_string(device) + " not available. Number of available GPUs is " + std::to_string(nDevices) + ". Further information running nvidia-smi | (gpu_init)";
throw std::runtime_error(text);
}
fprintf(stderr,"Selecting GPU device %d\n",device);
hipSetDevice(device);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop,device);
fprintf(stderr,"EDDL is running on GPU device %d, %s\n",device,prop.name);
/// CUBLAS
bstatus=hipblasCreate(&(hcublas[device]));
// try to init cublas several times
int i=0;
while ((bstatus!= HIPBLAS_STATUS_SUCCESS)&&(i<10)) {
bstatus=hipblasCreate(&(hcublas[device]));
i++;
fprintf(stderr,".\n");
}
if ( bstatus!= HIPBLAS_STATUS_SUCCESS)
{
std::string text = "problem in cublas create (gpu_init)";
throw std::runtime_error(text);
}
fprintf(stderr,"CuBlas initialized on GPU device %d, %s\n",device,prop.name);
bstatus = hipblasSetAtomicsMode(hcublas[device],HIPBLAS_ATOMICS_NOT_ALLOWED);
if ( bstatus!= HIPBLAS_STATUS_SUCCESS)
{
std::string text = "problem in cublas execution getting: NOT IMPLEMENTED | (gpu_init)";
throw std::runtime_error(text);
}
// CURAND
rstatus=hiprandCreateGenerator(&(random_generator[device]),HIPRAND_RNG_PSEUDO_MRG32K3A);
if (rstatus != HIPRAND_STATUS_SUCCESS)
{
std::string text = "error creating random numbers on gpu | (gpu_init)";
throw std::runtime_error(text);
}
rstatus=hiprandSetPseudoRandomGeneratorSeed(random_generator[device],1234);
if (rstatus != HIPRAND_STATUS_SUCCESS) {
std::string text = "error setting the seed for program | (gpu_init)";
throw std::runtime_error(text);
}
fprintf(stderr,"CuRand initialized on GPU device %d, %s\n",device,prop.name);
#ifdef cCUDNN
// CUDNN
dstatus=cudnnCreate(&hdnn[device]);
if (dstatus != CUDNN_STATUS_SUCCESS) {
std::string text = "problem in cudnn create (gpu_init)";
throw std::runtime_error(text);
}
fprintf(stderr,"CuDNN initialized on GPU device %d, %s\n",device,prop.name);
#endif
}
float* gpu_create_tensor(int dev,int size)
{
float* devicePointer;
hipSetDevice(dev);
check_cuda(hipMalloc((void**)&devicePointer,size*sizeof(float)),"create_tensor");
return devicePointer;
}
void gpu_delete_tensor(int dev, float* p)
{
hipSetDevice(dev);
check_cuda(hipFree(p),"delete_tensor");
}
void gpu_delete_tensor_int(int dev, int* p)
{
hipSetDevice(dev);
check_cuda(hipFree(p),"delete_tensor_int");
}
int gpu_devices()
{
int nDevices;
hipGetDeviceCount(&nDevices);
return nDevices;
}
| 310b01113d5dab5bf1da9ce75ca687d025bd4cb0.cu | /*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 1.1
* copyright (c) 2022, Universitat Politècnica de València (UPV), PRHLT Research Centre
* Date: March 2022
* Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected])
* All rights reserved
*/
#include <cstdio>
#include <string>
#include <stdexcept>
#include <iostream>
#include "eddl/hardware/gpu/gpu_tensor.h"
#include "eddl/hardware/gpu/gpu_kernels.h"
// CUDA, NVIDIA compute capabilities:
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities
// -----------------------------------------------------------------
// GRID
// Maximum dimensionality of grid of thread blocks: 3
// Maximum x-dimension of a grid of thread blocks (2^31)-1
// Maximum y- or z-dimension of a grid of thread blocks: 65535
// THREAD BLOCK
// Maximum dimensionality of thread block: 3
// Maximum x- or y-dimension of a block: 1024
// Maximum z-dimension of a block: 64
//
// Maximum number of threads per block: 1024
// -----------------------------------------------------------------
cublasHandle_t hcublas[64];
curandGenerator_t random_generator[64];
cublasStatus_t bstatus;
curandStatus_t rstatus;
#ifdef cCUDNN
cudnnStatus_t dstatus;
cudnnHandle_t hdnn[64];
#endif
static const char *_curandGetErrorEnum(curandStatus_t error){
switch (error)
{
case CURAND_STATUS_ALLOCATION_FAILED:
return "CURAND_STATUS_ALLOCATION_FAILED";
case CURAND_STATUS_INITIALIZATION_FAILED:
return "CURAND_STATUS_INITIALIZATION_FAILED";
case CURAND_STATUS_VERSION_MISMATCH:
return "CURAND_STATUS_VERSION_MISMATCH";
case CURAND_STATUS_TYPE_ERROR:
return "CURAND_STATUS_TYPE_ERROR";
case CURAND_STATUS_OUT_OF_RANGE:
return "CURAND_STATUS_OUT_OF_RANGE";
case CURAND_STATUS_PREEXISTING_FAILURE:
return "CURAND_STATUS_PREEXISTING_FAILURE";
case CURAND_STATUS_NOT_INITIALIZED:
return "CURAND_STATUS_NOT_INITIALIZED";
case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED:
return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case CURAND_STATUS_LENGTH_NOT_MULTIPLE:
return "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
default:
std::string text = "unknown curand error: " + std::to_string(error) + " | (_curandGetErrorEnum)";
throw std::invalid_argument(text);
}
}
void check_cublas(cublasStatus_t status, const char *f)
{
if ( status!= CUBLAS_STATUS_SUCCESS)
{
std::string text = "error in cublas execution in " + std::string(f) + " | (check_cublas)";
throw std::runtime_error(text);
}
}
void check_curand(curandStatus_t status, const char *f)
{
if ( status!= CURAND_STATUS_SUCCESS)
{
std::string text = "error in curand execution in " + std::string(_curandGetErrorEnum(status)) + " | (check_curand)";
throw std::runtime_error(text);
}
}
void check_cuda(cudaError_t err,const char *msg)
{
if(err!=cudaSuccess)
{
std::string error_type = cudaGetErrorString(err);
std::string text = "[CUDA ERROR]: " + error_type + " ("+ std::to_string(err) + ") raised in " + std::string(msg) + " | (check_cuda)";
throw std::runtime_error(text);
}
}
#ifdef cCUDNN
void check_cudnn(cudnnStatus_t status, const char *msg, const char *file, int line)
{
if (status != CUDNN_STATUS_SUCCESS)
{
std::string error_type = cudnnGetErrorString(status);
std::string text = "[CUDNN ERROR]: " + error_type + " ("+ std::to_string(status) + ") raised in " + std::string(msg) + " at " + std::string(file) + " file: " +std::to_string(line) +" line. | (check_cudnn)" ;
throw std::runtime_error(text);
}
}
#endif
void gpu_set_device(int device)
{
cudaSetDevice(device);
}
void gpu_init(int device)
{
int nDevices;
cudaGetDeviceCount(&nDevices);
if (device>nDevices)
{
std::string text = "GPU " + std::to_string(device) + " not available. Number of available GPUs is " + std::to_string(nDevices) + ". Further information running nvidia-smi | (gpu_init)";
throw std::runtime_error(text);
}
fprintf(stderr,"Selecting GPU device %d\n",device);
cudaSetDevice(device);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop,device);
fprintf(stderr,"EDDL is running on GPU device %d, %s\n",device,prop.name);
/// CUBLAS
bstatus=cublasCreate(&(hcublas[device]));
// try to init cublas several times
int i=0;
while ((bstatus!= CUBLAS_STATUS_SUCCESS)&&(i<10)) {
bstatus=cublasCreate(&(hcublas[device]));
i++;
fprintf(stderr,".\n");
}
if ( bstatus!= CUBLAS_STATUS_SUCCESS)
{
std::string text = "problem in cublas create (gpu_init)";
throw std::runtime_error(text);
}
fprintf(stderr,"CuBlas initialized on GPU device %d, %s\n",device,prop.name);
bstatus = cublasSetAtomicsMode(hcublas[device],CUBLAS_ATOMICS_NOT_ALLOWED);
if ( bstatus!= CUBLAS_STATUS_SUCCESS)
{
std::string text = "problem in cublas execution getting: NOT IMPLEMENTED | (gpu_init)";
throw std::runtime_error(text);
}
// CURAND
rstatus=curandCreateGenerator(&(random_generator[device]),CURAND_RNG_PSEUDO_MRG32K3A);
if (rstatus != CURAND_STATUS_SUCCESS)
{
std::string text = "error creating random numbers on gpu | (gpu_init)";
throw std::runtime_error(text);
}
rstatus=curandSetPseudoRandomGeneratorSeed(random_generator[device],1234);
if (rstatus != CURAND_STATUS_SUCCESS) {
std::string text = "error setting the seed for program | (gpu_init)";
throw std::runtime_error(text);
}
fprintf(stderr,"CuRand initialized on GPU device %d, %s\n",device,prop.name);
#ifdef cCUDNN
// CUDNN
dstatus=cudnnCreate(&hdnn[device]);
if (dstatus != CUDNN_STATUS_SUCCESS) {
std::string text = "problem in cudnn create (gpu_init)";
throw std::runtime_error(text);
}
fprintf(stderr,"CuDNN initialized on GPU device %d, %s\n",device,prop.name);
#endif
}
float* gpu_create_tensor(int dev,int size)
{
float* devicePointer;
cudaSetDevice(dev);
check_cuda(cudaMalloc((void**)&devicePointer,size*sizeof(float)),"create_tensor");
return devicePointer;
}
void gpu_delete_tensor(int dev, float* p)
{
cudaSetDevice(dev);
check_cuda(cudaFree(p),"delete_tensor");
}
void gpu_delete_tensor_int(int dev, int* p)
{
cudaSetDevice(dev);
check_cuda(cudaFree(p),"delete_tensor_int");
}
int gpu_devices()
{
int nDevices;
cudaGetDeviceCount(&nDevices);
return nDevices;
}
|
918c8de71206d71e5cb2e5d0b78dd8c6200f3db4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* UmbrellaSampling.cu
*
* Created on: Jul 22, 2011
* Author: alekseenko
*
*/
#include <Core/global.h>
#include <Util/Log.h>
#include <Util/wrapper.h>
#include <Util/mystl.h>
#include <Util/atomfilter.h>
#include "UmbrellaSampling.cuh"
#include <set>
namespace umbrella_sampling {
class Log: public ILog {
virtual void Write(const char* message) const {
std::cout << makeTimePrefix() << "<umbrella_sampling> " << message << std::endl;
}
} log;
#define LOG LogStream(log)
#include "UmbrellaSampling_com.cu"
//#include "UmbrellaSampling_rot.cu"
void create() {
LOG << "create";
if(getYesNoParameter(PARAMETER_UMBRELLA, 0)) {
// Initialize all necessary structures...
potential.destroy = &destroy;
sprintf(potential.name, "Umbrella potential");
debug = getYesNoParameter(PARAMETER_UMBRELLA_DEBUG, 0);
init();
// read parameters...
std::string stage, method;
stage = getParameterAs<std::string> (PARAMETER_UMBRELLA_STAGE);
method = getParameterAs<std::string> (PARAMETER_UMBRELLA_METHOD);
sampling_params.win_step = getFloatParameter (PARAMETER_UMBRELLA_WINSTEP);
sampling_params.energyfile = getMaskedParameterAs<std::string> (PARAMETER_UMBRELLA_OUTFILE);
for (int i = 0; i < parameters.Ntr; ++i)
fclose(safe_fopen(string_replace(sampling_params.energyfile,"<run>",any2str(i + parameters.firstrun)).c_str(),"w")); // Clear file. Ugly way, but I'm too lazy to implement smth. better
// Allocate some memory...
allocateCPU((void**)&h_data.atomGroups, gsystem.Ntot*sizeof(int));
allocateGPU((void**)&d_data.atomGroups, gsystem.Ntot*sizeof(int));
allocateCPU((void**)&sampling_params.rcs, parameters.Ntr*sizeof(float4));
allocateCPU((void**)&sampling_params.energy, parameters.Ntr*sizeof(float2));
// Get K-spring for movement along reaction coordinate
h_data.ks = d_data.ks = getFloatParameter(PARAMETER_UMBRELLA_KS);
// Get K-spring for orthogonal movement. Default is 0 (orthogonal movement is unrestricted)
h_data.ks_ortho = d_data.ks_ortho = getFloatParameter(PARAMETER_UMBRELLA_ORTHOGONAL_KS, 0.0f);
// Set up updater frequency: how often we move the cantilever (if it is moved) and how often we output energies.
updater.frequency = getIntegerParameter(PARAMETER_UMBRELLA_FREQ);
// Specify stage of sampling..
if (stage == PARAMETER_VALUE_UMBRELLA_STAGE_PREPARE) {
LOG << "Preparation (pulling) will be performed";
sampling_params.stage = UMBRELLA_STAGE_PREPARATION;
// Set initial RCs to zero
for (int i = 0; i < parameters.Ntr; ++i)
sampling_params.rcs[i] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
} else {
if (stage == PARAMETER_VALUE_UMBRELLA_STAGE_PRODUCTION) {
LOG << "Production run will be performed";
sampling_params.stage = UMBRELLA_STAGE_PRODUCTION;
// Load trajectories (we just overwrite previous data...). Not very elegant, but works...
for (int i = 0; i < parameters.Ntr; ++i) {
int traj = i + parameters.firstrun;
char trajFilename[1024];
LOG << "Loading window # " << traj;
getMaskedParameterWithReplacementT(trajFilename, PARAMETER_UMBRELLA_FILE_FRAME, PARAMETER_STRING_UNDEFINED, traj, "<run>");
// Read coordinates to host buffer
readCoordinates(trajFilename);
// Screw velocities. Random ones are ok.
float T = getFloatParameter(PARAMETER_INITIAL_TEMPERATURE, -1.0f);
if(T == -1.0f){
T = getFloatParameter(PARAMETER_TEMPERATURE, 0.0f);
}
generateVelocities ( T );
// Copy data to appropriate place on GPU
copyCoordinatesToGPU(i, 1);
copyVelocitiesToGPU(i, 1);
sampling_params.rcs[i] = make_float4(0.0f, 0.0f, 0.0f, traj); // .w-th component is window number
}
} else {
LOG << "Error: Stage parameter should be either '" << PARAMETER_VALUE_UMBRELLA_STAGE_PREPARE << "' for generating initial positions" << \
"or '" << PARAMETER_VALUE_UMBRELLA_STAGE_PRODUCTION << "' for production run. It is '" << stage << "'";
DIE("Wrong parameter!");
}
}
// Prepare run for specific method
if (method == PARAMETER_VALUE_UMBRELLA_METHOD_COM) {
initCoM();
/* else if (method == PARAMETER_VALUE_UMBRELLA_METHOD_ROT) {
initRot(); */
} else {
LOG << "Error: Umbrella sampling supports only '" << PARAMETER_VALUE_UMBRELLA_METHOD_COM << "' method right now. We're very sorry.";
DIE("Wrong parameter!");
}
hipMemcpy(d_data.atomGroups, h_data.atomGroups, gsystem.Ntot*sizeof(int), hipMemcpyHostToDevice);
checkCUDAError("Copying atom groups to device");
// Copy common data...
// hipMemcpyToSymbol("umbrella_sampling_c_data", &d_data, sizeof(Data), 0, hipMemcpyHostToDevice);
// checkCUDAError("Initializing c_data");
// Announce that we have updater and potential
updaters[updatersCount++] = &updater;
potentials[potentialsCount++] = &potential;
} else {
LOG << "Umbrella sampling is disabled";
}
LOG << "Done initializing";
}
void init() {
blockSize = BLOCK_SIZE;
blockCount = gsystem.Ntot/BLOCK_SIZE + 1;
blockCountTr = gsystem.N / BLOCK_SIZE + 1;
}
/*
// Unused now, can be used later... Somewhere.. May be...
template <typename T>
__device__ __host__ void findPerpendiculars(const <T> &in, <T> &out1, <T> &out2) {
#define X in.x
#define Y in.y
#define Z in.z
out1 = T(-Y, X, 0.0f);
if (X == Y && Y == 0.0f) {
out1 = T(1.0f, 1.0f, 0.0f);
}
out2 = T(-X*Z, -Y*Z, X*X + Y*Y);
#undef X
#undef Y
#undef Z
}
*/
void destroy() {
// Freeing allocated memory is too mainstream
}
void updaterDestroy() {}
#undef LOG
} // namespace umbrella_sampling
| 918c8de71206d71e5cb2e5d0b78dd8c6200f3db4.cu | /*
* UmbrellaSampling.cu
*
* Created on: Jul 22, 2011
* Author: alekseenko
*
*/
#include <Core/global.h>
#include <Util/Log.h>
#include <Util/wrapper.h>
#include <Util/mystl.h>
#include <Util/atomfilter.h>
#include "UmbrellaSampling.cuh"
#include <set>
namespace umbrella_sampling {
class Log: public ILog {
virtual void Write(const char* message) const {
std::cout << makeTimePrefix() << "<umbrella_sampling> " << message << std::endl;
}
} log;
#define LOG LogStream(log)
#include "UmbrellaSampling_com.cu"
//#include "UmbrellaSampling_rot.cu"
void create() {
LOG << "create";
if(getYesNoParameter(PARAMETER_UMBRELLA, 0)) {
// Initialize all necessary structures...
potential.destroy = &destroy;
sprintf(potential.name, "Umbrella potential");
debug = getYesNoParameter(PARAMETER_UMBRELLA_DEBUG, 0);
init();
// read parameters...
std::string stage, method;
stage = getParameterAs<std::string> (PARAMETER_UMBRELLA_STAGE);
method = getParameterAs<std::string> (PARAMETER_UMBRELLA_METHOD);
sampling_params.win_step = getFloatParameter (PARAMETER_UMBRELLA_WINSTEP);
sampling_params.energyfile = getMaskedParameterAs<std::string> (PARAMETER_UMBRELLA_OUTFILE);
for (int i = 0; i < parameters.Ntr; ++i)
fclose(safe_fopen(string_replace(sampling_params.energyfile,"<run>",any2str(i + parameters.firstrun)).c_str(),"w")); // Clear file. Ugly way, but I'm too lazy to implement smth. better
// Allocate some memory...
allocateCPU((void**)&h_data.atomGroups, gsystem.Ntot*sizeof(int));
allocateGPU((void**)&d_data.atomGroups, gsystem.Ntot*sizeof(int));
allocateCPU((void**)&sampling_params.rcs, parameters.Ntr*sizeof(float4));
allocateCPU((void**)&sampling_params.energy, parameters.Ntr*sizeof(float2));
// Get K-spring for movement along reaction coordinate
h_data.ks = d_data.ks = getFloatParameter(PARAMETER_UMBRELLA_KS);
// Get K-spring for orthogonal movement. Default is 0 (orthogonal movement is unrestricted)
h_data.ks_ortho = d_data.ks_ortho = getFloatParameter(PARAMETER_UMBRELLA_ORTHOGONAL_KS, 0.0f);
// Set up updater frequency: how often we move the cantilever (if it is moved) and how often we output energies.
updater.frequency = getIntegerParameter(PARAMETER_UMBRELLA_FREQ);
// Specify stage of sampling..
if (stage == PARAMETER_VALUE_UMBRELLA_STAGE_PREPARE) {
LOG << "Preparation (pulling) will be performed";
sampling_params.stage = UMBRELLA_STAGE_PREPARATION;
// Set initial RCs to zero
for (int i = 0; i < parameters.Ntr; ++i)
sampling_params.rcs[i] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
} else {
if (stage == PARAMETER_VALUE_UMBRELLA_STAGE_PRODUCTION) {
LOG << "Production run will be performed";
sampling_params.stage = UMBRELLA_STAGE_PRODUCTION;
// Load trajectories (we just overwrite previous data...). Not very elegant, but works...
for (int i = 0; i < parameters.Ntr; ++i) {
int traj = i + parameters.firstrun;
char trajFilename[1024];
LOG << "Loading window # " << traj;
getMaskedParameterWithReplacementT(trajFilename, PARAMETER_UMBRELLA_FILE_FRAME, PARAMETER_STRING_UNDEFINED, traj, "<run>");
// Read coordinates to host buffer
readCoordinates(trajFilename);
// Screw velocities. Random ones are ok.
float T = getFloatParameter(PARAMETER_INITIAL_TEMPERATURE, -1.0f);
if(T == -1.0f){
T = getFloatParameter(PARAMETER_TEMPERATURE, 0.0f);
}
generateVelocities ( T );
// Copy data to appropriate place on GPU
copyCoordinatesToGPU(i, 1);
copyVelocitiesToGPU(i, 1);
sampling_params.rcs[i] = make_float4(0.0f, 0.0f, 0.0f, traj); // .w-th component is window number
}
} else {
LOG << "Error: Stage parameter should be either '" << PARAMETER_VALUE_UMBRELLA_STAGE_PREPARE << "' for generating initial positions" << \
"or '" << PARAMETER_VALUE_UMBRELLA_STAGE_PRODUCTION << "' for production run. It is '" << stage << "'";
DIE("Wrong parameter!");
}
}
// Prepare run for specific method
if (method == PARAMETER_VALUE_UMBRELLA_METHOD_COM) {
initCoM();
/* else if (method == PARAMETER_VALUE_UMBRELLA_METHOD_ROT) {
initRot(); */
} else {
LOG << "Error: Umbrella sampling supports only '" << PARAMETER_VALUE_UMBRELLA_METHOD_COM << "' method right now. We're very sorry.";
DIE("Wrong parameter!");
}
cudaMemcpy(d_data.atomGroups, h_data.atomGroups, gsystem.Ntot*sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError("Copying atom groups to device");
// Copy common data...
// cudaMemcpyToSymbol("umbrella_sampling_c_data", &d_data, sizeof(Data), 0, cudaMemcpyHostToDevice);
// checkCUDAError("Initializing c_data");
// Announce that we have updater and potential
updaters[updatersCount++] = &updater;
potentials[potentialsCount++] = &potential;
} else {
LOG << "Umbrella sampling is disabled";
}
LOG << "Done initializing";
}
void init() {
blockSize = BLOCK_SIZE;
blockCount = gsystem.Ntot/BLOCK_SIZE + 1;
blockCountTr = gsystem.N / BLOCK_SIZE + 1;
}
/*
// Unused now, can be used later... Somewhere.. May be...
template <typename T>
__device__ __host__ void findPerpendiculars(const <T> &in, <T> &out1, <T> &out2) {
#define X in.x
#define Y in.y
#define Z in.z
out1 = T(-Y, X, 0.0f);
if (X == Y && Y == 0.0f) {
out1 = T(1.0f, 1.0f, 0.0f);
}
out2 = T(-X*Z, -Y*Z, X*X + Y*Y);
#undef X
#undef Y
#undef Z
}
*/
void destroy() {
// Freeing allocated memory is too mainstream
}
void updaterDestroy() {}
#undef LOG
} // namespace umbrella_sampling
|
432bc51619de0f8031ac6d95a74fe5dbb1806c85.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is being provided
* under the terms and conditions of a Source Code License Agreement.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#include <stdio.h>
#include <rocblas.h>
#include "cdp_lu.h"
extern __device__ void report_error(const char *strName, int info);
extern __device__ __noinline__ void dgetf2(hipblasHandle_t cb_handle, int m, int n, double *A, int lda, int *ipiv, int *info);
extern __global__ void dlaswp(int n, double *A, int lda, int *ipiv, int k1, int k2);
#define DGETRF_BLOCK_SIZE 32
__device__ __noinline__ void dgetrf(hipblasHandle_t cb_handle, hipStream_t stream, int m, int n, double *A, int lda, int *ipiv, int *info)
{
hipblasStatus_t status;
// The flag set by one thread to indicate a failure.
__shared__ int s_info;
// Initialize to 0
if (threadIdx.x == 0)
{
s_info = 0;
}
*info = 0;
if (m < 0)
{
*info = -1;
}
if (n < 0)
{
*info = -2;
}
if (lda < max(1, m))
{
*info = -4;
}
if (*info)
{
if (threadIdx.x == 0)
report_error("DGETRF", *info);
return;
}
// Quick return if possible
if (m == 0 || n == 0)
{
return;
}
// Determine the block size for this environment.
int nb = 64;
const int minDim = min(m, n);
if (nb < 1 || nb > minDim)
{
// We're too small - fall through to just calling dgetf2.
dgetf2(cb_handle, m, n, A, lda, ipiv, info);
return;
}
// Big enough to use blocked code.
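// Right-looking blocked LU: factor the current (m-j) x jb panel with dgetf2, apply its row
// interchanges to the neighbouring column blocks with dlaswp, solve the triangular system for the
// block row (Dtrsm), then update the trailing submatrix with a rank-jb update (Dgemm).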
for (int j = 0 ; j < minDim ; j += nb)
{
int iinfo;
int jb = min(minDim - j, nb);
if (threadIdx.x == 0)
{
// Factor diagonal and subdiagonal blocks and test for exact singularity.
dgetf2(cb_handle, m-j, jb, &A[j*lda + j], lda, &ipiv[j], &iinfo);
// Adjust INFO and the pivot indices.
if (*info == 0 && iinfo > 0)
s_info = iinfo + j;
}
__syncthreads();
// Make sure info is valid.
*info = s_info;
// We update ipiv in parallel on the device, if we were launched with >1 threads
for (int i = j+threadIdx.x, end = min(m, j+jb) ; i < end ; i += blockDim.x)
ipiv[i] += j;
__syncthreads();
// Apply interchanges to columns 1:J-1. JB rows.
if (threadIdx.x == 0)
{
if (j > 0)
hipLaunchKernelGGL(( dlaswp), dim3(1), dim3(256), 0, stream, j, A, lda, ipiv, j, j+jb);
// Apply interchanges to columns J+JB:N. JB rows.
if (j+jb < n)
{
hipLaunchKernelGGL(( dlaswp), dim3(1), dim3(256), 0, stream, n-j-jb, &A[(j+jb)*lda], lda, ipiv, j, j+jb);
double one = 1.0;
status = hipblasDtrsm(
cb_handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_LOWER, HIPBLAS_OP_N, HIPBLAS_DIAG_UNIT,
jb, n-j-jb,
&one,
&A[j*lda+j], lda,
&A[(j+jb)*lda+j], lda);
if (status != HIPBLAS_STATUS_SUCCESS)
{
printf("dgetrf: Failed dtrsm: %d\n", status);
s_info = 1;
}
}
}
__syncthreads();
// Make sure info has the correct value.
if (s_info)
{
*info = s_info;
return;
}
// Update trailing submatrix.
if (threadIdx.x == 0 && j + jb < m)
{
double one = 1.0;
double minus_one = -1.0;
status = hipblasDgemm(
cb_handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
m-j-jb, n-j-jb, jb,
&minus_one,
&A[j*lda + j+jb], lda,
&A[(j+jb)*lda + j], lda,
&one,
&A[(j+jb)*lda + j+jb], lda);
if (status != HIPBLAS_STATUS_SUCCESS)
{
printf("dgetrf: Failed dgemm: %d\n", status);
s_info = 1;
}
}
__syncthreads();
// Make sure info has the correct value.
if (s_info)
{
*info = s_info;
return;
}
}
}
////////////////////////////////////////////////////////////
//
// Entry functions for host-side and device-side calling
//
////////////////////////////////////////////////////////////
__global__ void dgetrf_cdpentry(Parameters *device_params)
{
hipblasHandle_t cb_handle = NULL;
hipStream_t stream;
if (threadIdx.x == 0)
{
hipblasStatus_t status = hipblasCreate(&cb_handle);
hipblasSetPointerMode(cb_handle, HIPBLAS_POINTER_MODE_HOST);
if (status != HIPBLAS_STATUS_SUCCESS)
{
*device_params->device_info = -8;
printf("dgetrf: Failed to create cublas context - status = %d\n", status);
return;
}
// Create a local stream for all of our operations
hipStreamCreateWithFlags(&stream, hipStreamNonBlocking);
hipblasSetStream(cb_handle, stream);
}
__syncthreads(); // Compiler requires this to not tail-split the if()...
dgetrf(cb_handle,
stream,
device_params->m,
device_params->n,
device_params->device_LU,
device_params->lda,
device_params->device_piv,
device_params->device_info);
}
| 432bc51619de0f8031ac6d95a74fe5dbb1806c85.cu | /*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is being provided
* under the terms and conditions of a Source Code License Agreement.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#include <stdio.h>
#include <cublas_v2.h>
#include "cdp_lu.h"
extern __device__ void report_error(const char *strName, int info);
extern __device__ __noinline__ void dgetf2(cublasHandle_t cb_handle, int m, int n, double *A, int lda, int *ipiv, int *info);
extern __global__ void dlaswp(int n, double *A, int lda, int *ipiv, int k1, int k2);
#define DGETRF_BLOCK_SIZE 32
__device__ __noinline__ void dgetrf(cublasHandle_t cb_handle, cudaStream_t stream, int m, int n, double *A, int lda, int *ipiv, int *info)
{
cublasStatus_t status;
// The flag set by one thread to indicate a failure.
__shared__ int s_info;
// Initialize to 0
if (threadIdx.x == 0)
{
s_info = 0;
}
*info = 0;
if (m < 0)
{
*info = -1;
}
if (n < 0)
{
*info = -2;
}
if (lda < max(1, m))
{
*info = -4;
}
if (*info)
{
if (threadIdx.x == 0)
report_error("DGETRF", *info);
return;
}
// Quick return if possible
if (m == 0 || n == 0)
{
return;
}
// Determine the block size for this environment.
int nb = 64;
const int minDim = min(m, n);
if (nb < 1 || nb > minDim)
{
// We're too small - fall through to just calling dgetf2.
dgetf2(cb_handle, m, n, A, lda, ipiv, info);
return;
}
// Big enough to use blocked code.
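// Right-looking blocked LU: factor the current (m-j) x jb panel with dgetf2, apply its row
// interchanges to the neighbouring column blocks with dlaswp, solve the triangular system for the
// block row (Dtrsm), then update the trailing submatrix with a rank-jb update (Dgemm).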
for (int j = 0 ; j < minDim ; j += nb)
{
int iinfo;
int jb = min(minDim - j, nb);
if (threadIdx.x == 0)
{
// Factor diagonal and subdiagonal blocks and test for exact singularity.
dgetf2(cb_handle, m-j, jb, &A[j*lda + j], lda, &ipiv[j], &iinfo);
// Adjust INFO and the pivot indices.
if (*info == 0 && iinfo > 0)
s_info = iinfo + j;
}
__syncthreads();
// Make sure info is valid.
*info = s_info;
// We update ipiv in parallel on the device, if we were launched with >1 threads
for (int i = j+threadIdx.x, end = min(m, j+jb) ; i < end ; i += blockDim.x)
ipiv[i] += j;
__syncthreads();
// Apply interchanges to columns 1:J-1. JB rows.
if (threadIdx.x == 0)
{
if (j > 0)
dlaswp<<<1, 256, 0, stream>>>(j, A, lda, ipiv, j, j+jb);
// Apply interchanges to columns J+JB:N. JB rows.
if (j+jb < n)
{
dlaswp<<<1, 256, 0, stream>>>(n-j-jb, &A[(j+jb)*lda], lda, ipiv, j, j+jb);
double one = 1.0;
status = cublasDtrsm_v2(
cb_handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT,
jb, n-j-jb,
&one,
&A[j*lda+j], lda,
&A[(j+jb)*lda+j], lda);
if (status != CUBLAS_STATUS_SUCCESS)
{
printf("dgetrf: Failed dtrsm: %d\n", status);
s_info = 1;
}
}
}
__syncthreads();
// Make sure info has the correct value.
if (s_info)
{
*info = s_info;
return;
}
// Update trailing submatrix.
if (threadIdx.x == 0 && j + jb < m)
{
double one = 1.0;
double minus_one = -1.0;
status = cublasDgemm_v2(
cb_handle, CUBLAS_OP_N, CUBLAS_OP_N,
m-j-jb, n-j-jb, jb,
&minus_one,
&A[j*lda + j+jb], lda,
&A[(j+jb)*lda + j], lda,
&one,
&A[(j+jb)*lda + j+jb], lda);
if (status != CUBLAS_STATUS_SUCCESS)
{
printf("dgetrf: Failed dgemm: %d\n", status);
s_info = 1;
}
}
__syncthreads();
// Make sure info has the correct value.
if (s_info)
{
*info = s_info;
return;
}
}
}
////////////////////////////////////////////////////////////
//
// Entry functions for host-side and device-side calling
//
////////////////////////////////////////////////////////////
__global__ void dgetrf_cdpentry(Parameters *device_params)
{
cublasHandle_t cb_handle = NULL;
cudaStream_t stream;
if (threadIdx.x == 0)
{
cublasStatus_t status = cublasCreate_v2(&cb_handle);
cublasSetPointerMode_v2(cb_handle, CUBLAS_POINTER_MODE_HOST);
if (status != CUBLAS_STATUS_SUCCESS)
{
*device_params->device_info = -8;
printf("dgetrf: Failed to create cublas context - status = %d\n", status);
return;
}
// Create a local stream for all of our operations
cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
cublasSetStream_v2(cb_handle, stream);
}
__syncthreads(); // Compiler requires this to not tail-split the if()...
dgetrf(cb_handle,
stream,
device_params->m,
device_params->n,
device_params->device_LU,
device_params->lda,
device_params->device_piv,
device_params->device_info);
}
|
d60e51d1e26a7f6f6ed8882a2332d0e26159087f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void naivekernel(float* output, float* frameA, float* frameB, int chans) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = i<<1;
if (i < chans) {
int test = frameA[j] >= frameB[j];
if (test) {
output[j] = frameA[j];
output[j+1] = frameA[j+1];
}
else {
output[j] = frameB[j];
output[j+1] = frameB[j+1];
}
}
} | d60e51d1e26a7f6f6ed8882a2332d0e26159087f.cu | #include "includes.h"
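// Per-pair selection: thread i owns the float pair starting at index 2*i; it compares the first
// element of that pair in frameA and frameB and copies the whole pair from whichever frame has the
// larger value (frameA wins ties). 'chans' is the number of pairs.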
__global__ void naivekernel(float* output, float* frameA, float* frameB, int chans) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = i<<1;
if (i < chans) {
int test = frameA[j] >= frameB[j];
if (test) {
output[j] = frameA[j];
output[j+1] = frameA[j+1];
}
else {
output[j] = frameB[j];
output[j+1] = frameB[j+1];
}
}
} |
5f909935c6eff3887a85c92a8b7e773ed83ea9f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels.h"
#include "linalg.h"
#include "state.cuh"
#include <cstdio>
#include <vector>
__global__ void saxpy(int n, real a, real *x, real *y) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
y[i] = a * x[i] + y[i];
}
}
void saxpy_cuda(int N, real alpha, real *x, real *y) {
real *d_x, *d_y;
hipMalloc(&d_x, N * sizeof(real));
hipMalloc(&d_y, N * sizeof(real));
hipMemcpy(d_x, x, N * sizeof(real), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N * sizeof(real), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( saxpy), dim3((N + 255) / 256), dim3(256), 0, 0, N, alpha, d_x, d_y);
hipMemcpy(y, d_y, N * sizeof(real), hipMemcpyDeviceToHost);
hipFree(d_x);
hipFree(d_y);
}
__global__ void test_svd(int n,
Matrix3 *A,
Matrix3 *U,
Matrix3 *sig,
Matrix3 *V) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n) {
svd(A[id], U[id], sig[id], V[id]);
}
}
// 3D only..
void test_svd_cuda(int n, real *A, real *U, real *sig, real *V) {
Matrix3 *d_A, *d_U, *d_sig, *d_V;
hipMalloc(&d_A, sizeof(Matrix3) * (unsigned int)(n));
hipMemcpy(d_A, A, sizeof(Matrix3) * n, hipMemcpyHostToDevice);
hipMalloc(&d_U, sizeof(Matrix3) * (unsigned int)(n));
hipMalloc(&d_sig, sizeof(Matrix3) * (unsigned int)(n));
hipMalloc(&d_V, sizeof(Matrix3) * (unsigned int)(n));
hipLaunchKernelGGL(( test_svd), dim3((n + 127) / 128), dim3(128), 0, 0, n, d_A, d_U, d_sig, d_V);
std::vector<Matrix3> h_U(n), h_sig(n), h_V(n);
hipMemcpy(h_U.data(), d_U, sizeof(Matrix3) * n, hipMemcpyDeviceToHost);
hipMemcpy(h_sig.data(), d_sig, sizeof(Matrix3) * n, hipMemcpyDeviceToHost);
hipMemcpy(h_V.data(), d_V, sizeof(Matrix3) * n, hipMemcpyDeviceToHost);
// Taichi uses column-first storage
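// Host layout: each 3x3 matrix is padded to 12 floats (row stride of 4), hence p * 12 + 4 * i + j;
// the [j][i] access below performs the transpose between the device layout and this host layout.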
for (int p = 0; p < n; p++) {
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
U[p * 12 + 4 * i + j] = h_U[p][j][i];
sig[p * 12 + 4 * i + j] = h_sig[p][j][i];
V[p * 12 + 4 * i + j] = h_V[p][j][i];
}
}
}
}
template <int dim>
__host__ std::vector<real> TStateBase<dim>::fetch_x() {
std::vector<real> host_x(dim * num_particles);
hipMemcpy(host_x.data(), x_storage,
sizeof(TVector<real, dim>) * num_particles,
hipMemcpyDeviceToHost);
return host_x;
}
template <int dim>
__host__ std::vector<real> TStateBase<dim>::fetch_grad_v() {
std::vector<real> host_grad_v(dim * num_particles);
hipMemcpy(host_grad_v.data(), grad_v_storage,
sizeof(TVector<real, dim>) * num_particles,
hipMemcpyDeviceToHost);
return host_grad_v;
}
template <int dim>
__host__ std::vector<real> TStateBase<dim>::fetch_grad_x() {
std::vector<real> host_grad_x(dim * num_particles);
hipMemcpy(host_grad_x.data(), grad_x_storage,
sizeof(TVector<real, dim>) * num_particles,
hipMemcpyDeviceToHost);
return host_grad_x;
}
template <int dim>
void TStateBase<dim>::set_initial_v(float *v) {
hipMemcpy(v_storage, v, sizeof(real) * dim * num_particles,
hipMemcpyHostToDevice);
}
template <int dim>
void TStateBase<dim>::set_initial_F(float *F) {
hipMemcpy(F_storage, F, sizeof(real) * dim * dim * num_particles,
hipMemcpyHostToDevice);
}
template std::vector<float> TStateBase<2>::fetch_x();
template std::vector<float> TStateBase<3>::fetch_x();
template std::vector<float> TStateBase<2>::fetch_grad_x();
template std::vector<float> TStateBase<3>::fetch_grad_x();
template std::vector<float> TStateBase<2>::fetch_grad_v();
template std::vector<float> TStateBase<3>::fetch_grad_v();
template void TStateBase<2>::set_initial_F(float *);
template void TStateBase<3>::set_initial_F(float *);
template void TStateBase<2>::set_initial_v(float *);
template void TStateBase<3>::set_initial_v(float *);
template <int dim>
void set_mpm_bc(void *state_, float *bc) {
auto state = reinterpret_cast<TState<dim> *>(state_);
hipMemcpy(state->grid_bc, bc,
sizeof(TVector<real, dim + 1>) * state->num_cells,
hipMemcpyHostToDevice);
}
template void set_mpm_bc<2>(void *state_, float *bc);
template void set_mpm_bc<3>(void *state_, float *bc);
template <int dim>
void set_mpm_actuation(void *state_, float *act) {
auto state = reinterpret_cast<TState<dim> *>(state_);
hipMemcpy(state->A_storage, act,
sizeof(real) * dim * dim * state->num_particles,
hipMemcpyHostToDevice);
}
template void set_mpm_actuation<2>(void *state_, float *);
template void set_mpm_actuation<3>(void *state_, float *);
| 5f909935c6eff3887a85c92a8b7e773ed83ea9f0.cu | #include "kernels.h"
#include "linalg.h"
#include "state.cuh"
#include <cstdio>
#include <vector>
__global__ void saxpy(int n, real a, real *x, real *y) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
y[i] = a * x[i] + y[i];
}
}
void saxpy_cuda(int N, real alpha, real *x, real *y) {
real *d_x, *d_y;
cudaMalloc(&d_x, N * sizeof(real));
cudaMalloc(&d_y, N * sizeof(real));
cudaMemcpy(d_x, x, N * sizeof(real), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N * sizeof(real), cudaMemcpyHostToDevice);
saxpy<<<(N + 255) / 256, 256>>>(N, alpha, d_x, d_y);
cudaMemcpy(y, d_y, N * sizeof(real), cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_y);
}
__global__ void test_svd(int n,
Matrix3 *A,
Matrix3 *U,
Matrix3 *sig,
Matrix3 *V) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n) {
svd(A[id], U[id], sig[id], V[id]);
}
}
// 3D only..
void test_svd_cuda(int n, real *A, real *U, real *sig, real *V) {
Matrix3 *d_A, *d_U, *d_sig, *d_V;
cudaMalloc(&d_A, sizeof(Matrix3) * (unsigned int)(n));
cudaMemcpy(d_A, A, sizeof(Matrix3) * n, cudaMemcpyHostToDevice);
cudaMalloc(&d_U, sizeof(Matrix3) * (unsigned int)(n));
cudaMalloc(&d_sig, sizeof(Matrix3) * (unsigned int)(n));
cudaMalloc(&d_V, sizeof(Matrix3) * (unsigned int)(n));
test_svd<<<(n + 127) / 128, 128>>>(n, d_A, d_U, d_sig, d_V);
std::vector<Matrix3> h_U(n), h_sig(n), h_V(n);
cudaMemcpy(h_U.data(), d_U, sizeof(Matrix3) * n, cudaMemcpyDeviceToHost);
cudaMemcpy(h_sig.data(), d_sig, sizeof(Matrix3) * n, cudaMemcpyDeviceToHost);
cudaMemcpy(h_V.data(), d_V, sizeof(Matrix3) * n, cudaMemcpyDeviceToHost);
// Taichi uses column-first storage
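// Host layout: each 3x3 matrix is padded to 12 floats (row stride of 4), hence p * 12 + 4 * i + j;
// the [j][i] access below performs the transpose between the device layout and this host layout.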
for (int p = 0; p < n; p++) {
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
U[p * 12 + 4 * i + j] = h_U[p][j][i];
sig[p * 12 + 4 * i + j] = h_sig[p][j][i];
V[p * 12 + 4 * i + j] = h_V[p][j][i];
}
}
}
}
template <int dim>
__host__ std::vector<real> TStateBase<dim>::fetch_x() {
std::vector<real> host_x(dim * num_particles);
cudaMemcpy(host_x.data(), x_storage,
sizeof(TVector<real, dim>) * num_particles,
cudaMemcpyDeviceToHost);
return host_x;
}
template <int dim>
__host__ std::vector<real> TStateBase<dim>::fetch_grad_v() {
std::vector<real> host_grad_v(dim * num_particles);
cudaMemcpy(host_grad_v.data(), grad_v_storage,
sizeof(TVector<real, dim>) * num_particles,
cudaMemcpyDeviceToHost);
return host_grad_v;
}
template <int dim>
__host__ std::vector<real> TStateBase<dim>::fetch_grad_x() {
std::vector<real> host_grad_x(dim * num_particles);
cudaMemcpy(host_grad_x.data(), grad_x_storage,
sizeof(TVector<real, dim>) * num_particles,
cudaMemcpyDeviceToHost);
return host_grad_x;
}
template <int dim>
void TStateBase<dim>::set_initial_v(float *v) {
cudaMemcpy(v_storage, v, sizeof(real) * dim * num_particles,
cudaMemcpyHostToDevice);
}
template <int dim>
void TStateBase<dim>::set_initial_F(float *F) {
cudaMemcpy(F_storage, F, sizeof(real) * dim * dim * num_particles,
cudaMemcpyHostToDevice);
}
template std::vector<float> TStateBase<2>::fetch_x();
template std::vector<float> TStateBase<3>::fetch_x();
template std::vector<float> TStateBase<2>::fetch_grad_x();
template std::vector<float> TStateBase<3>::fetch_grad_x();
template std::vector<float> TStateBase<2>::fetch_grad_v();
template std::vector<float> TStateBase<3>::fetch_grad_v();
template void TStateBase<2>::set_initial_F(float *);
template void TStateBase<3>::set_initial_F(float *);
template void TStateBase<2>::set_initial_v(float *);
template void TStateBase<3>::set_initial_v(float *);
template <int dim>
void set_mpm_bc(void *state_, float *bc) {
auto state = reinterpret_cast<TState<dim> *>(state_);
cudaMemcpy(state->grid_bc, bc,
sizeof(TVector<real, dim + 1>) * state->num_cells,
cudaMemcpyHostToDevice);
}
template void set_mpm_bc<2>(void *state_, float *bc);
template void set_mpm_bc<3>(void *state_, float *bc);
template <int dim>
void set_mpm_actuation(void *state_, float *act) {
auto state = reinterpret_cast<TState<dim> *>(state_);
cudaMemcpy(state->A_storage, act,
sizeof(real) * dim * dim * state->num_particles,
cudaMemcpyHostToDevice);
}
template void set_mpm_actuation<2>(void *state_, float *);
template void set_mpm_actuation<3>(void *state_, float *);
|
3de54191e726c7ca29a9e8310a72053d1ab9e01f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstddef>
#include <cstdint>
#include <transform/jit/operation-udf.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cuda/std/climits>
#include <cuda/std/cstddef>
#include <cuda/std/limits>
#include <cuda/std/tuple>
#include <cuda/std/type_traits>
namespace cudf {
namespace transformation {
namespace jit {
template <typename T>
struct Masked {
T value;
bool valid;
};
template <typename TypeIn, typename MaskType, typename OffsetType>
__device__ auto make_args(cudf::size_type id, TypeIn in_ptr, MaskType in_mask, OffsetType in_offset)
{
bool valid = in_mask ? cudf::bit_is_set(in_mask, in_offset + id) : true;
return cuda::std::make_tuple(in_ptr[id], valid);
}
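// Variadic overload: consumes one (data pointer, null mask, offset) triple per step and prepends the
// resulting (value, valid) pair to the tuple built recursively from the remaining arguments; the
// recursion terminates at the three-argument overload above.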
template <typename InType, typename MaskType, typename OffsetType, typename... Arguments>
__device__ auto make_args(cudf::size_type id,
InType in_ptr,
MaskType in_mask, // in practice, always cudf::bitmask_type const*
OffsetType in_offset, // in practice, always cudf::size_type
Arguments... args)
{
bool valid = in_mask ? cudf::bit_is_set(in_mask, in_offset + id) : true;
return cuda::std::tuple_cat(cuda::std::make_tuple(in_ptr[id], valid), make_args(id, args...));
}
template <typename TypeOut, typename... Arguments>
__global__ void generic_udf_kernel(cudf::size_type size,
TypeOut* out_data,
bool* out_mask,
Arguments... args)
{
int const tid = threadIdx.x;
int const blkid = blockIdx.x;
int const blksz = blockDim.x;
int const gridsz = gridDim.x;
int const start = tid + blkid * blksz;
int const step = blksz * gridsz;
Masked<TypeOut> output;
for (cudf::size_type i = start; i < size; i += step) {
auto func_args = cuda::std::tuple_cat(
cuda::std::make_tuple(&output.value),
make_args(i, args...) // passed int64*, bool*, int64, int64*, bool*, int64
);
cuda::std::apply(GENERIC_OP, func_args);
out_data[i] = output.value;
out_mask[i] = output.valid;
}
}
} // namespace jit
} // namespace transformation
} // namespace cudf
| 3de54191e726c7ca29a9e8310a72053d1ab9e01f.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstddef>
#include <cstdint>
#include <transform/jit/operation-udf.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cuda/std/climits>
#include <cuda/std/cstddef>
#include <cuda/std/limits>
#include <cuda/std/tuple>
#include <cuda/std/type_traits>
namespace cudf {
namespace transformation {
namespace jit {
template <typename T>
struct Masked {
T value;
bool valid;
};
template <typename TypeIn, typename MaskType, typename OffsetType>
__device__ auto make_args(cudf::size_type id, TypeIn in_ptr, MaskType in_mask, OffsetType in_offset)
{
bool valid = in_mask ? cudf::bit_is_set(in_mask, in_offset + id) : true;
return cuda::std::make_tuple(in_ptr[id], valid);
}
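// Variadic overload: consumes one (data pointer, null mask, offset) triple per step and prepends the
// resulting (value, valid) pair to the tuple built recursively from the remaining arguments; the
// recursion terminates at the three-argument overload above.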
template <typename InType, typename MaskType, typename OffsetType, typename... Arguments>
__device__ auto make_args(cudf::size_type id,
InType in_ptr,
MaskType in_mask, // in practice, always cudf::bitmask_type const*
OffsetType in_offset, // in practice, always cudf::size_type
Arguments... args)
{
bool valid = in_mask ? cudf::bit_is_set(in_mask, in_offset + id) : true;
return cuda::std::tuple_cat(cuda::std::make_tuple(in_ptr[id], valid), make_args(id, args...));
}
template <typename TypeOut, typename... Arguments>
__global__ void generic_udf_kernel(cudf::size_type size,
TypeOut* out_data,
bool* out_mask,
Arguments... args)
{
int const tid = threadIdx.x;
int const blkid = blockIdx.x;
int const blksz = blockDim.x;
int const gridsz = gridDim.x;
int const start = tid + blkid * blksz;
int const step = blksz * gridsz;
Masked<TypeOut> output;
for (cudf::size_type i = start; i < size; i += step) {
auto func_args = cuda::std::tuple_cat(
cuda::std::make_tuple(&output.value),
make_args(i, args...) // passed int64*, bool*, int64, int64*, bool*, int64
);
cuda::std::apply(GENERIC_OP, func_args);
out_data[i] = output.value;
out_mask[i] = output.valid;
}
}
} // namespace jit
} // namespace transformation
} // namespace cudf
|
331bcc791a654fee32cf198cec316bbde1ddca83.hip | // !!! This is a file automatically generated by hipify!!!
/**
*
* N-Queens solutions with symmetry reduction: bash/C/GPU/CUDA versions
*
https://suzukiiichiro.github.io/search/?keyword=
CPU, non-recursive:
$ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -c
CPU, recursive:
$ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -r
GPU:
$ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -n
GPU:
$ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -n
*
*/
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#define MAX 27
#define THREAD_NUM 96
//
//#define UINT64_C(c) c ## ULL
//
//
unsigned long TOTAL=0;
unsigned long UNIQUE=0;
//GPU
typedef struct local
{
unsigned int BOUND1,BOUND2;
unsigned int TOPBIT,ENDBIT,SIDEMASK,LASTMASK;
unsigned long board[MAX];
unsigned long COUNT2,COUNT4,COUNT8,TOTAL,UNIQUE;
unsigned int steps;
}local;
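/*
Counting scheme used by symmetry_NR / symmetry_R below:
UNIQUE = COUNT2 + COUNT4 + COUNT8
TOTAL = COUNT2*2 + COUNT4*4 + COUNT8*8
COUNT2 holds solutions that map onto themselves under a 90-degree rotation (2 distinct orientations),
COUNT4 those invariant only under a 180-degree rotation (4), and COUNT8 all remaining solutions (8).
*/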
// Classify a finished board by its symmetry; shared by the CPU recursive and non-recursive versions
void symmetryOps(unsigned int size,struct local* l)
{
/**
(1) If the board maps onto itself under a 90-degree rotation it is also invariant under the
180- and 270-degree rotations, so it has only 2 distinct orientations (counted in COUNT2).
*/
if(l->board[l->BOUND2]==1){
unsigned int ptn;
unsigned int own;
for(ptn=2,own=1;own<size;++own,ptn<<=1){
unsigned int bit;
unsigned int you;
for(bit=1,you=size-1;(l->board[you]!=ptn)&&l->board[own]>=bit;--you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
//
if(own>size-1){
l->COUNT2++;
return ;
}//end if
}//end if
/**
(2) Otherwise test the 180-degree rotation: a board invariant under a 180-degree rotation
(but not under 90 degrees) has 4 distinct orientations (counted in COUNT4).
*/
//
if(l->board[size-1]==l->ENDBIT){
unsigned int you;
unsigned int own;
for(you=size-1-1,own=1;own<=size-1;++own,--you){
unsigned int bit;
unsigned int ptn;
for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
//
if(own>size-1){
l->COUNT4++;
return ;
}
}//end if
/**
(3) A board with no rotational symmetry has 8 distinct orientations (counted in COUNT8).
*/
//
if(l->board[l->BOUND1]==l->TOPBIT){
unsigned int ptn;
unsigned int own;
unsigned int you;
unsigned int bit;
for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){
for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
}//end if
l->COUNT8++;
}
/**
CPU non-recursive version (-c option)
*/
// Backtracking for the case with no queen on a corner (non-recursive)
void symmetry_backTrack_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local *l)
{
unsigned int mask=(1<<size)-1;
unsigned int down[size];
unsigned int left[size];
unsigned int right[size];
unsigned int bitmap[size];
left[row]=_left;
down[row]=_down;
right[row]=_right;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
while(row>0){
if(bitmap[row]>0){
if(row<l->BOUND1){ //
bitmap[row]|=l->SIDEMASK;
bitmap[row]^=l->SIDEMASK;
}else if(row==l->BOUND2){ //
if((down[row]&l->SIDEMASK)==0){
row--;
}
if((down[row]&l->SIDEMASK)!=l->SIDEMASK){
bitmap[row]&=l->SIDEMASK;
}
}
unsigned int save_bitmap=bitmap[row];
unsigned int bit=-bitmap[row]&bitmap[row];
bitmap[row]^=bit;
l->board[row]=bit; //Q
if((bit&mask)!=0){
if(row==(size-1)){
if( (save_bitmap&l->LASTMASK)==0){
symmetryOps(size,l); //
}
row--;
}else{
unsigned int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=(down[n]|bit);
right[row]=(right[n]|bit)>>1;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
}else{
row--;
}
}else{
row--;
}
}//end while
}
// Backtracking for the case with a queen on a corner (non-recursive)
void symmetry_backTrack_corner_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local *l)
{
unsigned int mask=(1<<size)-1;
unsigned int bit=0;
unsigned int down[size];
unsigned int left[size];
unsigned int right[size];
unsigned int bitmap[size];
left[row]=_left;
down[row]=_down;
right[row]=_right;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
while(row>=2){
if(row<l->BOUND1){
// bitmap[row]=bitmap[row]|2;
// bitmap[row]=bitmap[row]^2;
bitmap[row]&=~2;
}
if(bitmap[row]>0){
bit=-bitmap[row]&bitmap[row];
bitmap[row]^=bit;
if(row==(size-1)){
l->COUNT8++;
row--;
}else{
unsigned int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=(down[n]|bit);
right[row]=(right[n]|bit)>>1;
l->board[row]=bit; //Q
//
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
}else{
row--;
}
}//end while
}
// Non-recursive symmetry-reduction driver
void symmetry_NR(unsigned int size,struct local* l)
{
l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0;
unsigned int bit=0;
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->SIDEMASK=l->LASTMASK=0;
l->BOUND1=2;
l->BOUND2=0;
l->board[0]=1;
while(l->BOUND1>1&&l->BOUND1<size-1){
if(l->BOUND1<size-1){
bit=1<<l->BOUND1;
l->board[1]=bit; //Q
//Q
symmetry_backTrack_corner_NR(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l);
}
l->BOUND1++;
}
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->TOPBIT>>1;
l->SIDEMASK=l->TOPBIT|1;
l->LASTMASK=l->TOPBIT|1;
l->BOUND1=1;
l->BOUND2=size-2;
while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){
if(l->BOUND1<l->BOUND2){
bit=1<<l->BOUND1;
l->board[0]=bit; //Q
//Q
symmetry_backTrack_NR(size,1,bit<<1,bit,bit>>1,l);
}
l->BOUND1++;
l->BOUND2--;
l->ENDBIT=l->ENDBIT>>1;
l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1;
}//end while
UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8;
TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8;
}
/**
CPU recursive version (-r option)
*/
// Backtracking for the case with no queen on a corner (recursive)
void symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
if(row==(size-1)){
if(bitmap){
if( (bitmap&l->LASTMASK)==0){
l->board[row]=bitmap; //Q
symmetryOps(size,l); //
}
}
}else{
if(row<l->BOUND1){
bitmap=bitmap|l->SIDEMASK;
bitmap=bitmap^l->SIDEMASK;
}else{
if(row==l->BOUND2){
if((down&l->SIDEMASK)==0){
return;
}
if( (down&l->SIDEMASK)!=l->SIDEMASK){
bitmap=bitmap&l->SIDEMASK;
}
}
}
while(bitmap){
unsigned int bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit;
symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
// Backtracking for the case with a queen on a corner (recursive)
void symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
unsigned int bit=0;
if(row==(size-1)){
if(bitmap){
l->board[row]=bitmap;
l->COUNT8++;
}
}else{
if(row<l->BOUND1){ //
bitmap=bitmap|2;
bitmap=bitmap^2;
}
while(bitmap){
bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit; //Q
symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
// Recursive symmetry-reduction driver
void symmetry_R(unsigned int size,struct local* l)
{
l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0;
unsigned int bit=0;
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->LASTMASK=l->SIDEMASK=0;
l->BOUND1=2;
l->BOUND2=0;
l->board[0]=1;
while(l->BOUND1>1 && l->BOUND1<size-1){
if(l->BOUND1<size-1){
bit=1<<l->BOUND1;
l->board[1]=bit; //Q
//Q
symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l);
}
l->BOUND1++;
}//end while
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->TOPBIT>>1;
l->SIDEMASK=l->TOPBIT|1;
l->LASTMASK=l->TOPBIT|1;
l->BOUND1=1;
l->BOUND2=size-2;
while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){
if(l->BOUND1<l->BOUND2){
bit=1<<l->BOUND1;
l->board[0]=bit; //Q
//Q
symmetry_backTrack(size,1,bit<<1,bit,bit>>1,l);
}
l->BOUND1++;
l->BOUND2--;
l->ENDBIT=l->ENDBIT>>1;
l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1;
}//end while
UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8;
TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8;
}
/**
GPU version (-g option)
*/
__device__
struct dlocal
{
unsigned int BOUND1,BOUND2;
unsigned int TOPBIT,ENDBIT,SIDEMASK,LASTMASK;
unsigned long board[MAX];
unsigned long COUNT2,COUNT4,COUNT8,TOTAL,UNIQUE;
}dlocal;
__device__ struct dlocal gdl[9999];
// GPU variant of the symmetry classification
__host__ __device__
long GPU_symmetryOps(unsigned int size,struct dlocal* l)
{
/**
(1) If the board maps onto itself under a 90-degree rotation it is also invariant under the
180- and 270-degree rotations, so it has only 2 distinct orientations (counted in COUNT2).
*/
if(l->board[l->BOUND2]==1){
unsigned int ptn;
unsigned int own;
for(ptn=2,own=1;own<size;++own,ptn<<=1){
unsigned int bit;
unsigned int you;
for(bit=1,you=size-1;(l->board[you]!=ptn)&& l->board[own]>=bit;--you){
bit<<=1;
}
if(l->board[own]>bit){
return 0;
}
if(l->board[own]<bit){
break;
}
}//end for
//
if(own>size-1){
l->COUNT2++;
return 2;
}//end if
}//end if
/**
(2) Otherwise test the 180-degree rotation: a board invariant under a 180-degree rotation
(but not under 90 degrees) has 4 distinct orientations (counted in COUNT4).
*/
//
if(l->board[size-1]==l->ENDBIT){
unsigned int you;
unsigned int own;
for(you=size-1-1,own=1;own<=size-1;++own,--you){
unsigned int bit;
unsigned int ptn;
for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){
bit<<=1;
}
if(l->board[own]>bit){
return 0;
}
if(l->board[own]<bit){
break;
}
}//end for
//
if(own>size-1){
l->COUNT4++;
return 4;
}
}//end if
/**
(3) A board with no rotational symmetry has 8 distinct orientations (counted in COUNT8).
*/
//
if(l->board[l->BOUND1]==l->TOPBIT){
unsigned int ptn;
unsigned int own;
unsigned int you;
unsigned int bit;
for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){
for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){
bit<<=1;
}
if(l->board[own]>bit){
return 0;
}
if(l->board[own]<bit){
break;
}
}//end for
}//end if
l->COUNT8++;
return 8;
}
// GPU variant: backtracking with no queen on a corner
__host__ __device__
long GPU_symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct dlocal* l)
{
unsigned long counter=0;
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
if(row==(size-1)){
if(bitmap){
if( (bitmap& l->LASTMASK)==0){
l->board[row]=bitmap; //Q
counter+=GPU_symmetryOps(size,l); //
}
}
}else{
if(row<l->BOUND1){
bitmap=bitmap|l->SIDEMASK;
bitmap=bitmap^l->SIDEMASK;
}else{
if(row==l->BOUND2){
if((down&l->SIDEMASK)==0){
return 0;
}
if( (down&l->SIDEMASK)!=l->SIDEMASK){
bitmap=bitmap&l->SIDEMASK;
}
}
}
while(bitmap){
unsigned int bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit;
counter+=GPU_symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
return counter;
}
// GPU variant: backtracking with a queen on a corner
__host__ __device__
long GPU_symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct dlocal* l)
{
unsigned long counter=0;
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
unsigned int bit=0;
if(row==(size-1)){
if(bitmap){
l->board[row]=bitmap;
l->COUNT8++;
counter+=8;
}
}else{
if(row<l->BOUND1){ //
bitmap=bitmap|2;
bitmap=bitmap^2;
}
while(bitmap){
bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit; //Q
counter+=GPU_symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
return counter;
}
// GPU driver for the -g option
__host__ __device__
void GPU_symmetry_R(unsigned int size,struct local* hostLocal)
{
// GPU dlocal
struct dlocal l;
l.TOTAL=l.UNIQUE=l.COUNT2=l.COUNT4=l.COUNT8=0;
unsigned int bit=0;
l.TOPBIT=1<<(size-1);
l.ENDBIT=l.LASTMASK=l.SIDEMASK=0;
l.BOUND1=2;
l.BOUND2=0;
l.board[0]=1;
while(l.BOUND1>1 && l.BOUND1<size-1){
if(l.BOUND1<size-1){
bit=1<<l.BOUND1;
l.board[1]=bit; //Q
//Q
GPU_symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,&l);
}
l.BOUND1++;
}//end while
l.TOPBIT=1<<(size-1);
l.ENDBIT=l.TOPBIT>>1;
l.SIDEMASK=l.TOPBIT|1;
l.LASTMASK=l.TOPBIT|1;
l.BOUND1=1;
l.BOUND2=size-2;
while(l.BOUND1>0 && l.BOUND2<size-1 && l.BOUND1<l.BOUND2){
if(l.BOUND1<l.BOUND2){
bit=1<<l.BOUND1;
l.board[0]=bit; //Q
//Q
GPU_symmetry_backTrack(size,1,bit<<1,bit,bit>>1,&l);
}
l.BOUND1++;
l.BOUND2--;
l.ENDBIT=l.ENDBIT>>1;
l.LASTMASK=l.LASTMASK<<1|l.LASTMASK|l.LASTMASK>>1;
}//end while
// write the aggregated counts back into the caller-supplied hostLocal
hostLocal->UNIQUE=l.COUNT2+l.COUNT4+l.COUNT8;
hostLocal->TOTAL=l.COUNT2*2+l.COUNT4*4+l.COUNT8*8;
}
/**
CUDA BitBoard version (used by the -n option)
*/
// GPU -n: symmetry classification used inside the BitBoard kernels
__device__
int BitBoard_symmetryOps(const unsigned int size,const unsigned int* board,struct local* l)
{
unsigned int own,ptn,you,bit;
//90
if(board[l->BOUND2]==1){ own=1; ptn=2;
while(own<=size-1){ bit=1; you=size-1;
while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; you--; }
if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; }
own++; ptn<<=1;
}
/** 90180/270 */
if(own>size-1){ return 2; }
}
//180
if(board[size-1]==l->ENDBIT){ own=1; you=size-1-1;
while(own<=size-1){ bit=1; ptn=l->TOPBIT;
while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; ptn>>=1; }
if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; }
own++; you--;
}
/** 90180 */
if(own>size-1){ return 4; }
}
//270
if(board[l->BOUND1]==l->TOPBIT){ own=1; ptn=l->TOPBIT>>1;
while(own<=size-1){ bit=1; you=0;
while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; you++; }
if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; }
own++; ptn>>=1;
}
}
return 8;
}
// GPU -n: kernel for the corner case (a queen fixed on a corner); every solution found counts as 8
__global__
void BitBoard_cuda_kernel_b1(const unsigned int size,unsigned int mark,unsigned int* _down,unsigned int* _left,unsigned int* _right,unsigned int* _total,unsigned int* _unique,unsigned long _cond,unsigned int _row,struct local* l)
{
const unsigned int mask=(1<<size)-1;
unsigned long total=0;
unsigned int unique=0;
int row=0;
unsigned int bit;
//
//
//
//ID
const unsigned int tid=threadIdx.x;
//ID
const unsigned int bid=blockIdx.x;
//ID
const unsigned int idx=bid*blockDim.x+tid;
//
//
//
//per-thread stacks kept in shared memory
//each array is 10 entries deep: the GPU part of the search never goes more than 10 rows down
//the first dimension provides one private slot per thread (THREAD_NUM threads per block)
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=_down[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=_left[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=_right[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
bitmap[tid][row] =mask&~(down[tid][row]|left[tid][row]|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
__shared__ unsigned int usum[THREAD_NUM];
//
//GPUsteps_cond
if(idx<_cond){
//_down,_left,_right
//down,left,right
//CPU t_steps
//
// idx
//
unsigned int bitmap_tid_row;
unsigned int down_tid_row;
unsigned int left_tid_row;
unsigned int right_tid_row;
while(row>=0){
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
if(bitmap_tid_row==0){
row--;
}else{
/**11 **********/
if(row+_row<l->BOUND1) {
bitmap_tid_row=bitmap[tid][row]&=~2; // bm|=2; bm^=2; (bm&=~2)
}
//
//
bitmap[tid][row]
^=bit
=(-bitmap_tid_row&bitmap_tid_row);
if((bit&mask)!=0){
//?
//
if(row+1==mark){
//TOTAL
//
unique++;
total+=8; //
//}
row--;
}else{
int rowP=row+1;
down[tid][rowP]=down_tid_row|bit;
left[tid][rowP]=(left_tid_row|bit)<<1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]=mask&~(down[tid][rowP]|left[tid][rowP]|right[tid][rowP]);
row++;
}
}else{
//
row--;
}
}
}
//sum[tid]
sum[tid]=total;
usum[tid]=unique;
}else{
//_condtotal
sum[tid]=0;
usum[tid]=0;
}
//__syncthreads()
//__syncthreads()
__syncthreads();
if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
usum[tid]+=usum[tid+64];
}
__syncwarp();
if(tid<32){
sum[tid]+=sum[tid+32];
usum[tid]+=usum[tid+32];
}
__syncwarp();
if(tid<16){
sum[tid]+=sum[tid+16];
usum[tid]+=usum[tid+16];
}
__syncwarp();
if(tid<8){
sum[tid]+=sum[tid+8];
usum[tid]+=usum[tid+8];
}
__syncwarp();
if(tid<4){
sum[tid]+=sum[tid+4];
usum[tid]+=usum[tid+4];
}
__syncwarp();
if(tid<2){
sum[tid]+=sum[tid+2];
usum[tid]+=usum[tid+2];
}
__syncwarp();
if(tid<1){
sum[tid]+=sum[tid+1];
usum[tid]+=usum[tid+1];
}
__syncwarp();
if(tid==0){
_total[bid]=sum[0];
_unique[bid]=usum[0];
}
}
// GPU -n: kernel for the general case (uses SIDEMASK/LASTMASK and the symmetry classification)
__global__
void BitBoard_cuda_kernel_b2(const unsigned int size,unsigned int mark,unsigned int* _down,unsigned int* _left,unsigned int* _right,unsigned int* _total,unsigned int* _unique,unsigned long _cond,unsigned int* board,unsigned int _row,struct local* l)
{
const unsigned int mask=(1<<size)-1;
unsigned long total=0;
unsigned int unique=0;
int row=0;
unsigned int bit;
//
//
//
//ID
unsigned const int tid=threadIdx.x;
//ID
unsigned const int bid=blockIdx.x;
//ID
unsigned const int idx=bid*blockDim.x+tid;
//
//
//
//per-thread stacks kept in shared memory
//each array is 10 entries deep: the GPU part of the search never goes more than 10 rows down
//the first dimension provides one private slot per thread (THREAD_NUM threads per block)
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=_down[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=_left[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=_right[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
//down,left,rightbitmap
bitmap[tid][row]=mask&~(down[tid][row]|left[tid][row]|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
unsigned int c_aBoard[MAX];
__shared__ unsigned int usum[THREAD_NUM];
//
//GPUsteps_cond
if(idx<_cond){
//_down,_left,_right
//down,left,right
//CPU t_steps
//
// idx
//
for(int i=0;i<_row;i++){
c_aBoard[i]=board[idx*_row+i]; //1
}
unsigned int bitmap_tid_row;
unsigned int down_tid_row;
unsigned int left_tid_row;
unsigned int right_tid_row;
while(row>=0){
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
//
//bitmap[tid][row]=00000000
//1
if(bitmap_tid_row==0){
row--;
}else{
/**11 **********/
//
if(row+_row<l->BOUND1){
bitmap_tid_row=bitmap[tid][row]&=~l->SIDEMASK;
//
}else if(row+_row==l->BOUND2) {
if((down_tid_row&l->SIDEMASK)==0){
row--;
continue;
}
if((down_tid_row&l->SIDEMASK)!=l->SIDEMASK){
bitmap_tid_row=bitmap[tid][row]&=l->SIDEMASK;
}
}
int save_bitmap=bitmap[tid][row];
//
//
bitmap[tid][row]^=c_aBoard[row+_row]=bit=(-bitmap_tid_row&bitmap_tid_row);
if((bit&mask)!=0){
//?
//
if(row+1==mark){
/***11 l->LASTMASK*********************/
if((save_bitmap&l->LASTMASK)==0){
/***12 symmetryOps l->BOUND1,l->BOUND2,l->TOPBIT,l->ENDBIT*****/
int s=BitBoard_symmetryOps(size,c_aBoard,l);
if(s!=0){
//print(size); //print()TOTAL++
//TOTAL
//
unique++;
total+=s; //
}
row--;
}
}else{
int rowP=row+1;
down[tid][rowP]=down_tid_row|bit;
left[tid][rowP]=(left_tid_row|bit)<<1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]
=mask&~(
down[tid][rowP]
|left[tid][rowP]
|right[tid][rowP]);
row++;
}
}else{
//
row--;
}
}
}
//sum[tid]
sum[tid]=total;
usum[tid]=unique;
}else{
//_condtotal
sum[tid]=0;
usum[tid]=0;
}
//__syncthreads()
//__syncthreads()
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
usum[tid]+=usum[tid+64];
}
__syncwarp();if(tid<32){
sum[tid]+=sum[tid+32];
usum[tid]+=usum[tid+32];
}
__syncwarp();if(tid<16){
sum[tid]+=sum[tid+16];
usum[tid]+=usum[tid+16];
}
__syncwarp();if(tid<8){
sum[tid]+=sum[tid+8];
usum[tid]+=usum[tid+8];
}
__syncwarp();if(tid<4){
sum[tid]+=sum[tid+4];
usum[tid]+=usum[tid+4];
}
__syncwarp();if(tid<2){
sum[tid]+=sum[tid+2];
usum[tid]+=usum[tid+2];
}
__syncwarp();if(tid<1){
sum[tid]+=sum[tid+1];
usum[tid]+=usum[tid+1];
}
__syncwarp();if(tid==0){
_total[bid]=sum[0];
_unique[bid]=usum[0];
}
}
// GPU -n: host driver for the general (non-corner) case; collects partial states and launches BitBoard_cuda_kernel_b2 in batches
void BitBoard_backTrack2G(const unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local* l)
{
//The host enumerates the shallow rows and the GPU finishes the deep ones
/***11 for size<8 the hand-over depth mark is clamped to 2 *********************/
unsigned int mark=size>12?size-10:3;
//unsigned int mark=size>11?size-9:3;
if(size<8){ mark=2; }
const unsigned int h_mark=row;
unsigned long totalCond=0;
unsigned int mask=(1<<size)-1;
bool matched=false;
//host
unsigned int down[32]; down[row]=_down;
unsigned int right[32]; right[row]=_right;
unsigned int left[32]; left[row]=_left;
//bitmap
//stack1
unsigned int bitmap[32];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
unsigned int bit;
unsigned int* hostDown;
hipHostMalloc((void**) &hostDown,sizeof(int)*l->steps);
unsigned int* hostLeft;
hipHostMalloc((void**) &hostLeft,sizeof(int)*l->steps);
unsigned int* hostRight;
hipHostMalloc((void**) &hostRight,sizeof(int)*l->steps);
unsigned int* hostTotal;
hipHostMalloc((void**) &hostTotal,sizeof(int)*l->steps);
unsigned int* hostUnique;
hipHostMalloc((void**) &hostUnique,sizeof(int)*l->steps);
unsigned int* hostBoard;
hipHostMalloc((void**) &hostBoard,sizeof(int)*l->steps*mark);
//device
unsigned int* deviceDown;
hipMalloc((void**) &deviceDown,sizeof(int)*l->steps);
unsigned int* deviceLeft;
hipMalloc((void**) &deviceLeft,sizeof(int)*l->steps);
unsigned int* deviceRight;
hipMalloc((void**) &deviceRight,sizeof(int)*l->steps);
unsigned int* deviceTotal;
hipMalloc((void**) &deviceTotal,sizeof(int)*l->steps/THREAD_NUM);
unsigned int* deviceUnique;
hipMalloc((void**) &deviceUnique,sizeof(int)*l->steps/THREAD_NUM);
unsigned int* deviceBoard;
hipMalloc((void**) &deviceBoard,sizeof(int)*l->steps*mark);
struct local* hostLocal;
hipHostMalloc((void**) &hostLocal,sizeof(struct local)*l->steps);
struct local* deviceLocal;
hipMalloc((void**) &deviceLocal,sizeof(struct local)); // device copy of the search constants; only element 0 is read by the kernel
hostLocal[0].BOUND1=l->BOUND1;
hostLocal[0].BOUND2=l->BOUND2;
hostLocal[0].TOPBIT=l->TOPBIT;
hostLocal[0].ENDBIT=l->ENDBIT;
hostLocal[0].SIDEMASK=l->SIDEMASK;
hostLocal[0].LASTMASK=l->LASTMASK;
hostLocal[0].steps=l->steps;
for(int i=0;i<MAX;i++){
hostLocal[0].board[i]=l->board[i];
}
//The host searches rows 0..mark-1; whenever row==mark is reached, the current
//down,left,right values (and the partial board) are stored into hostDown,hostLeft,hostRight
//and the host backtracks instead of descending further.
//Once l->steps such partial states have been collected they are copied to the GPU and
//BitBoard_cuda_kernel_b2 finishes the remaining rows for all of them in parallel
//(the GPU part is at most 10 rows deep).
unsigned int rowP=0;
unsigned long total=0;
unsigned long unique=0;
while(row>=h_mark) {
//bitmap[row]=00000000
//1
//06GPU
if(bitmap[row]==0){ row--; }
else{//
/***11 *********************/
//
if(row<l->BOUND1){
bitmap[row]&=~l->SIDEMASK;
//
}else if(row==l->BOUND2) {
if((down[row]&l->SIDEMASK)==0){ row--; }
if((down[row]&l->SIDEMASK)!=l->SIDEMASK){ bitmap[row]&=l->SIDEMASK; }
}
//06SGPU
bitmap[row]^=l->board[row]=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0){//
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
row++;
if(row==mark){
//3(mark)
//down,left,right
//
//GPU
//totalCond threadId down,left,right
//row=2(13n15row=5)
//hostDown,hostLeft,hostRight
hostDown[totalCond]=down[row];
hostLeft[totalCond]=left[row];
hostRight[totalCond]=right[row];
for(int i=0;i<mark;i++){
hostBoard[totalCond*mark+i]=l->board[i];
}
//
totalCond++;
//GPUGPUstepsGPU
//
//ntotalCondstepsn
//
//totalCond==steps
if(totalCond==l->steps){
//If matched==true, collect the counts of the previous launch first:
//the GPU keeps the counters, so matched==true after a kernel has run.
if(matched){
hipMemcpy(hostTotal,deviceTotal,sizeof(int)*l->steps/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(hostUnique,deviceUnique,sizeof(int)*l->steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<l->steps/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
matched=false;
}
hipMemcpy(deviceDown,hostDown,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceLeft,hostLeft,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceRight,hostRight,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceBoard,hostBoard,sizeof(int)*totalCond*mark,hipMemcpyHostToDevice);
hipMemcpy(deviceLocal,hostLocal,sizeof(struct local),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( BitBoard_cuda_kernel_b2), dim3(l->steps/THREAD_NUM),dim3(THREAD_NUM) , 0, 0, size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,deviceBoard,row,deviceLocal);
//`steps` threads are launched, but only totalCond of them do real
//work; the rest spin idle.
//The GPU keeps the counters, so mark matched=true after the launch.
matched=true;
//After a totalCond==steps launch, restart the thread counter from 0
//(this lets the GPU be launched again and again, steps at a time).
totalCond=0;
}
//After storing into hostDown,hostLeft,hostRight, move up one row.
//Repeating this places a queen on every possible square of row=2
//and records each resulting state into hostDown,hostLeft,hostRight.
row--;
}
}else{
//No position left: back up one row. Until row==mark is reached
//the CPU runs the ordinary n-queens search.
row--;
}
}
}
//If matched==true, collect the counts of the previous launch first:
//the GPU keeps the counters, so matched==true after a kernel has run.
if(matched){
hipMemcpy(hostTotal,deviceTotal,sizeof(int)*l->steps/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(hostUnique,deviceUnique,sizeof(int)*l->steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<l->steps/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
matched=false;
}
hipMemcpy(deviceDown,hostDown,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceLeft,hostLeft,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceRight,hostRight,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceBoard,hostBoard,sizeof(int)*totalCond*mark,hipMemcpyHostToDevice);
hipMemcpy(deviceLocal,hostLocal,sizeof(struct local),hipMemcpyHostToDevice);
//size-mark is how many rows the GPU executes; totalCond is the thread count.
//`steps` threads are launched, but only totalCond of them do real work;
//the rest spin idle.
hipLaunchKernelGGL(( BitBoard_cuda_kernel_b2), dim3(l->steps/THREAD_NUM),dim3(THREAD_NUM) , 0, 0, size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,deviceBoard,mark,deviceLocal);
hipMemcpy(hostTotal,deviceTotal,sizeof(int)*l->steps/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(hostUnique,deviceUnique,sizeof(int)*l->steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<l->steps/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
TOTAL+=total;
UNIQUE+=unique;
//
hipFree(deviceDown);
hipFree(deviceLeft);
hipFree(deviceRight);
hipFree(deviceTotal);
hipFree(deviceUnique);
hipFree(deviceBoard);
hipFree(deviceLocal);
hipHostFree(hostDown);
hipHostFree(hostLeft);
hipHostFree(hostRight);
hipHostFree(hostTotal);
hipHostFree(hostUnique);
hipHostFree(hostBoard);
hipHostFree(hostLocal);
}
// GPU -n: backtracking when the queen IS placed in the corner
void BitBoard_backTrack1G(const unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local* l)
{
//From which row onward the GPU takes over; larger values mean more parallel GPU work.
/***08 queens are fixed through the 2nd row, so mark must be at least 3*********************/
const unsigned int mark=size>12?size-10:3;
const unsigned int h_mark=row;
const unsigned int mask=(1<<size)-1;
unsigned long totalCond=0;
bool matched=false;
//host
unsigned int down[32]; down[row]=_down;
unsigned int right[32]; right[row]=_right;
unsigned int left[32]; left[row]=_left;
//Keeping bitmap[] as an array lets us step back
//one row without using an explicit stack.
unsigned int bitmap[32];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
unsigned int bit;
// host
unsigned int* hostDown;
hipHostMalloc((void**) &hostDown,sizeof(int)*l->steps);
unsigned int* hostLeft;
hipHostMalloc((void**) &hostLeft,sizeof(int)*l->steps);
unsigned int* hostRight;
hipHostMalloc((void**) &hostRight,sizeof(int)*l->steps);
unsigned int* hostTotal;
hipHostMalloc((void**) &hostTotal,sizeof(int)*l->steps);
unsigned int* hostUnique;
hipHostMalloc((void**) &hostUnique,sizeof(int)*l->steps);
// device
unsigned int* deviceDown;
hipMalloc((void**) &deviceDown,sizeof(int)*l->steps);
unsigned int* deviceLeft;
hipMalloc((void**) &deviceLeft,sizeof(int)*l->steps);
unsigned int* deviceRight;
hipMalloc((void**) &deviceRight,sizeof(int)*l->steps);
unsigned int* deviceTotal;
hipMalloc((void**) &deviceTotal,sizeof(int)*l->steps/THREAD_NUM);
unsigned int* deviceUnique;
hipMalloc((void**) &deviceUnique,sizeof(int)*l->steps/THREAD_NUM);
// declare the struct and copy it over
struct local* hostLocal;
hipHostMalloc((void**) &hostLocal,sizeof(struct local)*l->steps);
struct local* deviceLocal;
hipMalloc((void**) &deviceLocal,sizeof(struct local)); // only element 0 is ever read by the kernel
hostLocal[0].BOUND1=l->BOUND1;
hostLocal[0].BOUND2=l->BOUND2;
hostLocal[0].TOPBIT=l->TOPBIT;
hostLocal[0].ENDBIT=l->ENDBIT;
hostLocal[0].SIDEMASK=l->SIDEMASK;
hostLocal[0].LASTMASK=l->LASTMASK;
hostLocal[0].steps=l->steps;
for(int i=0;i<MAX;i++){
hostLocal[0].board[i]=l->board[i];
}
//Up to N=12 the CPU handles rows down to row==mark (the first 3 rows):
//their down,left,right state is stored into hostDown,hostLeft,hostRight,
//and everything from row 3 on is executed by GPU threads, whose results are collected.
//From N=13 the number of rows handled on the CPU grows by one per N;
//for example with N=15 the CPU runs to row=5 and the GPU takes over from there
//(with the current settings the GPU executes at most 10 rows).
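//This mirrors BitBoard_backTrack2G above, except that no per-thread board
//prefix is shipped to the GPU: with the first queen fixed in the corner the
//kernel only needs the staged down,left,right values.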
//while(row>=0) {
int rowP=0;
unsigned long total=0;
unsigned long unique=0;
while(row>=h_mark) {
//bitmap[row]==0 means the queen cannot be placed
//anywhere in this row, so back up one row.
//06GPU: this variant is faster.
if(bitmap[row]==0){ row--; }
else{//
if(row<l->BOUND1) { /***11 pruning*********************/
bitmap[row]&=~2; // bm|=2; bm^=2; (equivalent to bm&=~2)
}
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0){//
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
row++;
if(row==mark){
//Place queens one by one on row `mark` (the 3rd row), recording the
//down,left,right state instead of descending further. Once every
//candidate position of this row has been tried, the GPU runs them in parallel.
//totalCond becomes the threadId; each thread gets its own down,left,right state.
//The state of row=2 (this grows from N=13 on, e.g. row=5 for N=15)
//is stored into hostDown,hostLeft,hostRight.
hostDown[totalCond]=down[row];
hostLeft[totalCond]=left[row];
hostRight[totalCond]=right[row];
//increment the number of queued threads
totalCond++;
//Once the maximum GPU batch is reached, run the GPU here; `steps`
//controls how many partial boards run on the GPU concurrently.
//For small N totalCond never reaches steps, but it does as N grows.
//This block is entered only when totalCond==steps.
if(totalCond==l->steps){
//If matched==true, collect the counts of the previous launch first:
//the GPU keeps the counters, so matched==true after a kernel has run.
if(matched){
hipMemcpy(hostTotal,deviceTotal,sizeof(int)*l->steps/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(hostUnique,deviceUnique,sizeof(int)*l->steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<l->steps/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
matched=false;
}
hipMemcpy(deviceDown,hostDown,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceLeft,hostLeft,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceRight,hostRight,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceLocal,hostLocal,sizeof(struct local),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( BitBoard_cuda_kernel_b1), dim3(l->steps/THREAD_NUM),dim3(THREAD_NUM) , 0, 0, size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,row,deviceLocal);
//`steps` threads are launched, but only totalCond of them do real
//work; the rest spin idle.
//The GPU keeps the counters, so mark matched=true after the launch.
matched=true;
//After a totalCond==steps launch, restart the thread counter from 0
//(this lets the GPU be launched again and again, steps at a time).
totalCond=0;
}
//After storing into hostDown,hostLeft,hostRight, move up one row.
//Repeating this places a queen on every possible square of row=2
//and records each resulting state into hostDown,hostLeft,hostRight.
row--;
}
}else{
//No position left: back up one row. Until row==mark is reached
//the CPU runs the ordinary n-queens search.
row--;
}
}
}
//If matched==true, collect the counts of the previous launch first:
//the GPU keeps the counters, so matched==true after a kernel has run.
if(matched){
hipMemcpy(hostTotal,deviceTotal,sizeof(int)*l->steps/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(hostUnique,deviceUnique,sizeof(int)*l->steps/THREAD_NUM,hipMemcpyDeviceToHost);
for(int col=0;col<l->steps/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
matched=false;
}
hipMemcpy(deviceDown,hostDown,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceLeft,hostLeft,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceRight,hostRight,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceLocal,hostLocal,sizeof(struct local),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( BitBoard_cuda_kernel_b1), dim3(l->steps/THREAD_NUM),dim3(THREAD_NUM) , 0, 0, size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,mark,deviceLocal);
hipMemcpy(hostTotal,deviceTotal,sizeof(int)*l->steps/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(hostUnique,deviceUnique,sizeof(int)*l->steps/THREAD_NUM,hipMemcpyDeviceToHost);
// tally the per-block results
for(int col=0;col<l->steps/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
TOTAL+=total;
UNIQUE+=unique;
//free device and host buffers
hipFree(deviceDown);
hipFree(deviceLeft);
hipFree(deviceRight);
hipFree(deviceTotal);
hipFree(deviceUnique);
hipFree(deviceLocal);
hipHostFree(hostDown);
hipHostFree(hostLeft);
hipHostFree(hostRight);
hipHostFree(hostTotal);
hipHostFree(hostUnique);
hipHostFree(hostLocal);
}
// GPU -n: drive the bitboard search, branching on whether the first queen is in the corner
void BitBoard_build(const unsigned int size,int steps)
{
if(size<=0||size>32){return;}
/**
int values are treated as unsigned.
total: keep accesses to the global variable TOTAL to a minimum.
*/
struct local l; //GPU
l.steps=steps;
unsigned int bit=1;
l.board[0]=1;
unsigned int left=bit<<1,down=bit,right=bit>>1;
/**
The second row runs from the third column from the right to the second column from the left.
*/
for(l.BOUND1=2;l.BOUND1<size-1;l.BOUND1++){
l.board[1]=bit=(1<<l.BOUND1);
BitBoard_backTrack1G(size,2,(left|bit)<<1,(down|bit),(right|bit)>>1,&l);
}
l.TOPBIT=1<<(size-1);
l.SIDEMASK=l.LASTMASK=(l.TOPBIT|1);
l.ENDBIT=(l.TOPBIT>>1);
/**
The first row starts from the second column from the right.
Even N uses half the columns: n=8 -> 1,2,3; odd N uses half+1: n=9 -> 1,2,3,4.
*/
for(l.BOUND1=1,l.BOUND2=size-1-1;l.BOUND1<l.BOUND2;l.BOUND1++,l.BOUND2--){
l.board[0]=bit=(1<<l.BOUND1);
BitBoard_backTrack2G(size,1,bit<<1,bit,bit>>1,&l);
l.LASTMASK|=l.LASTMASK>>1|l.LASTMASK<<1;
l.ENDBIT>>=1;
}
}
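//Worked example for size=8 (illustrative): the corner branch fixes
//board[0]=1 (column 0 of row 0) and tries BOUND1=2..6 for the row-1 queen;
//the non-corner branch then walks (BOUND1,BOUND2) = (1,6),(2,5),(3,4),
//placing the row-0 queen at column BOUND1 while LASTMASK widens and
//ENDBIT shifts right once per iteration.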
// CUDA initialization
bool InitCUDA()
{
int count;
hipGetDeviceCount(&count);
if(count==0){fprintf(stderr,"There is no device.\n");return false;}
unsigned int i;
for(i=0;i<count;++i){
struct hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop,i)==hipSuccess){if(prop.major>=1){break;} }
}
if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
hipSetDevice(i);
return true;
}
//entry point
int main(int argc,char** argv)
{
bool cpu=false,cpur=false,gpu=false,gpuBitBoard=false;
unsigned int argstart=2;
if(argc>=2&&argv[1][0]=='-'){
if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;}
else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;}
else if(argv[1][1]=='n'||argv[1][1]=='N'){gpuBitBoard=true;}
else{ gpuBitBoard=true; } //default to the GPU bitboard version
argstart=2;
}
if(argc<argstart){
printf("Usage: %s [-c|-g|-r|-s] n steps\n",argv[0]);
printf(" -r: CPU \n");
printf(" -c: CPU \n");
printf(" -g: GPU \n");
printf(" -n: GPU \n");
}
if(cpur){ printf("\n\nSymmetry reduction, CPU recursive\n"); }
else if(cpu){ printf("\n\nSymmetry reduction, CPU non-recursive\n"); }
else if(gpu){ printf("\n\nSymmetry reduction, GPU\n"); }
else if(gpuBitBoard){ printf("\n\nSymmetry reduction, GPU bitboard\n"); }
if(cpu||cpur)
{
unsigned int min=4;
unsigned int targetN=17;
struct timeval t0;
struct timeval t1;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(unsigned int size=min;size<=targetN;size++){
local l;
gettimeofday(&t0,NULL);//start timing
if(cpur){ //recursive
symmetry_R(size,&l);
}
if(cpu){ //non-recursive
symmetry_NR(size,&l);
}
//
gettimeofday(&t1,NULL);//stop timing
unsigned int ss;
unsigned int ms;
unsigned int dd;
if(t1.tv_usec<t0.tv_usec) {
dd=(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
}else {
dd=(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}//end if
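//Example: an elapsed time of 93.61s gives dd=0, ss=93, ms=61 here, and the
//lines below split ss into hh=0, mm=1, ss=33; note that the "ms" field is
//actually hundredths of a second because of the /10000.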
unsigned int hh=ss/3600;
unsigned int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
} //end for
}//end if
if(gpu||gpuBitBoard)
{
int steps=24576;
if(!InitCUDA()){return 0;}
unsigned int min=4;
unsigned int targetN=21;
struct timeval t0;
struct timeval t1;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(unsigned int size=min;size<=targetN;size++){
gettimeofday(&t0,NULL);
if(gpu){
TOTAL=UNIQUE=0;
local l[MAX];
GPU_symmetry_R(size,&l[0]);
TOTAL=l->TOTAL;
UNIQUE=l->UNIQUE;
}else if(gpuBitBoard){
TOTAL=UNIQUE=0;
BitBoard_build(size,steps);
}
gettimeofday(&t1,NULL);
unsigned int ss;
unsigned int ms;
unsigned int dd;
if (t1.tv_usec<t0.tv_usec) {
dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
} else {
dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}//end if
unsigned int hh=ss/3600;
unsigned int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
}
}
return 0;
}
| 331bcc791a654fee32cf198cec316bbde1ddca83.cu | /**
 *
 * GPU/CUDA port of the C version of the bash symmetry-reduction N-Queens solver
 *
 For a detailed explanation (in Japanese) see
 https://suzukiiichiro.github.io/search/?keyword=Nクイーン問題
 Compile and run the non-recursive version:
 $ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -c
 Compile and run the recursive version:
 $ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -r
 Run on the GPU without bitboard parallelism:
 $ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -g
 Run on the GPU with bitboard parallelism:
 $ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -n
 *
 */
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define MAX 27
#define THREAD_NUM 96
// システムによって以下のマクロが必要であればコメントを外してください。
//#define UINT64_C(c) c ## ULL
//
// グローバル変数
unsigned long TOTAL=0;
unsigned long UNIQUE=0;
//GPU で使うローカル構造体
typedef struct local
{
unsigned int BOUND1,BOUND2;
unsigned int TOPBIT,ENDBIT,SIDEMASK,LASTMASK;
unsigned long board[MAX];
unsigned long COUNT2,COUNT4,COUNT8,TOTAL,UNIQUE;
unsigned int steps;
}local;
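// Rough field glossary (added for readability): BOUND1/BOUND2 bound the columns
// tried for the top-row queen, TOPBIT is the leftmost column bit, and
// ENDBIT/SIDEMASK/LASTMASK are the masks used by the symmetry pruning.
// COUNT2/COUNT4/COUNT8 count solutions whose symmetry orbit has 2, 4 or 8
// members, so TOTAL=2*COUNT2+4*COUNT4+8*COUNT8; steps is the GPU batch size.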
// CPU 再帰/非再帰共通 対称解除法
void symmetryOps(unsigned int size,struct local* l)
{
/**
2.クイーンが右上角以外にある場合、
(1) 90度回転させてオリジナルと同型になる場合、さらに90度回転(オリジナルか
ら180度回転)させても、さらに90度回転(オリジナルから270度回転)させてもオリ
ジナルと同型になる。
こちらに該当するユニーク解が属するグループの要素数は、左右反転させたパター
ンを加えて2個しかありません。
*/
if(l->board[l->BOUND2]==1){
unsigned int ptn;
unsigned int own;
for(ptn=2,own=1;own<size;++own,ptn<<=1){
unsigned int bit;
unsigned int you;
for(bit=1,you=size-1;(l->board[you]!=ptn)&&l->board[own]>=bit;--you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
// 90度回転して同型なら180度回転しても270度回転しても同型である
if(own>size-1){
l->COUNT2++;
return ;
}//end if
}//end if
/**
2.クイーンが右上角以外にある場合、
(2) 90度回転させてオリジナルと異なる場合は、270度回転させても必ずオリジナル
とは異なる。ただし、180度回転させた場合はオリジナルと同型になることも有り得
る。こちらに該当するユニーク解が属するグループの要素数は、180度回転させて同
型になる場合は4個(左右反転×縦横回転)
*/
//180度回転
if(l->board[size-1]==l->ENDBIT){
unsigned int you;
unsigned int own;
for(you=size-1-1,own=1;own<=size-1;++own,--you){
unsigned int bit;
unsigned int ptn;
for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
//90度回転が同型でなくても180度回転が同型であることもある
if(own>size-1){
l->COUNT4++;
return ;
}
}//end if
/**
2.クイーンが右上角以外にある場合、
(3)180度回転させてもオリジナルと異なる場合は、8個(左右反転×縦横回転×上下反転)
*/
//270度回転
if(l->board[l->BOUND1]==l->TOPBIT){
unsigned int ptn;
unsigned int own;
unsigned int you;
unsigned int bit;
for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){
for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
}//end if
l->COUNT8++;
}
/**
CPU -c
*/
// 非再帰 角にQがないときのバックトラック
void symmetry_backTrack_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local *l)
{
unsigned int mask=(1<<size)-1;
unsigned int down[size];
unsigned int left[size];
unsigned int right[size];
unsigned int bitmap[size];
left[row]=_left;
down[row]=_down;
right[row]=_right;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
while(row>0){
if(bitmap[row]>0){
if(row<l->BOUND1){ //上部サイド枝刈り
bitmap[row]|=l->SIDEMASK;
bitmap[row]^=l->SIDEMASK;
}else if(row==l->BOUND2){ //下部サイド枝刈り
if((down[row]&l->SIDEMASK)==0){
row--;
}
if((down[row]&l->SIDEMASK)!=l->SIDEMASK){
bitmap[row]&=l->SIDEMASK;
}
}
unsigned int save_bitmap=bitmap[row];
unsigned int bit=-bitmap[row]&bitmap[row];
bitmap[row]^=bit;
l->board[row]=bit; //Qを配置
if((bit&mask)!=0){
if(row==(size-1)){
if( (save_bitmap&l->LASTMASK)==0){
symmetryOps(size,l); //対称解除法
}
row--;
}else{
unsigned int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=(down[n]|bit);
right[row]=(right[n]|bit)>>1;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
}else{
row--;
}
}else{
row--;
}
}//end while
}
// 非再帰 角にQがあるときのバックトラック
void symmetry_backTrack_corner_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local *l)
{
unsigned int mask=(1<<size)-1;
unsigned int bit=0;
unsigned int down[size];
unsigned int left[size];
unsigned int right[size];
unsigned int bitmap[size];
left[row]=_left;
down[row]=_down;
right[row]=_right;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
while(row>=2){
if(row<l->BOUND1){
// bitmap[row]=bitmap[row]|2;
// bitmap[row]=bitmap[row]^2;
bitmap[row]&=~2;
}
if(bitmap[row]>0){
bit=-bitmap[row]&bitmap[row];
bitmap[row]^=bit;
if(row==(size-1)){
l->COUNT8++;
row--;
}else{
unsigned int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=(down[n]|bit);
right[row]=(right[n]|bit)>>1;
l->board[row]=bit; //Qを配置
//クイーンが配置可能な位置を表す
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
}else{
row--;
}
}//end while
}
// 非再帰 対称解除法
void symmetry_NR(unsigned int size,struct local* l)
{
l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0;
unsigned int bit=0;
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->SIDEMASK=l->LASTMASK=0;
l->BOUND1=2;
l->BOUND2=0;
l->board[0]=1;
while(l->BOUND1>1&&l->BOUND1<size-1){
if(l->BOUND1<size-1){
bit=1<<l->BOUND1;
l->board[1]=bit; //2行目にQを配置
//角にQがあるときのバックトラック
symmetry_backTrack_corner_NR(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l);
}
l->BOUND1++;
}
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->TOPBIT>>1;
l->SIDEMASK=l->TOPBIT|1;
l->LASTMASK=l->TOPBIT|1;
l->BOUND1=1;
l->BOUND2=size-2;
while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){
if(l->BOUND1<l->BOUND2){
bit=1<<l->BOUND1;
l->board[0]=bit; //Qを配置
//角にQがないときのバックトラック
symmetry_backTrack_NR(size,1,bit<<1,bit,bit>>1,l);
}
l->BOUND1++;
l->BOUND2--;
l->ENDBIT=l->ENDBIT>>1;
l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1;
}//ene while
UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8;
TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8;
}
/**
CPU -r
*/
// 再帰 角にQがないときのバックトラック
void symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
if(row==(size-1)){
if(bitmap){
if( (bitmap&l->LASTMASK)==0){
l->board[row]=bitmap; //Qを配置
symmetryOps(size,l); //対称解除
}
}
}else{
if(row<l->BOUND1){
bitmap=bitmap|l->SIDEMASK;
bitmap=bitmap^l->SIDEMASK;
}else{
if(row==l->BOUND2){
if((down&l->SIDEMASK)==0){
return;
}
if( (down&l->SIDEMASK)!=l->SIDEMASK){
bitmap=bitmap&l->SIDEMASK;
}
}
}
while(bitmap){
unsigned int bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit;
symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
// 再帰 角にQがあるときのバックトラック
void symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
unsigned int bit=0;
if(row==(size-1)){
if(bitmap){
l->board[row]=bitmap;
l->COUNT8++;
}
}else{
if(row<l->BOUND1){ //枝刈り
bitmap=bitmap|2;
bitmap=bitmap^2;
}
while(bitmap){
bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit; //Qを配置
symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
// 再帰 対称解除法
void symmetry_R(unsigned int size,struct local* l)
{
l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0;
unsigned int bit=0;
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->LASTMASK=l->SIDEMASK=0;
l->BOUND1=2;
l->BOUND2=0;
l->board[0]=1;
while(l->BOUND1>1 && l->BOUND1<size-1){
if(l->BOUND1<size-1){
bit=1<<l->BOUND1;
l->board[1]=bit; //2行目にQを配置
//角にQがあるときのバックトラック
symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l);
}
l->BOUND1++;
}//end while
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->TOPBIT>>1;
l->SIDEMASK=l->TOPBIT|1;
l->LASTMASK=l->TOPBIT|1;
l->BOUND1=1;
l->BOUND2=size-2;
while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){
if(l->BOUND1<l->BOUND2){
bit=1<<l->BOUND1;
l->board[0]=bit; //Qを配置
//角にQがないときのバックトラック
symmetry_backTrack(size,1,bit<<1,bit,bit>>1,l);
}
l->BOUND1++;
l->BOUND2--;
l->ENDBIT=l->ENDBIT>>1;
l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1;
}//ene while
UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8;
TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8;
}
/**
GPU -g
*/
__device__
struct dlocal
{
unsigned int BOUND1,BOUND2;
unsigned int TOPBIT,ENDBIT,SIDEMASK,LASTMASK;
unsigned long board[MAX];
unsigned long COUNT2,COUNT4,COUNT8,TOTAL,UNIQUE;
}dlocal;
__device__ struct dlocal gdl[9999];
// GPU 対称解除法
__host__ __device__
long GPU_symmetryOps(unsigned int size,struct dlocal* l)
{
/**
2.クイーンが右上角以外にある場合、
(1) 90度回転させてオリジナルと同型になる場合、さらに90度回転(オリジナルか
ら180度回転)させても、さらに90度回転(オリジナルから270度回転)させてもオリ
ジナルと同型になる。
こちらに該当するユニーク解が属するグループの要素数は、左右反転させたパター
ンを加えて2個しかありません。
*/
if(l->board[l->BOUND2]==1){
unsigned int ptn;
unsigned int own;
for(ptn=2,own=1;own<size;++own,ptn<<=1){
unsigned int bit;
unsigned int you;
for(bit=1,you=size-1;(l->board[you]!=ptn)&& l->board[own]>=bit;--you){
bit<<=1;
}
if(l->board[own]>bit){
return 0;
}
if(l->board[own]<bit){
break;
}
}//end for
// 90度回転して同型なら180度回転しても270度回転しても同型である
if(own>size-1){
l->COUNT2++;
return 2;
}//end if
}//end if
/**
2.クイーンが右上角以外にある場合、
(2) 90度回転させてオリジナルと異なる場合は、270度回転させても必ずオリジナル
とは異なる。ただし、180度回転させた場合はオリジナルと同型になることも有り得
る。こちらに該当するユニーク解が属するグループの要素数は、180度回転させて同
型になる場合は4個(左右反転×縦横回転)
*/
//180度回転
if(l->board[size-1]==l->ENDBIT){
unsigned int you;
unsigned int own;
for(you=size-1-1,own=1;own<=size-1;++own,--you){
unsigned int bit;
unsigned int ptn;
for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){
bit<<=1;
}
if(l->board[own]>bit){
return 0;
}
if(l->board[own]<bit){
break;
}
}//end for
//90度回転が同型でなくても180度回転が同型であることもある
if(own>size-1){
l->COUNT4++;
return 4;
}
}//end if
/**
2.クイーンが右上角以外にある場合、
(3)180度回転させてもオリジナルと異なる場合は、8個(左右反転×縦横回転×上下反転)
*/
//270度回転
if(l->board[l->BOUND1]==l->TOPBIT){
unsigned int ptn;
unsigned int own;
unsigned int you;
unsigned int bit;
for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){
for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){
bit<<=1;
}
if(l->board[own]>bit){
return 0;
}
if(l->board[own]<bit){
break;
}
}//end for
}//end if
l->COUNT8++;
return 8;
}
// GPU 角にQがないときのバックトラック
__host__ __device__
long GPU_symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct dlocal* l)
{
unsigned long counter=0;
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
if(row==(size-1)){
if(bitmap){
if( (bitmap& l->LASTMASK)==0){
l->board[row]=bitmap; //Qを配置
counter+=GPU_symmetryOps(size,l); //対称解除
}
}
}else{
if(row<l->BOUND1){
bitmap=bitmap|l->SIDEMASK;
bitmap=bitmap^l->SIDEMASK;
}else{
if(row==l->BOUND2){
if((down&l->SIDEMASK)==0){
return 0;
}
if( (down&l->SIDEMASK)!=l->SIDEMASK){
bitmap=bitmap&l->SIDEMASK;
}
}
}
while(bitmap){
unsigned int bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit;
counter+=GPU_symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
return counter;
}
// GPU 角にQがあるときのバックトラック
__host__ __device__
long GPU_symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct dlocal* l)
{
unsigned long counter=0;
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
unsigned int bit=0;
if(row==(size-1)){
if(bitmap){
l->board[row]=bitmap;
l->COUNT8++;
counter+=8;
}
}else{
if(row<l->BOUND1){ //枝刈り
bitmap=bitmap|2;
bitmap=bitmap^2;
}
while(bitmap){
bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit; //Qを配置
counter+=GPU_symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
return counter;
}
// GPU 対称解除法 -g の実行時のみ呼び出されます
__host__ __device__
void GPU_symmetry_R(unsigned int size,struct local* hostLocal)
{
// GPU内部で使うための dlocal構造体
struct dlocal l;
l.TOTAL=l.UNIQUE=l.COUNT2=l.COUNT4=l.COUNT8=0;
unsigned int bit=0;
l.TOPBIT=1<<(size-1);
l.ENDBIT=l.LASTMASK=l.SIDEMASK=0;
l.BOUND1=2;
l.BOUND2=0;
l.board[0]=1;
while(l.BOUND1>1 && l.BOUND1<size-1){
if(l.BOUND1<size-1){
bit=1<<l.BOUND1;
l.board[1]=bit; //2行目にQを配置
//角にQがあるときのバックトラック
GPU_symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,&l);
}
l.BOUND1++;
}//end while
l.TOPBIT=1<<(size-1);
l.ENDBIT=l.TOPBIT>>1;
l.SIDEMASK=l.TOPBIT|1;
l.LASTMASK=l.TOPBIT|1;
l.BOUND1=1;
l.BOUND2=size-2;
while(l.BOUND1>0 && l.BOUND2<size-1 && l.BOUND1<l.BOUND2){
if(l.BOUND1<l.BOUND2){
bit=1<<l.BOUND1;
l.board[0]=bit; //Qを配置
//角にQがないときのバックトラック
GPU_symmetry_backTrack(size,1,bit<<1,bit,bit>>1,&l);
}
l.BOUND1++;
l.BOUND2--;
l.ENDBIT=l.ENDBIT>>1;
l.LASTMASK=l.LASTMASK<<1|l.LASTMASK|l.LASTMASK>>1;
}//ene while
// 集計値は hostLocalへ代入
hostLocal->UNIQUE=l.COUNT2+l.COUNT4+l.COUNT8;
hostLocal->TOTAL=l.COUNT2*2+l.COUNT4*4+l.COUNT8*8;
}
/**
CUDA13
*/
// GPU -n 対称解除法
__device__
int BitBoard_symmetryOps(const unsigned int size,const unsigned int* board,struct local* l)
{
unsigned int own,ptn,you,bit;
//90度回転
if(board[l->BOUND2]==1){ own=1; ptn=2;
while(own<=size-1){ bit=1; you=size-1;
while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; you--; }
if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; }
own++; ptn<<=1;
}
/** 90度回転して同型なら180度/270度回転も同型である */
if(own>size-1){ return 2; }
}
//180度回転
if(board[size-1]==l->ENDBIT){ own=1; you=size-1-1;
while(own<=size-1){ bit=1; ptn=l->TOPBIT;
while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; ptn>>=1; }
if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; }
own++; you--;
}
/** 90度回転が同型でなくても180度回転が同型である事もある */
if(own>size-1){ return 4; }
}
//270度回転
if(board[l->BOUND1]==l->TOPBIT){ own=1; ptn=l->TOPBIT>>1;
while(own<=size-1){ bit=1; you=0;
while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; you++; }
if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; }
own++; ptn>>=1;
}
}
return 8;
}
// GPU -n Qが角にある場合のバックトラック内の再帰処理をカーネルで行う
__global__
void BitBoard_cuda_kernel_b1(const unsigned int size,unsigned int mark,unsigned int* _down,unsigned int* _left,unsigned int* _right,unsigned int* _total,unsigned int* _unique,unsigned long _cond,unsigned int _row,struct local* l)
{
const unsigned int mask=(1<<size)-1;
unsigned long total=0;
unsigned int unique=0;
int row=0;
unsigned int bit;
//
//スレッド
//
//ブロック内のスレッドID
const unsigned int tid=threadIdx.x;
//グリッド内のブロックID
const unsigned int bid=blockIdx.x;
//全体通してのID
const unsigned int idx=bid*blockDim.x+tid;
//
//シェアードメモリ
//
//sharedメモリを使う ブロック内スレッドで共有
//10固定なのは現在のmask設定で
//GPUで実行するのは最大10だから
//THREAD_NUMはブロックあたりのスレッド数
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=_down[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=_left[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=_right[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
bitmap[tid][row] =mask&~(down[tid][row]|left[tid][row]|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
__shared__ unsigned int usum[THREAD_NUM];
//余分なスレッドは動かさない
//GPUはsteps数起動するが_cond以上は空回しする
if(idx<_cond){
//_down,_left,_rightの情報を
//down,left,rightに詰め直す
//CPU で詰め込んだ t_はsteps個あるが
//ブロック内ではブロックあたりのスレッド数に限定
//されるので idxでよい
//
unsigned int bitmap_tid_row;
unsigned int down_tid_row;
unsigned int left_tid_row;
unsigned int right_tid_row;
while(row>=0){
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
if(bitmap_tid_row==0){
row--;
}else{
/**11 枝刈り**********/
if(row+_row<l->BOUND1) {
bitmap_tid_row=bitmap[tid][row]&=~2; // bm|=2; bm^=2; (bm&=~2と同等)
}
//クイーンを置く
//置く場所があるかどうか
bitmap[tid][row]
^=bit
=(-bitmap_tid_row&bitmap_tid_row);
if((bit&mask)!=0){
//最終行?最終行から1個前の行まで
//無事到達したら 加算する
if(row+1==mark){
//ホストに戻す配列にTOTALを入れる
//スレッドが1つの場合は配列は1個
unique++;
total+=8; //対称解除で得られた解数を加算
//}
row--;
}else{
int rowP=row+1;
down[tid][rowP]=down_tid_row|bit;
left[tid][rowP]=(left_tid_row|bit)<<1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]=mask&~(down[tid][rowP]|left[tid][rowP]|right[tid][rowP]);
row++;
}
}else{
//置く場所がなければ1個上に
row--;
}
}
}
//最後sum[tid]に加算する
sum[tid]=total;
usum[tid]=unique;
}else{
//_cond未満は空回しするのでtotalは加算しない
sum[tid]=0;
usum[tid]=0;
}
//__syncthreads()でブロック内のスレッド間の同期
//全てのスレッドが__syncthreads()に辿り着くのを待つ
__syncthreads();
if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
usum[tid]+=usum[tid+64];
}
__syncwarp();
if(tid<32){
sum[tid]+=sum[tid+32];
usum[tid]+=usum[tid+32];
}
__syncwarp();
if(tid<16){
sum[tid]+=sum[tid+16];
usum[tid]+=usum[tid+16];
}
__syncwarp();
if(tid<8){
sum[tid]+=sum[tid+8];
usum[tid]+=usum[tid+8];
}
__syncwarp();
if(tid<4){
sum[tid]+=sum[tid+4];
usum[tid]+=usum[tid+4];
}
__syncwarp();
if(tid<2){
sum[tid]+=sum[tid+2];
usum[tid]+=usum[tid+2];
}
__syncwarp();
if(tid<1){
sum[tid]+=sum[tid+1];
usum[tid]+=usum[tid+1];
}
__syncwarp();
if(tid==0){
_total[bid]=sum[0];
_unique[bid]=usum[0];
}
}
// GPU -n Qが角にない場合のバックトラック内の再帰処理をカーネルで行う
__global__
void BitBoard_cuda_kernel_b2(const unsigned int size,unsigned int mark,unsigned int* _down,unsigned int* _left,unsigned int* _right,unsigned int* _total,unsigned int* _unique,unsigned long _cond,unsigned int* board,unsigned int _row,struct local* l)
{
const unsigned int mask=(1<<size)-1;
unsigned long total=0;
unsigned int unique=0;
int row=0;
unsigned int bit;
//
//スレッド
//
//ブロック内のスレッドID
unsigned const int tid=threadIdx.x;
//グリッド内のブロックID
unsigned const int bid=blockIdx.x;
//全体通してのID
unsigned const int idx=bid*blockDim.x+tid;
//
//シェアードメモリ
//
//sharedメモリを使う ブロック内スレッドで共有
//10固定なのは現在のmask設定で
//GPUで実行するのは最大10だから
//THREAD_NUMはブロックあたりのスレッド数
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=_down[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=_left[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=_right[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
//down,left,rightからbitmapを出す
bitmap[tid][row]=mask&~(down[tid][row]|left[tid][row]|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
unsigned int c_aBoard[MAX];
__shared__ unsigned int usum[THREAD_NUM];
//余分なスレッドは動かさない
//GPUはsteps数起動するが_cond以上は空回しする
if(idx<_cond){
//_down,_left,_rightの情報を
//down,left,rightに詰め直す
//CPU で詰め込んだ t_はsteps個あるが
//ブロック内ではブロックあたりのスレッド数に限定
//されるので idxでよい
//
for(int i=0;i<_row;i++){
c_aBoard[i]=board[idx*_row+i]; //2次元配列だが1次元的に利用
}
unsigned int bitmap_tid_row;
unsigned int down_tid_row;
unsigned int left_tid_row;
unsigned int right_tid_row;
while(row>=0){
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
//
//bitmap[tid][row]=00000000 クイーンを
//どこにも置けないので1行上に戻る
if(bitmap_tid_row==0){
row--;
}else{
/**11 枝刈り追加**********/
//【枝刈り】上部サイド枝刈り
if(row+_row<l->BOUND1){
bitmap_tid_row=bitmap[tid][row]&=~l->SIDEMASK;
//【枝刈り】下部サイド枝刈り
}else if(row+_row==l->BOUND2) {
if((down_tid_row&l->SIDEMASK)==0){
row--;
continue;
}
if((down_tid_row&l->SIDEMASK)!=l->SIDEMASK){
bitmap_tid_row=bitmap[tid][row]&=l->SIDEMASK;
}
}
int save_bitmap=bitmap[tid][row];
//クイーンを置く
//置く場所があるかどうか
bitmap[tid][row]^=c_aBoard[row+_row]=bit=(-bitmap_tid_row&bitmap_tid_row);
if((bit&mask)!=0){
//最終行?最終行から1個前の行まで
//無事到達したら 加算する
if(row+1==mark){
/***11 l->LASTMASK枝刈り*********************/
if((save_bitmap&l->LASTMASK)==0){
/***12 symmetryOps 省力化のためl->BOUND1,l->BOUND2,l->TOPBIT,l->ENDBITを渡す*****/
int s=BitBoard_symmetryOps(size,c_aBoard,l);
if(s!=0){
//print(size); //print()でTOTALを++しない
//ホストに戻す配列にTOTALを入れる
//スレッドが1つの場合は配列は1個
unique++;
total+=s; //対称解除で得られた解数を加算
}
row--;
}
}else{
int rowP=row+1;
down[tid][rowP]=down_tid_row|bit;
left[tid][rowP]=(left_tid_row|bit)<<1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]
=mask&~(
down[tid][rowP]
|left[tid][rowP]
|right[tid][rowP]);
row++;
}
}else{
//置く場所がなければ1個上に
row--;
}
}
}
//最後sum[tid]に加算する
sum[tid]=total;
usum[tid]=unique;
}else{
//_cond未満は空回しするのでtotalは加算しない
sum[tid]=0;
usum[tid]=0;
}
//__syncthreads()でブロック内のスレッド間の同期
//全てのスレッドが__syncthreads()に辿り着くのを待つ
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
usum[tid]+=usum[tid+64];
}
__syncwarp();if(tid<32){
sum[tid]+=sum[tid+32];
usum[tid]+=usum[tid+32];
}
__syncwarp();if(tid<16){
sum[tid]+=sum[tid+16];
usum[tid]+=usum[tid+16];
}
__syncwarp();if(tid<8){
sum[tid]+=sum[tid+8];
usum[tid]+=usum[tid+8];
}
__syncwarp();if(tid<4){
sum[tid]+=sum[tid+4];
usum[tid]+=usum[tid+4];
}
__syncwarp();if(tid<2){
sum[tid]+=sum[tid+2];
usum[tid]+=usum[tid+2];
}
__syncwarp();if(tid<1){
sum[tid]+=sum[tid+1];
usum[tid]+=usum[tid+1];
}
__syncwarp();if(tid==0){
_total[bid]=sum[0];
_unique[bid]=usum[0];
}
}
// GPU -n Qが角にない
void BitBoard_backTrack2G(const unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local* l)
{
//何行目からGPUで行くか。ここの設定は変更可能、設定値を多くするほどGPUで並行して動く
/***11 size<8の時はmarkが2*********************/
unsigned int mark=size>12?size-10:3;
//unsigned int mark=size>11?size-9:3;
if(size<8){ mark=2; }
const unsigned int h_mark=row;
unsigned long totalCond=0;
unsigned int mask=(1<<size)-1;
bool matched=false;
//host
unsigned int down[32]; down[row]=_down;
unsigned int right[32]; right[row]=_right;
unsigned int left[32]; left[row]=_left;
//bitmapを配列で持つことにより
//stackを使わないで1行前に戻れる
unsigned int bitmap[32];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
unsigned int bit;
unsigned int* hostDown;
cudaMallocHost((void**) &hostDown,sizeof(int)*l->steps);
unsigned int* hostLeft;
cudaMallocHost((void**) &hostLeft,sizeof(int)*l->steps);
unsigned int* hostRight;
cudaMallocHost((void**) &hostRight,sizeof(int)*l->steps);
unsigned int* hostTotal;
cudaMallocHost((void**) &hostTotal,sizeof(int)*l->steps);
unsigned int* hostUnique;
cudaMallocHost((void**) &hostUnique,sizeof(int)*l->steps);
unsigned int* hostBoard;
cudaMallocHost((void**) &hostBoard,sizeof(int)*l->steps*mark);
//device
unsigned int* deviceDown;
cudaMalloc((void**) &deviceDown,sizeof(int)*l->steps);
unsigned int* deviceLeft;
cudaMalloc((void**) &deviceLeft,sizeof(int)*l->steps);
unsigned int* deviceRight;
cudaMalloc((void**) &deviceRight,sizeof(int)*l->steps);
unsigned int* deviceTotal;
cudaMalloc((void**) &deviceTotal,sizeof(int)*l->steps/THREAD_NUM);
unsigned int* deviceUnique;
cudaMalloc((void**) &deviceUnique,sizeof(int)*l->steps/THREAD_NUM);
unsigned int* deviceBoard;
cudaMalloc((void**) &deviceBoard,sizeof(int)*l->steps*mark);
struct local* hostLocal;
cudaMallocHost((void**) &hostLocal,sizeof(struct local)*l->steps);
struct local* deviceLocal;
cudaMalloc((void**) &deviceLocal,sizeof(struct local)); // only element 0 is ever read by the kernel
hostLocal[0].BOUND1=l->BOUND1;
hostLocal[0].BOUND2=l->BOUND2;
hostLocal[0].TOPBIT=l->TOPBIT;
hostLocal[0].ENDBIT=l->ENDBIT;
hostLocal[0].SIDEMASK=l->SIDEMASK;
hostLocal[0].LASTMASK=l->LASTMASK;
hostLocal[0].steps=l->steps;
for(int i=0;i<MAX;i++){
hostLocal[0].board[i]=l->board[i];
}
//12行目までは3行目までCPU->row==mark以下で 3行目までの
//down,left,right情報をhostDown ,hostLeft,hostRight
//に格納
//する->3行目以降をGPUマルチスレッドで実行し結果を取得
//13行目以降はCPUで実行する行数が1個ずつ増えて行く
//例えばn15だとrow=5までCPUで実行し、
//それ以降はGPU(現在の設定だとGPUでは最大10行実行する
//ようになっている)
unsigned int rowP=0;
unsigned long total=0;
unsigned long unique=0;
while(row>=h_mark) {
//bitmap[row]=00000000 クイーンを
//どこにも置けないので1行上に戻る
//06GPU こっちのほうが優秀
if(bitmap[row]==0){ row--; }
else{//おける場所があれば進む
/***11 枝刈り追加*********************/
//【枝刈り】上部サイド枝刈り
if(row<l->BOUND1){
bitmap[row]&=~l->SIDEMASK;
//【枝刈り】下部サイド枝刈り
}else if(row==l->BOUND2) {
if((down[row]&l->SIDEMASK)==0){ row--; }
if((down[row]&l->SIDEMASK)!=l->SIDEMASK){ bitmap[row]&=l->SIDEMASK; }
}
//06SGPU
bitmap[row]^=l->board[row]=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0){//置く場所があれば先に進む
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
row++;
if(row==mark){
//3行目(mark)にクイーンを1個ずつ置いていって、
//down,left,right情報を格納、
//その次の行へは進まない。その行で可能な場所にクイー
//ン置き終わったらGPU並列実行
//totalCond がthreadIdになる 各スレッドに down,left,right情報を渡す
//row=2(13行目以降は増えていく。例えばn15だとrow=5)の情報を
//hostDown,hostLeft,hostRightに格納する
hostDown[totalCond]=down[row];
hostLeft[totalCond]=left[row];
hostRight[totalCond]=right[row];
for(int i=0;i<mark;i++){
hostBoard[totalCond*mark+i]=l->board[i];
}
//スレッド数をインクリメントする
totalCond++;
//最大GPU数に達してしまったら一旦ここでGPUを実行する。stepsはGPUの同
//時並行稼働数を制御
//nの数が少ないうちはtotalCondがstepsを超えることはないがnの数が増え
//て行くと超えるようになる。
//ここではtotalCond==stepsの場合だけこの中へ
if(totalCond==l->steps){
//matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUか
//ら出たらmatched=trueになってる
if(matched){
cudaMemcpy(hostTotal,deviceTotal,sizeof(int)*l->steps/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(hostUnique,deviceUnique,sizeof(int)*l->steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<l->steps/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
matched=false;
}
cudaMemcpy(deviceDown,hostDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceLeft,hostLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceRight,hostRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceBoard,hostBoard,sizeof(int)*totalCond*mark,cudaMemcpyHostToDevice);
cudaMemcpy(deviceLocal,hostLocal,sizeof(struct local),cudaMemcpyHostToDevice);
BitBoard_cuda_kernel_b2<<<l->steps/THREAD_NUM,THREAD_NUM >>>(size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,deviceBoard,row,deviceLocal);
//steps数の数だけマルチスレッドで起動するのだが、実際に計算が行われ
//るのはtotalCondの数だけでそれ以外は空回しになる
//GPU内でカウントしているので、GPUから出たらmatched=trueになってる
matched=true;
//totalCond==stepsルートでGPUを実行したらスレッドをまた0から開始す
//る(これによりなんどもsteps数分だけGPUを起動できる)
totalCond=0;
}
//hostDown,hostLeft,hostRightに情報を格納したら1行上に上がる
//これを繰り返すことにより row=2で可能な場所全てにクイーンを置いて
//hostDown,hostLeft,hostRightに情報を格納する
row--;
}
}else{
//置く場所がなければ上に上がる。row==mark行に達するまではCPU側で普通に
//nqueenをやる
row--;
}
}
}
//matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUから出たら
//matched=trueになってる
if(matched){
cudaMemcpy(hostTotal,deviceTotal,sizeof(int)*l->steps/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(hostUnique,deviceUnique,sizeof(int)*l->steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<l->steps/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
matched=false;
}
cudaMemcpy(deviceDown,hostDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceLeft,hostLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceRight,hostRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceBoard,hostBoard,sizeof(int)*totalCond*mark,cudaMemcpyHostToDevice);
cudaMemcpy(deviceLocal,hostLocal,sizeof(struct local),cudaMemcpyHostToDevice);
//size-mark は何行GPUを実行するか totalCondはスレッド数
//steps数の数だけマルチスレッドで起動するのだが、実際に計算が行われるのは
//totalCondの数だけでそれ以外は空回しになる
BitBoard_cuda_kernel_b2<<<l->steps/THREAD_NUM,THREAD_NUM >>>(size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,deviceBoard,mark,deviceLocal);
cudaMemcpy(hostTotal,deviceTotal,sizeof(int)*l->steps/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(hostUnique,deviceUnique,sizeof(int)*l->steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<l->steps/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
TOTAL+=total;
UNIQUE+=unique;
//
cudaFree(deviceDown);
cudaFree(deviceLeft);
cudaFree(deviceRight);
cudaFree(deviceTotal);
cudaFree(deviceUnique);
cudaFree(deviceBoard);
cudaFree(deviceLocal);
cudaFreeHost(hostDown);
cudaFreeHost(hostLeft);
cudaFreeHost(hostRight);
cudaFreeHost(hostTotal);
cudaFreeHost(hostUnique);
cudaFreeHost(hostBoard);
cudaFreeHost(hostLocal);
}
// GPU -n Qが角にある
void BitBoard_backTrack1G(const unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local* l)
{
//何行目からGPUで行くか。ここの設定は変更可能、設定値を多くするほどGPUで並行して動く
/***08 クイーンを2行目まで固定で置くためmarkが3以上必要*********************/
const unsigned int mark=size>12?size-10:3;
const unsigned int h_mark=row;
const unsigned int mask=(1<<size)-1;
unsigned long totalCond=0;
bool matched=false;
//host
unsigned int down[32]; down[row]=_down;
unsigned int right[32]; right[row]=_right;
unsigned int left[32]; left[row]=_left;
//bitmapを配列で持つことにより
//stackを使わないで1行前に戻れる
unsigned int bitmap[32];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
unsigned int bit;
// host
unsigned int* hostDown;
cudaMallocHost((void**) &hostDown,sizeof(int)*l->steps);
unsigned int* hostLeft;
cudaMallocHost((void**) &hostLeft,sizeof(int)*l->steps);
unsigned int* hostRight;
cudaMallocHost((void**) &hostRight,sizeof(int)*l->steps);
unsigned int* hostTotal;
cudaMallocHost((void**) &hostTotal,sizeof(int)*l->steps);
unsigned int* hostUnique;
cudaMallocHost((void**) &hostUnique,sizeof(int)*l->steps);
// device
unsigned int* deviceDown;
cudaMalloc((void**) &deviceDown,sizeof(int)*l->steps);
unsigned int* deviceLeft;
cudaMalloc((void**) &deviceLeft,sizeof(int)*l->steps);
unsigned int* deviceRight;
cudaMalloc((void**) &deviceRight,sizeof(int)*l->steps);
unsigned int* deviceTotal;
cudaMalloc((void**) &deviceTotal,sizeof(int)*l->steps/THREAD_NUM);
unsigned int* deviceUnique;
cudaMalloc((void**) &deviceUnique,sizeof(int)*l->steps/THREAD_NUM);
// 構造体の宣言とコピー
struct local* hostLocal;
cudaMallocHost((void**) &hostLocal,sizeof(struct local)*l->steps);
struct local* deviceLocal;
cudaMalloc((void**) &deviceLocal,sizeof(struct local)); // only element 0 is ever read by the kernel
hostLocal[0].BOUND1=l->BOUND1;
hostLocal[0].BOUND2=l->BOUND2;
hostLocal[0].TOPBIT=l->TOPBIT;
hostLocal[0].ENDBIT=l->ENDBIT;
hostLocal[0].SIDEMASK=l->SIDEMASK;
hostLocal[0].LASTMASK=l->LASTMASK;
hostLocal[0].steps=l->steps;
for(int i=0;i<MAX;i++){
hostLocal[0].board[i]=l->board[i];
}
//12行目までは3行目までCPU->row==mark以下で 3行目までの
//down,left,right情報を hostDown,hostLeft,hostRight
//に格納
//する->3行目以降をGPUマルチスレッドで実行し結果を取得
//13行目以降はCPUで実行する行数が1個ずつ増えて行く
//例えばn15だとrow=5までCPUで実行し、
//それ以降はGPU(現在の設定だとGPUでは最大10行実行する
//ようになっている)
//while(row>=0) {
int rowP=0;
unsigned long total=0;
unsigned long unique=0;
while(row>=h_mark) {
//bitmap[row]=00000000 クイーンを
//どこにも置けないので1行上に戻る
//06GPU こっちのほうが優秀
if(bitmap[row]==0){ row--; }
else{//おける場所があれば進む
if(row<l->BOUND1) { /***11 枝刈り*********************/
bitmap[row]&=~2; // bm|=2; bm^=2; (bm&=~2と同等)
}
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0){//置く場所があれば先に進む
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
row++;
if(row==mark){
//3行目(mark)にクイーンを1個ずつ置いていって、
//down,left,right情報を格納、
//その次の行へは進まない。その行で可能な場所にクイー
//ン置き終わったらGPU並列実行
//totalCond がthreadIdになる 各スレッドに down,left,right情報を渡す
//row=2(13行目以降は増えていく。例えばn15だとrow=5)の情報を
//hostDown,hostLeft,hostRightに格納する
hostDown[totalCond]=down[row];
hostLeft[totalCond]=left[row];
hostRight[totalCond]=right[row];
//スレッド数をインクリメントする
totalCond++;
//最大GPU数に達してしまったら一旦ここでGPUを実行する。stepsはGPUの同
//時並行稼働数を制御
//nの数が少ないうちはtotalCondがstepsを超えることはないがnの数が増え
//て行くと超えるようになる。
//ここではtotalCond==stepsの場合だけこの中へ
if(totalCond==l->steps){
//matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUか
//ら出たらmatched=trueになってる
if(matched){
cudaMemcpy(hostTotal,deviceTotal,sizeof(int)*l->steps/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(hostUnique,deviceUnique,sizeof(int)*l->steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<l->steps/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
matched=false;
}
cudaMemcpy(deviceDown,hostDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceLeft,hostLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceRight,hostRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceLocal,hostLocal,sizeof(struct local),cudaMemcpyHostToDevice);
BitBoard_cuda_kernel_b1<<<l->steps/THREAD_NUM,THREAD_NUM >>>(size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,row,deviceLocal);
//steps数の数だけマルチスレッドで起動するのだが、実際に計算が行われ
//るのはtotalCondの数だけでそれ以外は空回しになる
//GPU内でカウントしているので、GPUから出たらmatched=trueになってる
matched=true;
//totalCond==stepsルートでGPUを実行したらスレッドをまた0から開始す
//る(これによりなんどもsteps数分だけGPUを起動できる)
totalCond=0;
}
//hostDown,hostLeft,hostRightに情報を格納したら1行上に上がる
//これを繰り返すことにより row=2で可能な場所全てにクイーンを置いて
//hostDown,hostLeft,hostRightに情報を格納する
row--;
}
}else{
//置く場所がなければ上に上がる。row==mark行に達するまではCPU側で普通に
//nqueenをやる
row--;
}
}
}
//matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUから出たら
//matched=trueになってる
if(matched){
cudaMemcpy(hostTotal,deviceTotal,sizeof(int)*l->steps/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(hostUnique,deviceUnique,sizeof(int)*l->steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int col=0;col<l->steps/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
matched=false;
}
cudaMemcpy(deviceDown,hostDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceLeft,hostLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceRight,hostRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceLocal,hostLocal,sizeof(struct local),cudaMemcpyHostToDevice);
BitBoard_cuda_kernel_b1<<<l->steps/THREAD_NUM,THREAD_NUM >>>(size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,mark,deviceLocal);
cudaMemcpy(hostTotal,deviceTotal,sizeof(int)*l->steps/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(hostUnique,deviceUnique,sizeof(int)*l->steps/THREAD_NUM,cudaMemcpyDeviceToHost);
// 集計
for(int col=0;col<l->steps/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
TOTAL+=total;
UNIQUE+=unique;
//開放
cudaFree(deviceDown);
cudaFree(deviceLeft);
cudaFree(deviceRight);
cudaFree(deviceTotal);
cudaFree(deviceUnique);
cudaFree(deviceLocal);
cudaFreeHost(hostDown);
cudaFreeHost(hostLeft);
cudaFreeHost(hostRight);
cudaFreeHost(hostTotal);
cudaFreeHost(hostUnique);
cudaFreeHost(hostLocal);
}
// GPU -n ビットボードの実行 角にQがある・ないの分岐を行う
void BitBoard_build(const unsigned int size,int steps)
{
if(size<=0||size>32){return;}
/**
int型は unsigned とする
total: グローバル変数TOTALへのアクセスを極小化する
*/
struct local l; //GPU で扱う構造体
l.steps=steps;
unsigned int bit=1;
l.board[0]=1;
unsigned int left=bit<<1,down=bit,right=bit>>1;
/**
2行目は右から3列目から左端から2列目まで
*/
for(l.BOUND1=2;l.BOUND1<size-1;l.BOUND1++){
l.board[1]=bit=(1<<l.BOUND1);
BitBoard_backTrack1G(size,2,(left|bit)<<1,(down|bit),(right|bit)>>1,&l);
}
l.TOPBIT=1<<(size-1);
l.SIDEMASK=l.LASTMASK=(l.TOPBIT|1);
l.ENDBIT=(l.TOPBIT>>1);
/**
1行目右から2列目から
偶数個は1/2 n=8 なら 1,2,3 奇数個は1/2+1 n=9 なら 1,2,3,4
*/
for(l.BOUND1=1,l.BOUND2=size-1-1;l.BOUND1<l.BOUND2;l.BOUND1++,l.BOUND2--){
l.board[0]=bit=(1<<l.BOUND1);
BitBoard_backTrack2G(size,1,bit<<1,bit,bit>>1,&l);
l.LASTMASK|=l.LASTMASK>>1|l.LASTMASK<<1;
l.ENDBIT>>=1;
}
}
// CUDA 初期化
bool InitCUDA()
{
int count;
cudaGetDeviceCount(&count);
if(count==0){fprintf(stderr,"There is no device.\n");return false;}
unsigned int i;
for(i=0;i<count;++i){
struct cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){if(prop.major>=1){break;} }
}
if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
cudaSetDevice(i);
return true;
}
//メイン
int main(int argc,char** argv)
{
bool cpu=false,cpur=false,gpu=false,gpuBitBoard=false;
unsigned int argstart=2;
if(argc>=2&&argv[1][0]=='-'){
if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;}
else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;}
else if(argv[1][1]=='n'||argv[1][1]=='N'){gpuBitBoard=true;}
else{ gpuBitBoard=true; } //デフォルトをgpuとする
argstart=2;
}
if(argc<argstart){
printf("Usage: %s [-c|-g|-r|-s] n steps\n",argv[0]);
printf(" -r: CPU 再帰\n");
printf(" -c: CPU 非再帰\n");
printf(" -g: GPU 再帰\n");
printf(" -n: GPU ビットボード\n");
}
if(cpur){ printf("\n\n対称解除法 再帰 \n"); }
else if(cpu){ printf("\n\n対称解除法 非再帰 \n"); }
else if(gpu){ printf("\n\n対称解除法 GPU\n"); }
else if(gpuBitBoard){ printf("\n\n対称解除法 GPUビットボード \n"); }
if(cpu||cpur)
{
unsigned int min=4;
unsigned int targetN=17;
struct timeval t0;
struct timeval t1;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(unsigned int size=min;size<=targetN;size++){
local l;
gettimeofday(&t0,NULL);//計測開始
if(cpur){ //再帰
symmetry_R(size,&l);
}
if(cpu){ //非再帰
symmetry_NR(size,&l);
}
//
gettimeofday(&t1,NULL);//計測終了
unsigned int ss;
unsigned int ms;
unsigned int dd;
if(t1.tv_usec<t0.tv_usec) {
dd=(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
}else {
dd=(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}//end if
unsigned int hh=ss/3600;
unsigned int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
} //end for
}//end if
if(gpu||gpuBitBoard)
{
int steps=24576;
if(!InitCUDA()){return 0;}
unsigned int min=4;
unsigned int targetN=21;
struct timeval t0;
struct timeval t1;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(unsigned int size=min;size<=targetN;size++){
gettimeofday(&t0,NULL);
if(gpu){
TOTAL=UNIQUE=0;
local l[MAX];
GPU_symmetry_R(size,&l[0]);
TOTAL=l->TOTAL;
UNIQUE=l->UNIQUE;
}else if(gpuBitBoard){
TOTAL=UNIQUE=0;
BitBoard_build(size,steps);
}
gettimeofday(&t1,NULL);
unsigned int ss;
unsigned int ms;
unsigned int dd;
if (t1.tv_usec<t0.tv_usec) {
dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
} else {
dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}//end if
unsigned int hh=ss/3600;
unsigned int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
}
}
return 0;
}
|
3824ca9f474250e2d94dc5f8065d61e89689c640.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <functional>
using std::function;
static const int threadsPerBlock = 256;
__global__ void reductionByAdd(int *a, int *res, int elCnt) {
int firstThreadBlockId = blockIdx.x * blockDim.x;
int threadId = firstThreadBlockId + threadIdx.x;
for(int i = 1; i < blockDim.x; i <<= 1) {
if(threadId % (2 * i) == 0 && threadId < elCnt && threadId + i < elCnt) {
a[threadId] += a[threadId + i];
}
__syncthreads();
}
if(threadId == firstThreadBlockId)
res[blockIdx.x] = a[firstThreadBlockId];
}
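// reductionByAdd uses interleaved addressing directly in global memory: at
// stride i, only threads whose id is a multiple of 2*i add their right-hand
// neighbour, so most lanes of a warp idle (branch divergence) and every step
// re-reads global memory. Each block finally writes its partial sum to
// res[blockIdx.x].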
__global__ void reductionByAddWithShared(int *a, int *res, int elCnt) {
extern __shared__ int sdata[];
int threadId = threadIdx.x;
int glThreadId = blockIdx.x * blockDim.x + threadIdx.x;
sdata[threadId] = glThreadId < elCnt ? a[glThreadId] : 0;
__syncthreads();
for (int i = 1; i < blockDim.x; i <<= 1) {
if(threadId % (2 * i) == 0 && threadId + i < blockDim.x) {
sdata[threadId] += sdata[threadId + i];
}
__syncthreads();
}
if(threadId == 0)
res[blockIdx.x] = sdata[0];
}
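// Same interleaved scheme, but each thread first stages one element into
// shared memory (threads past elCnt contribute 0), so the log2(blockDim)
// reduction steps read and write sdata[] instead of global memory; only the
// final partial sum per block goes back to res[blockIdx.x].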
void checkCudaError(hipError_t error) {
if(error != hipSuccess)
{
fprintf(stderr, "Failed! (error code %s)\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
}
__host__ void recursiveAddReduction(int *a, int *res, int elCnt, function<void (int, int, int *, int *n, int)> reductionImpl) {
int blocksPerGrid = (elCnt + threadsPerBlock - 1) / threadsPerBlock;
size_t sizeRes = blocksPerGrid * sizeof(int);
//printf("%d\n", blocksPerGrid);
int *result = NULL;
hipError_t error = hipMalloc((void**)&result, sizeRes);
checkCudaError(error);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float time;
hipEventRecord(start, 0);
reductionImpl(blocksPerGrid, threadsPerBlock, a, result, elCnt);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("Time is : %f\n", time);
hipEventDestroy(start);
hipEventDestroy(stop);
if(elCnt > threadsPerBlock) {
recursiveAddReduction(result, res, blocksPerGrid, reductionImpl);
} else {
error = hipMemcpy(res, result, sizeof(int), hipMemcpyDeviceToHost);
checkCudaError(error);
}
error = hipFree(result);
checkCudaError(error);
error = hipGetLastError();
checkCudaError(error);
}
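// Recursion sketch for the 400000-element input used in main() (illustrative):
// level 1 reduces 400000 values with 1563 blocks (ceil(400000/256)), level 2
// reduces those 1563 partial sums with 7 blocks, and level 3 reduces 7 values
// with 1 block, whose result is copied back to the host. Each level prints its
// own kernel time via the hipEvent timing above.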
void resultVerification(int redRes, int *m, int cnt) {
int rightRes = 0;
for (int i = 0; i < cnt; ++i) {
rightRes += m[i];
}
printf("%d\n", rightRes);
printf("%d\n", redRes);
int abSub = abs(rightRes - redRes);
printf("%d\n", abSub);
if (abSub != 0) {
fprintf(stderr, "Result verification failed!\n");
exit(EXIT_FAILURE);
}
}
int main() {
int elementCnt = 400000; // 400K elements
size_t size = elementCnt * sizeof(int);
int *hosta = (int *)malloc(size);
int *ans = (int *)malloc(sizeof(int));
if(hosta == NULL || ans == NULL) {
fprintf(stderr, "Failed to allocate host data!\n");
exit(EXIT_FAILURE);
}
for(int i = 0; i < elementCnt; ++i) {
hosta[i] = rand() % 100;
}
int *deva = NULL;
hipError_t error = hipMalloc((void**)&deva, size);
checkCudaError(error);
error = hipMemcpy(deva, hosta, size, hipMemcpyHostToDevice);
checkCudaError(error);
printf("With Shared mem:\n");
recursiveAddReduction(deva, ans, elementCnt, [] (int blocksPerGrid, int threadsPerBlock, int *a, int *res, int elCnt) -> void
{
hipLaunchKernelGGL(( reductionByAddWithShared), dim3(blocksPerGrid), dim3(threadsPerBlock), threadsPerBlock*sizeof(int), 0, a, res, elCnt);
});
resultVerification(*ans, hosta, elementCnt);
*ans = -1;
printf("\nOnly global mem:\n");
recursiveAddReduction(deva, ans, elementCnt, [] (int blocksPerGrid, int threadsPerBlock, int *a, int *res, int elCnt) -> void
{
hipLaunchKernelGGL(( reductionByAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, a, res, elCnt);
});
resultVerification(*ans, hosta, elementCnt);
error = hipFree(deva);
checkCudaError(error);
free(hosta);
free(ans);
error = hipDeviceReset();
checkCudaError(error);
printf("Done\n");
return 0;
} | 3824ca9f474250e2d94dc5f8065d61e89689c640.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <functional>
using std::function;
static const int threadsPerBlock = 256;
__global__ void reductionByAdd(int *a, int *res, int elCnt) {
int firstThreadBlockId = blockIdx.x * blockDim.x;
int threadId = firstThreadBlockId + threadIdx.x;
for(int i = 1; i < blockDim.x; i <<= 1) {
if(threadId % (2 * i) == 0 && threadId < elCnt && threadId + i < elCnt) {
a[threadId] += a[threadId + i];
}
__syncthreads();
}
if(threadId == firstThreadBlockId)
res[blockIdx.x] = a[firstThreadBlockId];
}
__global__ void reductionByAddWithShared(int *a, int *res, int elCnt) {
extern __shared__ int sdata[];
int threadId = threadIdx.x;
int glThreadId = blockIdx.x * blockDim.x + threadIdx.x;
sdata[threadId] = glThreadId < elCnt ? a[glThreadId] : 0;
__syncthreads();
for (int i = 1; i < blockDim.x; i <<= 1) {
if(threadId % (2 * i) == 0 && threadId + i < blockDim.x) {
sdata[threadId] += sdata[threadId + i];
}
__syncthreads();
}
if(threadId == 0)
res[blockIdx.x] = sdata[0];
}
void checkCudaError(cudaError error) {
if(error != cudaSuccess)
{
fprintf(stderr, "Failed! (error code %s)\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
}
__host__ void recursiveAddReduction(int *a, int *res, int elCnt, function<void (int, int, int *, int *n, int)> reductionImpl) {
int blocksPerGrid = (elCnt + threadsPerBlock - 1) / threadsPerBlock;
size_t sizeRes = blocksPerGrid * sizeof(int);
//printf("%d\n", blocksPerGrid);
int *result = NULL;
cudaError_t error = cudaMalloc((void**)&result, sizeRes);
checkCudaError(error);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float time;
cudaEventRecord(start, 0);
reductionImpl(blocksPerGrid, threadsPerBlock, a, result, elCnt);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("Time is : %f\n", time);
cudaEventDestroy(start);
cudaEventDestroy(stop);
if(elCnt > threadsPerBlock) {
recursiveAddReduction(result, res, blocksPerGrid, reductionImpl);
} else {
error = cudaMemcpy(res, result, sizeof(int), cudaMemcpyDeviceToHost);
checkCudaError(error);
}
error = cudaFree(result);
checkCudaError(error);
error = cudaGetLastError();
checkCudaError(error);
}
void resultVerification(int redRes, int *m, int cnt) {
int rightRes = 0;
for (int i = 0; i < cnt; ++i) {
rightRes += m[i];
}
printf("%d\n", rightRes);
printf("%d\n", redRes);
int abSub = abs(rightRes - redRes);
printf("%d\n", abSub);
if (abSub != 0) {
fprintf(stderr, "Result verification failed!\n");
exit(EXIT_FAILURE);
}
}
int main() {
int elementCnt = 400000; // 400K elements
size_t size = elementCnt * sizeof(int);
int *hosta = (int *)malloc(size);
int *ans = (int *)malloc(sizeof(int));
if(hosta == NULL || ans == NULL) {
fprintf(stderr, "Failed to allocate host data!\n");
exit(EXIT_FAILURE);
}
for(int i = 0; i < elementCnt; ++i) {
hosta[i] = rand() % 100;
}
int *deva = NULL;
cudaError_t error = cudaMalloc((void**)&deva, size);
checkCudaError(error);
error = cudaMemcpy(deva, hosta, size, cudaMemcpyHostToDevice);
checkCudaError(error);
printf("With Shared mem:\n");
recursiveAddReduction(deva, ans, elementCnt, [] (int blocksPerGrid, int threadsPerBlock, int *a, int *res, int elCnt) -> void
{
reductionByAddWithShared<<<blocksPerGrid, threadsPerBlock, threadsPerBlock*sizeof(int)>>>(a, res, elCnt);
});
resultVerification(*ans, hosta, elementCnt);
*ans = -1;
printf("\nOnly global mem:\n");
recursiveAddReduction(deva, ans, elementCnt, [] (int blocksPerGrid, int threadsPerBlock, int *a, int *res, int elCnt) -> void
{
reductionByAdd<<<blocksPerGrid, threadsPerBlock>>>(a, res, elCnt);
});
resultVerification(*ans, hosta, elementCnt);
error = cudaFree(deva);
checkCudaError(error);
free(hosta);
free(ans);
error = cudaDeviceReset();
checkCudaError(error);
printf("Done\n");
return 0;
} |
dd1351bec9c1e86d851dc92eb169cd3f5127149f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include "gpu_hmma.h"
#include <stdio.h>
#if __CUDA_ARCH__ >= 700
// C = A * B or A.T * B
// Dims: M, N, K
// N64: N even mult of 64
// A is sparse, B is dense, C is dense
// 32x64x16 warp tile
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(256) hgemm_blocksparse_64x64x64_xn_sdd(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
const uint stdA = 64+16;
const uint stdB = 64+16;
const uint stdC = 512+4;
__shared__ float fShare[stdC*16];
ehalf* hShare = (ehalf*)fShare;
uint2* Lut2s = (uint2*)&fShare[stdC*16];
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
uint tx = tid % 8;
uint ty = tid / 8;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 256)
{
uint2 entry = Lut[i];
entry.x *= 64*64; // 4096 entries of A per block
entry.y *= szHeadState*64; // 64 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint storAB = ty*stdA + tx*8; // assume stdA == stdB
uint loadA = fragmentA<OP_A,m16n16k16>::get_idx(tid, stdA, (tid & 192)*(OP_A == OP_N ? 1 : stdA)*16/64 + (tid & 32)*(OP_A == OP_N ? stdA : 1));
uint loadB = fragmentB<OP_N,m16n16k16>::get_idx(tid, stdB, (tid & 192)*stdB*16/64 + stdA*64);
uint b = idx_N*64 + tx*8;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + tid*8;
uint offsetB = idx_B*szCtxHeadState + ty*szHeadState + idx_H*szState + b;
bool inB = N64 || b < szState;
fragmentC<OP_A,OP_N,m16n16k16> fragC[2][4];
int idx_lut = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint2 entry = Lut2s[idx_lut];
uint4 b00 = {0};
uint4 b32 = {0};
entry.x += offsetA;
entry.y += offsetB;
uint4 a00 = *(uint4*)&A[entry.x + 0*64];
uint4 a32 = *(uint4*)&A[entry.x + 32*64];
if (inB)
{
b00 = *(uint4*)&B[entry.y + 0*szHeadState];
b32 = *(uint4*)&B[entry.y + 32*szHeadState];
}
__syncthreads();
*(uint4*)&hShare[storAB + 0*stdA + 0*stdA] = a00;
*(uint4*)&hShare[storAB + 32*stdA + 0*stdA] = a32;
*(uint4*)&hShare[storAB + 0*stdB + 64*stdA] = b00;
*(uint4*)&hShare[storAB + 32*stdB + 64*stdA] = b32;
__syncthreads();
fragmentA<OP_A,m16n16k16> fragA[2];
fragmentB<OP_N,m16n16k16> fragB[4];
for (int i = 0; i < 2; i++)
fragA[i].load(hShare, loadA + (OP_A == OP_N ? stdA : 1)*i*16, stdA);
for (int i = 0; i < 4; i++)
fragB[i].load(hShare, loadB + i*16, stdB);
for (int i = 0; i < 2; i++)
for (int j = 0; j < 4; j++)
fragC[i][j].mma_sync(fragA[i], fragB[j]);
} while (++idx_lut < lut_size);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
uint txc = tid % 16;
uint tyc = tid / 16;
uint c = idx_N*64 + txc*4;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_A,OP_N,m16n16k16>::get_idx(tid, stdC, (tid & 224)*2);
uint offsetC = idx_B*szCtxHeadState + (idx_M*64 + tyc)*szHeadState + idx_H*szState + c;
for (int i = 0; i < 2; i++)
{
__syncthreads();
for (int j = 0; j < 4; j++)
fragC[i][j].store(fShare, storC + j*16, stdC);
__syncthreads();
if (N64 || c < szState)
{
for (int j = 0; j < 2; j++)
*(uint2*)&C[offsetC + szHeadState*(j*32 + i*16)] = to_half4(
ew_add(
ew_add(
*(float4*)&fShare[loadC + j*64 + 0*128],
*(float4*)&fShare[loadC + j*64 + 1*128]),
ew_add(
*(float4*)&fShare[loadC + j*64 + 2*128],
*(float4*)&fShare[loadC + j*64 + 3*128])
)
);
}
}
}
else
{
uint c = idx_N*64 + tx*8;
uint offsetC = idx_B*szCtxHeadState + (idx_M*64 + ty)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
uint4 zero = {0};
*(uint4*)&C[offsetC + szHeadState* 0] = zero;
*(uint4*)&C[offsetC + szHeadState*32] = zero;
}
}
}
// C = A * B or A.T * B
// Dims: M, N, K
// N64: N even mult of 64
// A is sparse, B is dense, C is dense
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(128) hgemm_blocksparse_32x64x32_xn_sdd(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
const uint stdA = 48;
const uint stdB = 80;
const uint stdC = 132;
__shared__ ehalf hShare[(stdA + stdB)*32];
float* fShare = (float*)hShare;
uint2* Lut2s = (uint2*)&hShare[(stdA + stdB)*32];
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
uint txb = tid % 8;
uint tyb = tid / 8;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 128)
{
uint2 entry = Lut[i];
entry.x *= 32*32; // 1024 entries of A per block
entry.y *= szHeadState*32; // 32 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint txa = tid % 4;
uint tya = tid / 4;
uint storA = tya*stdA + txa*8;
uint storB = tyb*stdB + txb*8 + stdA*32;
uint loadA = fragmentA<OP_A,m16n16k16>::get_idx(tid, stdA, (tid & 64)*(OP_A == OP_N ? 1 : stdA)*16/64);
uint loadB = fragmentB<OP_N,m16n16k16>::get_idx(tid, stdB, (tid & 64)*stdB*16/64 + (tid & 32) + stdA*32);
uint b = idx_N*64 + txb*8;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + tid*8;
uint offsetB = idx_B*szCtxHeadState + tyb*szHeadState + idx_H*szState + b;
bool inB = N64 || b < szState;
fragmentC<OP_A,OP_N,m16n16k16> fragC[2][2];
int idx_lut = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint2 entry = Lut2s[idx_lut];
uint4 b00 = {0};
uint4 b16 = {0};
entry.x += offsetA;
entry.y += offsetB;
uint4 a00 = *(uint4*)&A[entry.x];
if (inB)
{
b00 = *(uint4*)&B[entry.y + 0*szHeadState];
b16 = *(uint4*)&B[entry.y + 16*szHeadState];
}
__syncthreads();
*(uint4*)&hShare[storA] = a00;
*(uint4*)&hShare[storB + 0*stdB] = b00;
*(uint4*)&hShare[storB + 16*stdB] = b16;
__syncthreads();
fragmentA<OP_A,m16n16k16> fragA[2];
fragmentB<OP_N,m16n16k16> fragB[2];
for (int i = 0; i < 2; i++)
{
fragA[i].load(hShare, loadA + (OP_A == OP_N ? stdA : 1)*i*16, stdA);
fragB[i].load(hShare, loadB + i*16, stdB);
}
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
fragC[i][j].mma_sync(fragA[i], fragB[j]);
} while (++idx_lut < lut_size);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
uint txc = tid % 16;
uint tyc = tid / 16;
uint c = idx_N*64 + txc*4;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_A,OP_N,m16n16k16>::get_idx(tid, stdC, tid & 96);
uint offsetC = idx_B*szCtxHeadState + (idx_M*32 + tyc)*szHeadState + idx_H*szState + c;
for (int i = 0; i < 2; i++)
{
__syncthreads();
for (int j = 0; j < 2; j++)
fragC[i][j].store(fShare, storC + j*16, stdC);
__syncthreads();
if (N64 || c < szState)
{
for (int j = 0; j < 2; j++)
*(uint2*)&C[offsetC + szHeadState*(j*8 + i*16)] = to_half4(ew_add(
*(float4*)&fShare[loadC + stdC*j*8 + 0],
*(float4*)&fShare[loadC + stdC*j*8 + 64]));
}
}
}
else
{
uint c = idx_N*64 + txb*8;
uint offsetC = idx_B*szCtxHeadState + (idx_M*32 + tyb)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
uint4 zero = {0};
*(uint4*)&C[offsetC + szHeadState* 0] = zero;
*(uint4*)&C[offsetC + szHeadState*16] = zero;
}
}
}
// C = A * B or A.T * B
// Dims: M, N, K
// N64: N even mult of 64
// A is sparse, B is dense, C is dense
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(64) hgemm_blocksparse_16x64x16_xn_sdd(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
const uint stdA = 16;
const uint stdB = 80;
const uint stdC = 68;
__shared__ ehalf hShare[(stdA + stdB)*16];
uint2* Lut2s = (uint2*)&hShare[(stdA + stdB)*16];
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
uint txb = tid % 8;
uint tyb = tid / 8;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 64)
{
uint2 entry = Lut[i];
entry.x *= 16*16; // 256 entries of A per block
entry.y *= szHeadState*16; // 16 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint txa = tid % 4;
uint tya = tid / 4;
uint storA = tya*stdA + txa*4;
uint storB = tyb*stdB + txb*8 + 16*stdA;
uint loadA = fragmentA<OP_A,m16n16k16>::get_idx(tid, stdA);
uint loadB = fragmentB<OP_N,m16n16k16>::get_idx(tid, stdB, 16*stdA + (tid & 32));
uint b = idx_N*64 + txb*8;
bool inB = N64 || b < szState;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + tid*4;
uint offsetB = idx_B*szCtxHeadState + tyb*szHeadState + idx_H*szState + b;
fragmentC<OP_A,OP_N,m16n16k16> fragC[2];
int idx_lut = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint2 entry = Lut2s[idx_lut];
uint4 b0 = {0};
uint4 b8 = {0};
entry.x += offsetA;
entry.y += offsetB;
uint2 a0 = *(uint2*)&A[entry.x];
if (inB)
{
b0 = *(uint4*)&B[entry.y + 0*szHeadState];
b8 = *(uint4*)&B[entry.y + 8*szHeadState];
}
__syncthreads();
*(uint2*)&hShare[storA] = a0;
*(uint4*)&hShare[storB + 0*stdB] = b0;
*(uint4*)&hShare[storB + 8*stdB] = b8;
__syncthreads();
fragmentA<OP_A,m16n16k16> fragA;
fragmentB<OP_N,m16n16k16> fragB;
fragA.load(hShare, loadA, stdA);
#pragma unroll
for (int j = 0; j < 2; j++)
{
fragB.load(hShare, loadB + j*16, stdB);
fragC[j].mma_sync(fragA, fragB);
}
} while (++idx_lut < lut_size);
// allow assembler to forget these registers in the main loop
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// use thread stride of 4 to allow use of shared stride of 68
// which minimizes shared bank conflicts on write.
uint txc = tid % 16;
uint tyc = tid / 16;
uint c = idx_N*64 + txc*4;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_A,OP_N,m16n16k16>::get_idx(tid, stdC, tid & 32);
uint offsetC = idx_B*szCtxHeadState + (idx_M*16 + tyc)*szHeadState + idx_H*szState + c;
__syncthreads();
for (int j = 0; j < 2; j++)
fragC[j].store(hShare, storC + j*16, stdC);
__syncthreads();
if (N64 || c < szState)
{
for (int i = 0; i < 4; i++)
*(uint2*)&C[offsetC + szHeadState*i*4] = *(uint2*)&hShare[loadC + stdC*i*4];
}
}
else
{
uint c = idx_N*64 + txb*8;
uint offsetC = idx_B*szCtxHeadState + (idx_M*16 + tyb)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
uint4 zero = {0};
*(uint4*)&C[offsetC + szHeadState*0] = zero;
*(uint4*)&C[offsetC + szHeadState*8] = zero;
}
}
}
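// C = A * B or A.T * B
// Dims: M, N, K
// N64: N even mult of 64
// A is sparse, B is dense, C is dense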
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(64) hgemm_blocksparse_8x64x8_xn_sdd(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
const uint stdA = 8;
const uint stdB = 80;
const uint stdC = 68;
__shared__ ehalf hShare[(stdA + stdB)*16];
uint2* Lut2s = (uint2*)&hShare[(stdA + stdB)*16];
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 64)
{
uint2 entry = Lut[i];
entry.x *= 8*8; // 64 entries of A per block
entry.y *= szHeadState*8; // 8 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint t32 = tid & 32;
uint t31 = tid & 31;
uint txb = tid % 8;
uint tyb = t31 / 8;
uint storA = tid*2;
uint storB = tyb*stdB + txb*8 + t32*20 + 16*stdA;
uint loadA = fragmentA<OP_A,m8n32k16>::get_idx(tid, stdA);
uint loadB = fragmentB<OP_N,m8n32k16>::get_idx(tid, stdB, t32 + 16*stdA);
uint b = idx_N*64 + txb*8;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + t31*2;
uint offsetB = idx_B*szCtxHeadState + tyb*szHeadState + idx_H*szState + b;
fragmentC<OP_A,OP_N,m8n32k16> fragC;
uint idx_lut = t32 / 32;
uint idx_lut2 = 0;
uint lut_size2 = (lut_size + 1)/2;
#pragma unroll 1
do
{
uint a0 = 0;
uint4 b0 = {0};
uint4 b4 = {0};
if (idx_lut < lut_size)
{
uint2 entry = Lut2s[idx_lut];
entry.x += offsetA;
entry.y += offsetB;
a0 = *(uint*)&A[entry.x];
if (b < szState)
{
b0 = *(uint4*)&B[entry.y + 0*szHeadState];
b4 = *(uint4*)&B[entry.y + 4*szHeadState];
}
}
__syncthreads();
*(uint* )&hShare[storA ] = a0;
*(uint4*)&hShare[storB + 0*stdB] = b0;
*(uint4*)&hShare[storB + 4*stdB] = b4;
__syncthreads();
fragmentA<OP_A,m8n32k16> fragA;
fragmentB<OP_N,m8n32k16> fragB;
fragA.load(hShare, loadA, stdA);
fragB.load(hShare, loadB, stdB);
fragC.mma_sync(fragA, fragB);
idx_lut += 2;
} while (++idx_lut2 < lut_size2);
// allow assembler to forget these registers in the main loop
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// use thread stride of 4 to allow use of shared stride of 68
// which minimizes shared bank conflicts on write.
uint txc = tid % 16;
uint tyc = tid / 16;
uint c = idx_N*64 + txc*4;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_A,OP_N,m8n32k16>::get_idx(tid, stdC, tid & 32);
uint offsetC = idx_B*szCtxHeadState + (idx_M*8 + tyc)*szHeadState + idx_H*szState + c;
__syncthreads();
fragC.store(hShare, storC, stdC);
__syncthreads();
if (N64 || c < szState)
{
for (int i = 0; i < 2; i++)
*(uint2*)&C[offsetC + szHeadState*i*4] = *(uint2*)&hShare[loadC + stdC*i*4];
}
}
else
{
uint txc = tid % 8;
uint tyc = tid / 8;
uint c = idx_N*64 + txc*8;
uint offsetC = idx_B*szCtxHeadState + (idx_M*8 + tyc)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
uint4 zero = {0};
*(uint4*)&C[offsetC + szHeadState*0] = zero;
}
}
}
// C = A * B.T
// Dims: M, N, K
// K64: K even mult of 64
// A is dense, B is dense, C is sparse
// 32x32x32 warp tile
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(256) hgemm_blocksparse_64x64x64_nt_dds(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
const uint stdA = 64 + 8;
const uint stdB = 64 + 8;
const uint stdC = 64*4 + 4;
__shared__ ehalf hShare[(stdA + stdB)*64];
float* fShare = (float*)hShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 8;
uint ty = tid / 8;
uint k = tx * 8;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA00 = idx_B*szCtxHeadState + (idx_M*64 + ty)*szHeadState + idx_H*szState + k;
uint offsetB00 = idx_B*szCtxHeadState + (idx_N*64 + ty)*szHeadState + idx_H*szState + k;
uint offsetA32 = offsetA00 + szHeadState*32;
uint offsetB32 = offsetB00 + szHeadState*32;
uint storA = ty*stdA + k;
uint storB = ty*stdB + k;
uint loadA = fragmentA<OP_N,m16n16k16>::get_idx(tid, stdA, (tid & 64)*stdA*32/64 + (tid & 128)*32/128 + 0*stdA);
uint loadB = fragmentB<OP_T,m16n16k16>::get_idx(tid, stdB, (tid & 32)*stdB*32/32 + (tid & 128)*32/128 + 64*stdA);
fragmentC<OP_N,OP_T,m16n16k16> fragC[2][2]; // m,n
uint loop = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint4 a00 = {0}, a32 = {0};
uint4 b00 = {0}, b32 = {0};
if (K64 || k < szState)
{
a00 = *(uint4*)&A[offsetA00];
a32 = *(uint4*)&A[offsetA32];
b00 = *(uint4*)&B[offsetB00];
b32 = *(uint4*)&B[offsetB32];
}
offsetA00 += 64;
offsetA32 += 64;
offsetB00 += 64;
offsetB32 += 64;
if (!K64)
k += 64;
__syncthreads();
*(uint4*)&hShare[storA + 0*stdA + 0*stdA] = a00;
*(uint4*)&hShare[storA + 32*stdA + 0*stdA] = a32;
*(uint4*)&hShare[storB + 0*stdB + 64*stdA] = b00;
*(uint4*)&hShare[storB + 32*stdB + 64*stdA] = b32;
__syncthreads();
fragmentA<OP_N,m16n16k16> fragA[2][2]; // m,k
fragmentB<OP_T,m16n16k16> fragB[2][2]; // n,k
for (int m = 0; m < 2; m++)
for (int k = 0; k < 2; k++)
fragA[m][k].load(hShare, loadA + m*16*stdA + k*16, stdA);
for (int n = 0; n < 2; n++)
for (int k = 0; k < 2; k++)
fragB[n][k].load(hShare, loadB + n*16*stdB + k*16, stdB);
for (int m = 0; m < 2; m++)
for (int n = 0; n < 2; n++)
for (int k = 0; k < 2; k++)
fragC[m][n].mma_sync(fragA[m][k], fragB[n][k]);
} while (++loop < loops);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
uint txc = tid % 16;
uint tyc = tid / 16;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_N,OP_T,m16n16k16>::get_idx(tid, stdC, (tid & 224));
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*64*64 + tid*4;
for (int m = 0; m < 2; m++)
{
__syncthreads();
for (int n = 0; n < 2; n++)
fragC[m][n].store(fShare, storC + n*16, stdC);
__syncthreads();
for (int i = 0; i < 2; i++)
{
float4 sum4 = ew_add(
*(float4*)&fShare[loadC + i*64 + 0*128],
*(float4*)&fShare[loadC + i*64 + 1*128]
);
store((CV*)(C + 64*(i*32 + m*16)), sum4);
}
}
}
// C = A * B.T
// Dims: M, N, K
// K64: K even mult of 64
// A is dense, B is dense, C is sparse
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(128) hgemm_blocksparse_32x32x64_nt_dds(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
const uint stdA = 72;
const uint stdB = 72;
const uint stdC = 132;
__shared__ ehalf hShare[(stdA + stdB)*32];
float* fShare = (float*)hShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 8;
uint ty = tid / 8;
uint k = tx * 8;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA00 = idx_B*szCtxHeadState + (idx_M*32 + ty)*szHeadState + idx_H*szState + k;
uint offsetB00 = idx_B*szCtxHeadState + (idx_N*32 + ty)*szHeadState + idx_H*szState + k;
uint offsetA16 = offsetA00 + szHeadState*16;
uint offsetB16 = offsetB00 + szHeadState*16;
uint storA = ty*stdA + k;
uint storB = ty*stdB + k;
uint loadA = fragmentA<OP_N,m16n16k16>::get_idx(tid, stdA, (tid & 96)/2);
uint loadB = fragmentB<OP_T,m16n16k16>::get_idx(tid, stdB, (tid & 96)/2 + stdA*32);
fragmentC<OP_N,OP_T,m16n16k16> fragC[2][2];
uint loop = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint4 a00 = {0}, a16 = {0};
uint4 b00 = {0}, b16 = {0};
if (K64 || k < szState)
{
a00 = *(uint4*)&A[offsetA00];
a16 = *(uint4*)&A[offsetA16];
b00 = *(uint4*)&B[offsetB00];
b16 = *(uint4*)&B[offsetB16];
}
offsetA00 += 64;
offsetA16 += 64;
offsetB00 += 64;
offsetB16 += 64;
if (!K64)
k += 64;
__syncthreads();
*(uint4*)&hShare[storA + 0*stdA + 0*stdA] = a00;
*(uint4*)&hShare[storA + 16*stdA + 0*stdA] = a16;
*(uint4*)&hShare[storB + 0*stdB + 32*stdA] = b00;
*(uint4*)&hShare[storB + 16*stdB + 32*stdA] = b16;
__syncthreads();
fragmentA<OP_N,m16n16k16> fragA[2];
fragmentB<OP_T,m16n16k16> fragB[2];
for (int i = 0; i < 2; i++)
{
fragA[i].load(hShare, loadA + stdA*i*16, stdA);
fragB[i].load(hShare, loadB + stdB*i*16, stdB);
}
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
fragC[i][j].mma_sync(fragA[i], fragB[j]);
} while (++loop < loops);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
tx = tid % 8;
ty = tid / 8;
uint loadC = ty*stdC + tx*4;
uint storC = fragmentC<OP_N,OP_T,m16n16k16>::get_idx(tid, stdC, (tid & 96));
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*32*32 + tid*4;
for (int i = 0; i < 2; i++)
{
__syncthreads();
for (int j = 0; j < 2; j++)
fragC[i][j].store(fShare, storC + j*16, stdC);
__syncthreads();
float4 sum4 = ew_add(
ew_add(
*(float4*)&fShare[loadC + 0],
*(float4*)&fShare[loadC + 32]),
ew_add(
*(float4*)&fShare[loadC + 64],
*(float4*)&fShare[loadC + 96]));
store((CV*)(C + i*4*128), sum4);
}
}
// C = A * B.T
// Dims: M, N, K
// K64: K even mult of 64
// dds: A is dense, B is dense, C is sparse
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(64) hgemm_blocksparse_16x16x64_nt_dds(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
const uint stdA = 72;
const uint stdB = 72;
const uint stdC = 48;
__shared__ ehalf hShare[(stdA + stdB)*16];
float* fShare = (float*)hShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 8;
uint ty = tid / 8;
uint k = tx * 8;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA0 = idx_B*szCtxHeadState + (idx_M*16 + ty)*szHeadState + idx_H*szState + k;
uint offsetB0 = idx_B*szCtxHeadState + (idx_N*16 + ty)*szHeadState + idx_H*szState + k;
uint offsetA8 = offsetA0 + szHeadState*8;
uint offsetB8 = offsetB0 + szHeadState*8;
uint storA = ty*stdA + k;
uint storB = ty*stdB + k;
uint loadA = fragmentA<OP_N,m16n16k16>::get_idx(tid, stdA, (tid & 32));
uint loadB = fragmentB<OP_T,m16n16k16>::get_idx(tid, stdB, (tid & 32) + 16*stdA);
fragmentC<OP_N,OP_T,m16n16k16> fragC;
uint loop = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint4 a0 = {0}, a8 = {0};
uint4 b0 = {0}, b8 = {0};
if (K64 || k < szState)
{
a0 = *(uint4*)&A[offsetA0];
a8 = *(uint4*)&A[offsetA8];
b0 = *(uint4*)&B[offsetB0];
b8 = *(uint4*)&B[offsetB8];
}
offsetA0 += 64;
offsetA8 += 64;
offsetB0 += 64;
offsetB8 += 64;
if (!K64)
k += 64;
__syncthreads();
*(uint4*)&hShare[storA + 0*stdA + 0*stdA] = a0;
*(uint4*)&hShare[storA + 8*stdA + 0*stdA] = a8;
*(uint4*)&hShare[storB + 0*stdB + 16*stdA] = b0;
*(uint4*)&hShare[storB + 8*stdB + 16*stdA] = b8;
__syncthreads();
fragmentA<OP_N,m16n16k16> fragA;
fragmentB<OP_T,m16n16k16> fragB;
#pragma unroll
for (uint j = 0; j < 2; j++)
{
fragA.load(hShare, loadA + j*16, stdA);
fragB.load(hShare, loadB + j*16, stdB);
fragC.mma_sync(fragA, fragB);
}
} while (++loop < loops);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
tx = tid % 4;
ty = tid / 4;
uint loadC = ty*stdC + tx*4;
uint storC = fragmentC<OP_N,OP_T,m16n16k16>::get_idx(tid, stdC, (tid & 32)/2);
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*16*16 + tid*4;
__syncthreads();
fragC.store(fShare, storC, stdC);
__syncthreads();
float4 sum4 = ew_add(
*(float4*)&fShare[loadC + 0],
*(float4*)&fShare[loadC + 16]);
store((CV*)C, sum4);
}
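// C = A * B.T
// Dims: M, N, K
// K64: K even mult of 64
// dds: A is dense, B is dense, C is sparse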
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(32) hgemm_blocksparse_8x8x64_nt_dds(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
const uint stdAB = 72;
const uint stdC = 8;
__shared__ ehalf hShare[stdAB*8*2];
float* fShare = (float*)hShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 8;
uint ty = tid / 8;
uint k = tx * 8;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA0 = idx_B*szCtxHeadState + (idx_M*8 + ty)*szHeadState + idx_H*szState + k;
uint offsetB0 = idx_B*szCtxHeadState + (idx_N*8 + ty)*szHeadState + idx_H*szState + k;
uint offsetA4 = offsetA0 + szHeadState*4;
uint offsetB4 = offsetB0 + szHeadState*4;
uint storAB = ty*stdAB + k;
uint loadA = fragmentA<OP_N,m8n8k16>::get_idx(tid, stdAB, 0*stdAB);
uint loadB = fragmentB<OP_T,m8n8k16>::get_idx(tid, stdAB, 8*stdAB);
fragmentC<OP_N,OP_T,m8n8k16> fragC;
uint loop = 0;
#pragma unroll 1
do
{
uint4 a0 = {0}, a4 = {0};
uint4 b0 = {0}, b4 = {0};
if (K64 || k < szState)
{
a0 = *(uint4*)&A[offsetA0];
a4 = *(uint4*)&A[offsetA4];
b0 = *(uint4*)&B[offsetB0];
b4 = *(uint4*)&B[offsetB4];
}
offsetA0 += 64;
offsetA4 += 64;
offsetB0 += 64;
offsetB4 += 64;
if (!K64)
k += 64;
*(uint4*)&hShare[storAB + 0*stdAB + 0*stdAB] = a0;
*(uint4*)&hShare[storAB + 4*stdAB + 0*stdAB] = a4;
*(uint4*)&hShare[storAB + 0*stdAB + 8*stdAB] = b0;
*(uint4*)&hShare[storAB + 4*stdAB + 8*stdAB] = b4;
fragmentA<OP_N,m8n8k16> fragA;
fragmentB<OP_T,m8n8k16> fragB;
#pragma unroll
for (uint j = 0; j < 4; j++)
{
fragA.load(hShare, loadA + j*16, stdAB);
fragB.load(hShare, loadB + j*16, stdAB);
fragC.mma_sync(fragA, fragB);
}
} while (++loop < loops);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
uint storC = fragmentC<OP_N,OP_T,m8n8k16>::get_idx(tid, stdC);
fragC.store(fShare, storC, stdC);
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*8*8 + tid*2;
store((CV*)C, *(float2*)&fShare[tid*2]);
}
#else // __CUDA_ARCH__ >= 700
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(256) hgemm_blocksparse_64x64x64_xn_sdd(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
*C = 0;
}
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(128) hgemm_blocksparse_32x64x32_xn_sdd(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
*C = 0;
}
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(64) hgemm_blocksparse_16x64x16_xn_sdd(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
*C = 0;
}
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(64) hgemm_blocksparse_8x64x8_xn_sdd(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
*C = 0;
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(256) hgemm_blocksparse_64x64x64_nt_dds(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
*C = 0;
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(128) hgemm_blocksparse_32x32x64_nt_dds(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
*C = 0;
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(64) hgemm_blocksparse_16x16x64_nt_dds(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
*C = 0;
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(32) hgemm_blocksparse_8x8x64_nt_dds(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
*C = 0;
}
#endif // __CUDA_ARCH__ >= 700
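// Host-side dispatch for the sparse-dense-dense (xn) product: computes the
// flattened batch/head/context strides, packs the M and N block coordinates
// into gridDim.x (decoded in-kernel with magic-number division), and picks
// the kernel specialization by block_size and by op (NN vs TN).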
bool blocksparse_transformer_xn(hipStream_t stream,
const uint2* lut,
const ehalf* a,
const ehalf* b,
ehalf* c,
uint block_size, uint blocks,
uint batch_dim, uint ctx_blks, uint head_dim, uint state_dim,
uint lut_heads, uint lut_dim, uint op, uint magic, uint shift, uint max_lut)
{
uint szState = state_dim;
uint szHeadState = head_dim * szState;
uint szCtxHeadState = ctx_blks * block_size * szHeadState;
uint szBlocksBlk = blocks * block_size * block_size;
uint szHeadBlocksBlk = head_dim * szBlocksBlk;
// if just one lut head, broadcast block-sparsity to all heads
uint szLut = lut_heads > 1 ? lut_dim : 0;
// compound gridDim.x with m and n coords
uint gridN = CEIL_DIV(state_dim, 64);
uint gridM = ctx_blks - 1;
uint gridX = ctx_blks * gridN;
uint shared = ((max_lut+1)/2)*2*8; // round up to nearest even, 8 bytes per entry
dim3 grid(gridX, batch_dim, head_dim);
if (op == 1) // NN
{
if (block_size == 8)
hipLaunchKernelGGL(( hgemm_blocksparse_8x64x8_xn_sdd<OP_N,false>), dim3(grid), dim3(64),shared,stream, lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else if (block_size == 16)
hipLaunchKernelGGL(( hgemm_blocksparse_16x64x16_xn_sdd<OP_N,false>), dim3(grid), dim3(64),shared,stream, lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else if (block_size == 32)
hipLaunchKernelGGL(( hgemm_blocksparse_32x64x32_xn_sdd<OP_N,false>), dim3(grid),dim3(128),shared,stream, lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else
hipLaunchKernelGGL(( hgemm_blocksparse_64x64x64_xn_sdd<OP_N,false>), dim3(grid),dim3(256),shared,stream, lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
}
else // TN
{
if (block_size == 8)
hipLaunchKernelGGL(( hgemm_blocksparse_8x64x8_xn_sdd<OP_T,false>), dim3(grid), dim3(64),shared,stream, lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else if (block_size == 16)
hipLaunchKernelGGL(( hgemm_blocksparse_16x64x16_xn_sdd<OP_T,false>), dim3(grid), dim3(64),shared,stream, lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else if (block_size == 32)
hipLaunchKernelGGL(( hgemm_blocksparse_32x64x32_xn_sdd<OP_T,false>), dim3(grid),dim3(128),shared,stream, lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else
hipLaunchKernelGGL(( hgemm_blocksparse_64x64x64_xn_sdd<OP_T,false>), dim3(grid),dim3(256),shared,stream, lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
}
return true;
}
template <typename CT, typename CV2, typename CV4>
bool blocksparse_transformer_nt(hipStream_t stream,
const uint2* lut,
const ehalf* a,
const ehalf* b,
CT* c,
uint block_size, uint blocks,
uint batch_dim, uint ctx_blks, uint head_dim, uint state_dim,
uint lut_heads, uint lut_dim)
{
uint szState = state_dim;
uint szHeadState = head_dim * szState;
uint szCtxHeadState = ctx_blks * block_size * szHeadState;
uint szBlocksBlk = blocks * block_size * block_size;
uint szHeadBlocksBlk = head_dim * szBlocksBlk;
// if just one lut head, broadcast block-sparsity to all heads
uint szLut = lut_heads > 1 ? lut_dim : 0;
uint loops = CEIL_DIV(state_dim, 64);
bool k64 = (state_dim & 63) == 0;
dim3 grid(blocks, batch_dim, head_dim);
if (block_size == 8)
{
if (k64)
hipLaunchKernelGGL(( hgemm_blocksparse_8x8x64_nt_dds<CT,CV2, true>), dim3(grid), dim3(32),0,stream, lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
else
hipLaunchKernelGGL(( hgemm_blocksparse_8x8x64_nt_dds<CT,CV2,false>), dim3(grid), dim3(32),0,stream, lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
}
else if (block_size == 16)
{
if (k64)
hipLaunchKernelGGL(( hgemm_blocksparse_16x16x64_nt_dds<CT,CV4, true>), dim3(grid), dim3(64),0,stream, lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
else
hipLaunchKernelGGL(( hgemm_blocksparse_16x16x64_nt_dds<CT,CV4,false>), dim3(grid), dim3(64),0,stream, lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
}
else if (block_size == 32)
hipLaunchKernelGGL(( hgemm_blocksparse_32x32x64_nt_dds<CT,CV4,false>), dim3(grid),dim3(128),0,stream, lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
else
hipLaunchKernelGGL(( hgemm_blocksparse_64x64x64_nt_dds<CT,CV4,false>), dim3(grid),dim3(256),0,stream, lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
// hipError_t error = hipGetLastError();
// printf("%s\n%s\n", hipGetErrorName(error), hipGetErrorString(error));
return true;
}
template bool blocksparse_transformer_nt<ehalf,ehalf2,ehalf4>(hipStream_t stream, const uint2* lut, const ehalf* a, const ehalf* b, ehalf* c, uint block_size, uint blocks, uint batch_dim, uint ctx_blks, uint head_dim, uint state_dim, uint lut_heads, uint lut_dim);
template bool blocksparse_transformer_nt<bhalf,bhalf2,bhalf4>(hipStream_t stream, const uint2* lut, const ehalf* a, const ehalf* b, bhalf* c, uint block_size, uint blocks, uint batch_dim, uint ctx_blks, uint head_dim, uint state_dim, uint lut_heads, uint lut_dim);
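// Fused masked softmax over the block-sparse attention scores. Each CTA
// handles one query row (idx_Q, idx_q) of one batch/head; the LUT lists the
// active key blocks for that row and the per-row mask word selects columns
// within each block. Max and sum reductions are done per thread, then with
// warp shuffles, with a shared-memory step when more than one warp is used.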
template <uint U, uint BSIZE, typename MASKT>
__global__ void blocksparse_masked_softmax(
const uint2* __restrict__ Lut,
const MASKT* __restrict__ Mask,
const bhalf* __restrict__ X,
ehalf* Y,
uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init, uint use_mask)
{
__shared__ float Max[32];
__shared__ float Sum[32];
uint2* Lut2s = (uint2*)&Sum[32];
uint tid = threadIdx.x;
uint idx_Q = blockIdx.x / BSIZE; // Q dim
uint idx_q = blockIdx.x % BSIZE; // Q dim
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
Lut += idx_H * szLut;
Mask += idx_H * szMask + idx_q * blocks;
uint2 lut_head = Lut[idx_Q];
if (tid < 32)
{
// Allows non-power of 2 threads to work
Max[tid] = -FLT_MAX;
Sum[tid] = 0.0f;
}
// prefetch the lut data into shared
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += blockDim.x)
{
uint2 entry = Lut[i];
entry.y = use_mask ? (uint)__ldg(Mask + entry.x) : 0xffffffff;
entry.x *= BSIZE*BSIZE;
Lut2s[i] = entry;
//printf("%3d %3d %3d %08x\n", idx_Q, idx_q, i, entry.y);
}
__syncthreads();
uint lut_idx = (tid & (1024-BSIZE))*U/BSIZE;
uint tidx = tid % BSIZE;
uint mask_bit = 1 << tidx;
uint offset = idx_B*szBatch + idx_H*szHead + idx_q*BSIZE + tidx;
float xval[U];
#pragma unroll
for (int i = 0; i < U; i++)
{
uint2 entry = Lut2s[lut_idx + i];
uint offsetX = offset + entry.x;
bool in = lut_idx + i < lut_size;
float val = load(X + offsetX, 0, in);
xval[i] = in && (entry.y & mask_bit) != 0 ? val : -FLT_MAX;
}
// reduce within thread
float Xmax[U];
for (int i = 0; i < U; i++)
Xmax[i] = xval[i];
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
Xmax[i] = fmaxf(Xmax[i], Xmax[i+j]);
float xmax = Xmax[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Max[tid/32] = xmax;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
xmax = Max[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
// final reduction to shared
Max[tid] = xmax;
}
__syncthreads();
xmax = Max[0];
}
// compute exponent of softmax
float Xsum[U];
for (int i = 0; i < U; i++)
{
// use fast approx math: e**x == 2**(x * log2(e))
float exp = (xval[i] - xmax) * scale;
asm("ex2.approx.ftz.f32 %0, %0;" : "+f"(exp) :);
Xsum[i] = xval[i] = exp;
}
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
Xsum[i] = Xsum[i] + Xsum[i+j];
float exp_sum = Xsum[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = exp_sum;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
exp_sum = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
// final reduction to shared
Sum[tid] = exp_sum;
}
__syncthreads();
exp_sum = Sum[0];
}
float rcp_exp_sum = exp_sum;
asm("rcp.approx.ftz.f32 %0, %0;" : "+f"(rcp_exp_sum) :);
#pragma unroll
for (int i = 0; i < U; i++)
{
ehalf out;
asm("cvt.rn.f16.f32 %0, %1;" : "=h"(out.x) : "f"(xval[i] * rcp_exp_sum));
uint offsetY = offset + Lut2s[lut_idx + i].x;
if (lut_idx + i < lut_size)
__stg(Y + offsetY, out);
}
}
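// Backward pass. For y = softmax(scale * x) restricted to the unmasked
// entries, the gradient is dx_i = scale * y_i * (dy_i - sum_j dy_j * y_j),
// so the kernel only needs one extra reduction (sum of dy*y) per row.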
template <uint U, uint BSIZE, typename MASKT>
__global__ void blocksparse_masked_softmax_grad(
const uint2* __restrict__ Lut,
const MASKT* __restrict__ Mask,
const ehalf* __restrict__ DY,
const ehalf* __restrict__ Y,
ehalf* DX,
uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init, uint use_mask)
{
__shared__ float Sum[32];
uint2* Lut2s = (uint2*)&Sum[32];
uint tid = threadIdx.x;
uint idx_Q = blockIdx.x / BSIZE; // Q dim
uint idx_q = blockIdx.x % BSIZE; // Q dim
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut and/or mask
Lut += idx_H * szLut;
Mask += idx_H * szMask + idx_q * blocks;
uint2 lut_head = Lut[idx_Q];
if (tid < 32)
Sum[tid] = 0.0f;
// prefetch the lut data into shared
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += blockDim.x)
{
uint2 entry = Lut[i];
entry.y = use_mask ? (uint)__ldg(Mask + entry.x) : 0xffffffff;
entry.x *= BSIZE*BSIZE;
Lut2s[i] = entry;
}
__syncthreads();
uint lut_idx = (tid & (1024-BSIZE))*U/BSIZE;
uint tidx = tid % BSIZE;
uint mask_bit = 1 << tidx;
uint offset = idx_B*szBatch + idx_H*szHead + idx_q*BSIZE + tidx;
float dy[U], y[U];
#pragma unroll
for (int i = 0; i < U; i++)
{
uint2 entry = Lut2s[lut_idx + i];
uint offsetY = offset + entry.x;
bool in = lut_idx + i < lut_size && (entry.y & mask_bit) != 0;
dy[i] = load(DY + offsetY, 0, in);
y[i] = load(Y + offsetY, 0, in);
}
// compute dy * y
float dyy[U];
for (int i = 0; i < U; i++)
dyy[i] = dy[i] * y[i];
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
dyy[i] = dyy[i] + dyy[i+j];
float sum_dyy = dyy[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = sum_dyy;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
sum_dyy = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
// final reduction to shared
Sum[tid] = sum_dyy;
}
__syncthreads();
sum_dyy = Sum[0];
}
// dx = (dy - sum_dyy) * y * scale
#pragma unroll
for (int i = 0; i < U; i++)
{
float dx = (dy[i] - sum_dyy) * y[i] * scale;
ehalf out;
asm("cvt.rn.f16.f32 %0, %1;" : "=h"(out.x) : "f"(dx));
uint offsetX = offset + Lut2s[lut_idx + i].x;
if (lut_idx + i < lut_size)
__stg(DX + offsetX, out);
}
}
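// Specializations for 64x64 blocks: the per-row mask is a 64-bit word, each
// thread handles two adjacent columns (bhalf2/ehalf2), and inline PTX keeps
// the predicated loads/stores on immediate address offsets.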
typedef unsigned long long uint64;
template <uint UNROLL>
__global__ void __launch_bounds__(1024,2) blocksparse_masked_softmax_64x64(
const uint2* __restrict__ Lut,
const uint64* __restrict__ Mask,
const bhalf* __restrict__ X,
ehalf* Y,
uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init, uint max_lut, uint use_mask)
{
__shared__ float Max[32];
__shared__ float Sum[32];
uint64* LutMask64 = (uint64*)&Sum[32];
uint* LutMask32 = (uint*)&Sum[32];
uint* LutOffset = (uint*)&LutMask64[max_lut];
uint tid = threadIdx.x;
uint idx_Q = blockIdx.x / 64;
uint idx_q = blockIdx.x % 64;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
Lut += idx_H * szLut;
Mask += idx_H * szMask + idx_q * blocks;
uint2 lut_head = Lut[idx_Q];
if (tid < 32)
{
// Allows non-power of 2 threads to work
Max[tid] = -FLT_MAX;
Sum[tid] = 0.0f;
}
// prefetch the lut data into shared
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < max_lut; i += blockDim.x)
{
uint64 mask = 0;
if (i < lut_size)
{
uint2 entry = Lut[i];
uint blk_id = entry.x;
LutOffset[i] = blk_id * 64*64;
mask = use_mask ? __ldg(Mask + blk_id) : 0xffffffffffffffff;
}
LutMask64[i] = mask;
}
__syncthreads();
uint lut_idx = (tid & (1024-32))*UNROLL/32;
uint tidx = (tid & 31)*2;
uint offset = idx_B*szBatch + idx_H*szHead + idx_q*64 + tidx + LutOffset[lut_idx];
X += offset;
bhalf2 xval[UNROLL];
#pragma unroll
for (uint i = 0; i < UNROLL; i++)
{
// nvcc/ptxas is really bad at generating sass that maximizes use of immediate offsets.
// This means way more registers are tied up in memory load addresses than are needed.
// This kernel's performance is hugely dependent on efficient register use so give the compiler a hand:
asm (
"{ \n\t"
".reg .pred p; \n\t"
".reg .s64 X, offset; \n\t"
"setp.lt.u32 p, %3, %4; \n\t"
"mov.b64 offset, {%2, 0}; \n\t"
"add.s64 X, %1, offset; \n\t"
"mov.u32 %0, 0xff80ff80; \n\t" // bhalf2 -inf, -inf
"@p ld.global.nc.u32 %0, [X]; \n\t"
"}" :"=r"(xval[i].x) : "l"(X), "r"(i*64*64*2), "r"(lut_idx + i), "r"(lut_size));
}
// split the 64 bit mask by half warp
uint tid16 = (tid & 16)/16;
uint mask0 = 1 << (tidx - tid16*32);
uint mask1 = mask0 << 1;
#pragma unroll
for (int i = 0; i < UNROLL; i++)
{
uint mask32 = LutMask32[(lut_idx + i)*2 + tid16];
if ((mask32 & mask0) == 0)
xval[i].x = (xval[i].x & 0xffff0000) | 0x0000ff80; // 0x0000fc00
if ((mask32 & mask1) == 0)
xval[i].x = (xval[i].x & 0x0000ffff) | 0xff800000;
}
// reduce within thread
float Xmax[UNROLL];
for (int i = 0; i < UNROLL; i++)
Xmax[i] = ew_max(to_float(xval[i]));
float xmax = Xmax[0];
for (int i = 1; i < UNROLL; i++)
xmax = fmaxf(Xmax[i], xmax);
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Max[tid/32] = xmax;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
xmax = Max[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
// final reduction to shared
Max[tid] = xmax;
}
__syncthreads();
xmax = Max[0];
}
// subtract xmax and compute exponent
float exp_sum = 0;
for (int i = 0; i < UNROLL; i++)
{
// use fast approx math: e**x == 2**(x * log2(e))
float2 Xval = ew_mul(ew_sub(to_float(xval[i]), xmax), scale);
asm("ex2.approx.ftz.f32 %0, %0;" : "+f"(Xval.x) :);
asm("ex2.approx.ftz.f32 %0, %0;" : "+f"(Xval.y) :);
exp_sum += ew_sum(Xval);
xval[i] = to_bhalf(Xval);
}
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = exp_sum;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
exp_sum = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
// final reduction to shared
Sum[tid] = exp_sum;
}
__syncthreads();
exp_sum = Sum[0];
}
float rcp_exp_sum = exp_sum;
asm("rcp.approx.ftz.f32 %0, %0;" : "+f"(rcp_exp_sum) :);
Y += offset;
#pragma unroll
for (int i = 0; i < UNROLL; i++)
{
ehalf2 y = to_ehalf(ew_mul(to_float(xval[i]), rcp_exp_sum));
asm (
"{ \n\t"
".reg .pred p; \n\t"
".reg .s64 X, offset; \n\t"
"setp.lt.u32 p, %3, %4; \n\t"
"mov.b64 offset, {%1, 0}; \n\t"
"add.s64 X, %0, offset; \n\t"
"@p st.global.wb.u32 [X], %2; \n\t"
"}" :: "l"(Y), "r"(i*64*64*2), "r"(y.x), "r"(lut_idx + i), "r"(lut_size));
}
}
template <uint UNROLL>
__global__ void __launch_bounds__(1024,2) blocksparse_masked_softmax_64x64_grad(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ DY,
const ehalf* __restrict__ Y,
ehalf* DX,
uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init)
{
__shared__ float Sum[32];
uint* LutOffset = (uint*)&Sum[32];
uint tid = threadIdx.x;
uint idx_Q = blockIdx.x / 64;
uint idx_q = blockIdx.x % 64;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
Lut += idx_H * szLut;
uint2 lut_head = Lut[idx_Q];
if (tid < 32)
Sum[tid] = 0.0f;
// prefetch the lut data into shared
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += blockDim.x)
LutOffset[i] = Lut[i].x * 64*64;
__syncthreads();
uint lut_idx = (tid & (1024-32))*UNROLL/32;
uint tidx = (tid & 31)*2;
uint offset = idx_B*szBatch + idx_H*szHead + idx_q*64 + tidx + LutOffset[lut_idx];
DY += offset;
Y += offset;
ehalf2 dy[UNROLL], y[UNROLL];
#pragma unroll
for (uint i = 0; i < UNROLL; i++)
{
asm (
"{ \n\t"
".reg .pred p; \n\t"
".reg .s64 DY, Y, offset; \n\t"
"setp.lt.u32 p, %5, %6; \n\t"
"mov.b64 offset, {%4, 0}; \n\t"
"add.s64 DY, %2, offset; \n\t"
"add.s64 Y, %3, offset; \n\t"
"mov.u32 %0, 0; \n\t"
"mov.u32 %1, 0; \n\t"
"@p ld.global.nc.u32 %0, [DY]; \n\t"
"@p ld.global.nc.u32 %1, [Y]; \n\t"
"}" : "=r"(dy[i].x), "=r"(y[i].x) : "l"(DY), "l"(Y), "r"(i*64*64*2), "r"(lut_idx + i), "r"(lut_size));
}
// compute dy * y and start reduction
float sum_dyy = 0.0f;
for (int i = 0; i < UNROLL; i++)
sum_dyy += ew_sum(ew_mul(to_float(dy[i]), to_float(y[i])));
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = sum_dyy;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
sum_dyy = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
// final reduction to shared
Sum[tid] = sum_dyy;
}
__syncthreads();
sum_dyy = Sum[0];
}
DX += offset;
#pragma unroll
for (uint i = 0; i < UNROLL; i++)
{
// dx = (dy - sum_dyy) * y * scale
ehalf2 dx = to_ehalf(ew_mul(ew_mul(ew_sub(to_float(dy[i]), sum_dyy), to_float(y[i])), scale));
asm (
"{ \n\t"
".reg .pred p; \n\t"
".reg .s64 DX, offset; \n\t"
"setp.lt.u32 p, %3, %4; \n\t"
"mov.b64 offset, {%1, 0}; \n\t"
"add.s64 DX, %0, offset; \n\t"
"@p st.global.wb.u32 [DX], %2; \n\t"
"}" :: "l"(DX), "r"(i*64*64*2), "r"(dx.x), "r"(lut_idx + i), "r"(lut_size));
}
}
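// ex2.approx computes 2**x, so the kernels evaluate e**x as 2**(x * log2(e));
// folding log2(e) into `scale` below avoids an extra multiply per element.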
#define LOG2e 1.4426950408889634f
typedef unsigned char uchar;
bool BlocksparseMaskedSoftmax(hipStream_t stream,
const uint2* lut,
const char* mask,
const bhalf* x,
ehalf* y,
uint block_size, uint blocks,
uint batch_dim, uint head_dim, uint ctx_blks,
uint lut_heads, uint lut_dim, uint max_lut,
uint mask_heads, float scale)
{
uint szLut = lut_heads > 1 ? lut_dim : 0;
uint szMask = mask_heads > 1 ? blocks * block_size : 0;
uint gridQ = ctx_blks * block_size;
uint szHead = blocks * block_size * block_size;
uint szBatch = head_dim * szHead;
uint maxK = max_lut * block_size;
//hipMemsetD16Async((hipDeviceptr_t)c, 0, szBatch*batch_dim, stream);
// combine scaling with fast e**x compute
scale *= LOG2e;
dim3 grid(gridQ, batch_dim, head_dim);
if (block_size == 64)
{
// Unroll factor 8 (ctx_size up to 16K)
if (maxK > 1024*8)
{
uint threads = 1024;
uint shfl_init = 16;
uint lut_max = threads * 8 / 32;
uint shared = lut_max * 12;
hipLaunchKernelGGL(( blocksparse_masked_softmax_64x64<8>), dim3(grid),dim3(threads),shared,stream, lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
}
// Unroll factor of 4 is preferred (keeps these kernels under 32 registers for max occupancy)
else if (maxK >= 64*4)
{
uint threads = CEIL_DIV(maxK, 64*4) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
uint lut_max = threads * 4 / 32;
uint shared = lut_max * 12;
hipLaunchKernelGGL(( blocksparse_masked_softmax_64x64<4>), dim3(grid),dim3(threads),shared,stream, lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
}
// Unroll factor 1
else
{
uint threads = CEIL_DIV(maxK, 64) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
uint lut_max = threads * 1 / 32;
uint shared = lut_max * 12;
hipLaunchKernelGGL(( blocksparse_masked_softmax_64x64<1>), dim3(grid),dim3(threads),shared,stream, lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
}
}
else
{
uint shared = max_lut*8;
// Unroll factor 8 (ctx_size up to 8K)
if (maxK > 1024*4)
{
if (block_size == 8)
hipLaunchKernelGGL(( blocksparse_masked_softmax<8, 8, uchar>), dim3(grid),dim3(1024),shared,stream, lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
else if (block_size == 16)
hipLaunchKernelGGL(( blocksparse_masked_softmax<8,16,ushort>), dim3(grid),dim3(1024),shared,stream, lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
else
hipLaunchKernelGGL(( blocksparse_masked_softmax<8,32, uint>), dim3(grid),dim3(1024),shared,stream, lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
}
// Unroll factor of 4 is preferred
else if (maxK > 32*4)
{
uint threads = CEIL_DIV(maxK, 32*4) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
if (block_size == 8)
hipLaunchKernelGGL(( blocksparse_masked_softmax<4, 8, uchar>), dim3(grid),dim3(threads),shared,stream, lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else if (block_size == 16)
hipLaunchKernelGGL(( blocksparse_masked_softmax<4,16,ushort>), dim3(grid),dim3(threads),shared,stream, lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else
hipLaunchKernelGGL(( blocksparse_masked_softmax<4,32, uint>), dim3(grid),dim3(threads),shared,stream, lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
}
// Unroll factor 1
else
{
uint threads = CEIL_DIV(maxK, 32) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
if (block_size == 8)
hipLaunchKernelGGL(( blocksparse_masked_softmax<1, 8, uchar>), dim3(grid),dim3(threads),shared,stream, lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else if (block_size == 16)
hipLaunchKernelGGL(( blocksparse_masked_softmax<1,16,ushort>), dim3(grid),dim3(threads),shared,stream, lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else
hipLaunchKernelGGL(( blocksparse_masked_softmax<1,32, uint>), dim3(grid),dim3(threads),shared,stream, lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
}
}
return true;
}
bool BlocksparseMaskedSoftmaxGrad(hipStream_t stream,
const uint2* lut,
const char* mask,
const ehalf* dy,
const ehalf* y,
ehalf* dx,
uint block_size, uint blocks,
uint batch_dim, uint head_dim, uint ctx_blks,
uint lut_heads, uint lut_dim, uint max_lut,
uint mask_heads, float scale)
{
uint szLut = lut_heads > 1 ? lut_dim : 0;
uint szMask = mask_heads > 1 ? blocks * block_size : 0;
uint gridQ = ctx_blks * block_size;
uint szHead = blocks * block_size * block_size;
uint szBatch = head_dim * szHead;
uint maxK = max_lut * block_size;
//hipMemsetD16Async((hipDeviceptr_t)c, 0, szBatch*batch_dim, stream);
dim3 grid(gridQ, batch_dim, head_dim);
if (block_size == 64)
{
uint shared = max_lut*4;
// Unroll factor 8
if (maxK > 1024*8)
{
uint threads = 1024;
uint shfl_init = 16;
hipLaunchKernelGGL(( blocksparse_masked_softmax_64x64_grad<8>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init);
}
// Unroll factor of 4 is preferred
else if (maxK >= 64*4)
{
uint threads = CEIL_DIV(maxK, 64*4) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
hipLaunchKernelGGL(( blocksparse_masked_softmax_64x64_grad<4>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init);
}
// Unroll factor 1
else
{
uint threads = CEIL_DIV(maxK, 64) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
hipLaunchKernelGGL(( blocksparse_masked_softmax_64x64_grad<1>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init);
}
}
else
{
uint shared = max_lut*8;
// Unroll factor 8
if (maxK > 1024*4)
{
if (block_size == 8)
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<8, 8, uchar>), dim3(grid),dim3(1024),shared,stream, lut, (const uchar*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
else if (block_size == 16)
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<8,16,ushort>), dim3(grid),dim3(1024),shared,stream, lut, (const ushort*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
else
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<8,32, uint>), dim3(grid),dim3(1024),shared,stream, lut, (const uint*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
}
// Unroll factor of 4 is preferred
else if (maxK > 32*4)
{
uint threads = CEIL_DIV(maxK, 32*4) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
if (block_size == 8)
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<4, 8, uchar>), dim3(grid),dim3(threads),shared,stream, lut, (const uchar*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else if (block_size == 16)
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<4,16,ushort>), dim3(grid),dim3(threads),shared,stream, lut, (const ushort*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<4,32, uint>), dim3(grid),dim3(threads),shared,stream, lut, (const uint*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
}
// Unroll factor 1
else
{
uint threads = CEIL_DIV(maxK, 32) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
if (block_size == 8)
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<1, 8, uchar>), dim3(grid),dim3(threads),shared,stream, lut, (const uchar*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else if (block_size == 16)
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<1,16,ushort>), dim3(grid),dim3(threads),shared,stream, lut, (const ushort*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<1,32, uint>), dim3(grid),dim3(threads),shared,stream, lut, (const uint*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
}
}
return true;
}
#endif // GOOGLE_CUDA
| dd1351bec9c1e86d851dc92eb169cd3f5127149f.cu | #if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include "gpu_hmma.h"
#include <stdio.h>
#if __CUDA_ARCH__ >= 700
// C = A * B or A.T * B
// Dims: M, N, K
// N64: N even mult of 64
// A is sparse, B is dense, C is dense
// 32x64x16 warp tile
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(256) hgemm_blocksparse_64x64x64_xn_sdd(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
const uint stdA = 64+16;
const uint stdB = 64+16;
const uint stdC = 512+4;
__shared__ float fShare[stdC*16];
ehalf* hShare = (ehalf*)fShare;
uint2* Lut2s = (uint2*)&fShare[stdC*16];
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
uint tx = tid % 8;
uint ty = tid / 8;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 256)
{
uint2 entry = Lut[i];
entry.x *= 64*64; // 4096 entries of A per block
entry.y *= szHeadState*64; // 64 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint storAB = ty*stdA + tx*8; // assume stdA == stdB
uint loadA = fragmentA<OP_A,m16n16k16>::get_idx(tid, stdA, (tid & 192)*(OP_A == OP_N ? 1 : stdA)*16/64 + (tid & 32)*(OP_A == OP_N ? stdA : 1));
uint loadB = fragmentB<OP_N,m16n16k16>::get_idx(tid, stdB, (tid & 192)*stdB*16/64 + stdA*64);
uint b = idx_N*64 + tx*8;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + tid*8;
uint offsetB = idx_B*szCtxHeadState + ty*szHeadState + idx_H*szState + b;
bool inB = N64 || b < szState;
fragmentC<OP_A,OP_N,m16n16k16> fragC[2][4];
int idx_lut = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint2 entry = Lut2s[idx_lut];
uint4 b00 = {0};
uint4 b32 = {0};
entry.x += offsetA;
entry.y += offsetB;
uint4 a00 = *(uint4*)&A[entry.x + 0*64];
uint4 a32 = *(uint4*)&A[entry.x + 32*64];
if (inB)
{
b00 = *(uint4*)&B[entry.y + 0*szHeadState];
b32 = *(uint4*)&B[entry.y + 32*szHeadState];
}
__syncthreads();
*(uint4*)&hShare[storAB + 0*stdA + 0*stdA] = a00;
*(uint4*)&hShare[storAB + 32*stdA + 0*stdA] = a32;
*(uint4*)&hShare[storAB + 0*stdB + 64*stdA] = b00;
*(uint4*)&hShare[storAB + 32*stdB + 64*stdA] = b32;
__syncthreads();
fragmentA<OP_A,m16n16k16> fragA[2];
fragmentB<OP_N,m16n16k16> fragB[4];
for (int i = 0; i < 2; i++)
fragA[i].load(hShare, loadA + (OP_A == OP_N ? stdA : 1)*i*16, stdA);
for (int i = 0; i < 4; i++)
fragB[i].load(hShare, loadB + i*16, stdB);
for (int i = 0; i < 2; i++)
for (int j = 0; j < 4; j++)
fragC[i][j].mma_sync(fragA[i], fragB[j]);
} while (++idx_lut < lut_size);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
uint txc = tid % 16;
uint tyc = tid / 16;
uint c = idx_N*64 + txc*4;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_A,OP_N,m16n16k16>::get_idx(tid, stdC, (tid & 224)*2);
uint offsetC = idx_B*szCtxHeadState + (idx_M*64 + tyc)*szHeadState + idx_H*szState + c;
for (int i = 0; i < 2; i++)
{
__syncthreads();
for (int j = 0; j < 4; j++)
fragC[i][j].store(fShare, storC + j*16, stdC);
__syncthreads();
if (N64 || c < szState)
{
for (int j = 0; j < 2; j++)
*(uint2*)&C[offsetC + szHeadState*(j*32 + i*16)] = to_half4(
ew_add(
ew_add(
*(float4*)&fShare[loadC + j*64 + 0*128],
*(float4*)&fShare[loadC + j*64 + 1*128]),
ew_add(
*(float4*)&fShare[loadC + j*64 + 2*128],
*(float4*)&fShare[loadC + j*64 + 3*128])
)
);
}
}
}
else
{
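        // no lut entries for this block-row: zero this 64x64 output tile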
uint c = idx_N*64 + tx*8;
uint offsetC = idx_B*szCtxHeadState + (idx_M*64 + ty)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
uint4 zero = {0};
*(uint4*)&C[offsetC + szHeadState* 0] = zero;
*(uint4*)&C[offsetC + szHeadState*32] = zero;
}
}
}
// C = A * B or A.T * B
// Dims: M, N, K
// N64: N even mult of 64
// A is sparse, B is dense, C is dense
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(128) hgemm_blocksparse_32x64x32_xn_sdd(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
const uint stdA = 48;
const uint stdB = 80;
const uint stdC = 132;
__shared__ ehalf hShare[(stdA + stdB)*32];
float* fShare = (float*)hShare;
uint2* Lut2s = (uint2*)&hShare[(stdA + stdB)*32];
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
uint txb = tid % 8;
uint tyb = tid / 8;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 128)
{
uint2 entry = Lut[i];
entry.x *= 32*32; // 1024 entries of A per block
entry.y *= szHeadState*32; // 32 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint txa = tid % 4;
uint tya = tid / 4;
uint storA = tya*stdA + txa*8;
uint storB = tyb*stdB + txb*8 + stdA*32;
uint loadA = fragmentA<OP_A,m16n16k16>::get_idx(tid, stdA, (tid & 64)*(OP_A == OP_N ? 1 : stdA)*16/64);
uint loadB = fragmentB<OP_N,m16n16k16>::get_idx(tid, stdB, (tid & 64)*stdB*16/64 + (tid & 32) + stdA*32);
uint b = idx_N*64 + txb*8;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + tid*8;
uint offsetB = idx_B*szCtxHeadState + tyb*szHeadState + idx_H*szState + b;
bool inB = N64 || b < szState;
fragmentC<OP_A,OP_N,m16n16k16> fragC[2][2];
int idx_lut = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint2 entry = Lut2s[idx_lut];
uint4 b00 = {0};
uint4 b16 = {0};
entry.x += offsetA;
entry.y += offsetB;
uint4 a00 = *(uint4*)&A[entry.x];
if (inB)
{
b00 = *(uint4*)&B[entry.y + 0*szHeadState];
b16 = *(uint4*)&B[entry.y + 16*szHeadState];
}
__syncthreads();
*(uint4*)&hShare[storA] = a00;
*(uint4*)&hShare[storB + 0*stdB] = b00;
*(uint4*)&hShare[storB + 16*stdB] = b16;
__syncthreads();
fragmentA<OP_A,m16n16k16> fragA[2];
fragmentB<OP_N,m16n16k16> fragB[2];
for (int i = 0; i < 2; i++)
{
fragA[i].load(hShare, loadA + (OP_A == OP_N ? stdA : 1)*i*16, stdA);
fragB[i].load(hShare, loadB + i*16, stdB);
}
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
fragC[i][j].mma_sync(fragA[i], fragB[j]);
} while (++idx_lut < lut_size);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
uint txc = tid % 16;
uint tyc = tid / 16;
uint c = idx_N*64 + txc*4;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_A,OP_N,m16n16k16>::get_idx(tid, stdC, tid & 96);
uint offsetC = idx_B*szCtxHeadState + (idx_M*32 + tyc)*szHeadState + idx_H*szState + c;
for (int i = 0; i < 2; i++)
{
__syncthreads();
for (int j = 0; j < 2; j++)
fragC[i][j].store(fShare, storC + j*16, stdC);
__syncthreads();
if (N64 || c < szState)
{
for (int j = 0; j < 2; j++)
*(uint2*)&C[offsetC + szHeadState*(j*8 + i*16)] = to_half4(ew_add(
*(float4*)&fShare[loadC + stdC*j*8 + 0],
*(float4*)&fShare[loadC + stdC*j*8 + 64]));
}
}
}
else
{
uint c = idx_N*64 + txb*8;
uint offsetC = idx_B*szCtxHeadState + (idx_M*32 + tyb)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
uint4 zero = {0};
*(uint4*)&C[offsetC + szHeadState* 0] = zero;
*(uint4*)&C[offsetC + szHeadState*16] = zero;
}
}
}
// C = A * B or A.T * B
// Dims: M, N, K
// N64: N even mult of 64
// A is sparse, B is dense, C is dense
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(64) hgemm_blocksparse_16x64x16_xn_sdd(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
const uint stdA = 16;
const uint stdB = 80;
const uint stdC = 68;
__shared__ ehalf hShare[(stdA + stdB)*16];
uint2* Lut2s = (uint2*)&hShare[(stdA + stdB)*16];
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
uint txb = tid % 8;
uint tyb = tid / 8;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 64)
{
uint2 entry = Lut[i];
entry.x *= 16*16; // 256 entries of A per block
entry.y *= szHeadState*16; // 16 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint txa = tid % 4;
uint tya = tid / 4;
uint storA = tya*stdA + txa*4;
uint storB = tyb*stdB + txb*8 + 16*stdA;
uint loadA = fragmentA<OP_A,m16n16k16>::get_idx(tid, stdA);
uint loadB = fragmentB<OP_N,m16n16k16>::get_idx(tid, stdB, 16*stdA + (tid & 32));
uint b = idx_N*64 + txb*8;
bool inB = N64 || b < szState;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + tid*4;
uint offsetB = idx_B*szCtxHeadState + tyb*szHeadState + idx_H*szState + b;
fragmentC<OP_A,OP_N,m16n16k16> fragC[2];
int idx_lut = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint2 entry = Lut2s[idx_lut];
uint4 b0 = {0};
uint4 b8 = {0};
entry.x += offsetA;
entry.y += offsetB;
uint2 a0 = *(uint2*)&A[entry.x];
if (inB)
{
b0 = *(uint4*)&B[entry.y + 0*szHeadState];
b8 = *(uint4*)&B[entry.y + 8*szHeadState];
}
__syncthreads();
*(uint2*)&hShare[storA] = a0;
*(uint4*)&hShare[storB + 0*stdB] = b0;
*(uint4*)&hShare[storB + 8*stdB] = b8;
__syncthreads();
fragmentA<OP_A,m16n16k16> fragA;
fragmentB<OP_N,m16n16k16> fragB;
fragA.load(hShare, loadA, stdA);
#pragma unroll
for (int j = 0; j < 2; j++)
{
fragB.load(hShare, loadB + j*16, stdB);
fragC[j].mma_sync(fragA, fragB);
}
} while (++idx_lut < lut_size);
// allow assembler to forget these registers in the main loop
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// use thread stride of 4 to allow use of shared stride of 68
// which minimizes shared bank conflicts on write.
uint txc = tid % 16;
uint tyc = tid / 16;
uint c = idx_N*64 + txc*4;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_A,OP_N,m16n16k16>::get_idx(tid, stdC, tid & 32);
uint offsetC = idx_B*szCtxHeadState + (idx_M*16 + tyc)*szHeadState + idx_H*szState + c;
__syncthreads();
for (int j = 0; j < 2; j++)
fragC[j].store(hShare, storC + j*16, stdC);
__syncthreads();
if (N64 || c < szState)
{
for (int i = 0; i < 4; i++)
*(uint2*)&C[offsetC + szHeadState*i*4] = *(uint2*)&hShare[loadC + stdC*i*4];
}
}
else
{
uint c = idx_N*64 + txb*8;
uint offsetC = idx_B*szCtxHeadState + (idx_M*16 + tyb)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
uint4 zero = {0};
*(uint4*)&C[offsetC + szHeadState*0] = zero;
*(uint4*)&C[offsetC + szHeadState*8] = zero;
}
}
}
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(64) hgemm_blocksparse_8x64x8_xn_sdd(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
const uint stdA = 8;
const uint stdB = 80;
const uint stdC = 68;
__shared__ ehalf hShare[(stdA + stdB)*16];
uint2* Lut2s = (uint2*)&hShare[(stdA + stdB)*16];
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 64)
{
uint2 entry = Lut[i];
entry.x *= 8*8; // 64 entries of A per block
entry.y *= szHeadState*8; // 8 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint t32 = tid & 32;
uint t31 = tid & 31;
uint txb = tid % 8;
uint tyb = t31 / 8;
uint storA = tid*2;
uint storB = tyb*stdB + txb*8 + t32*20 + 16*stdA;
uint loadA = fragmentA<OP_A,m8n32k16>::get_idx(tid, stdA);
uint loadB = fragmentB<OP_N,m8n32k16>::get_idx(tid, stdB, t32 + 16*stdA);
uint b = idx_N*64 + txb*8;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + t31*2;
uint offsetB = idx_B*szCtxHeadState + tyb*szHeadState + idx_H*szState + b;
fragmentC<OP_A,OP_N,m8n32k16> fragC;
uint idx_lut = t32 / 32;
uint idx_lut2 = 0;
uint lut_size2 = (lut_size + 1)/2;
#pragma unroll 1
do
{
uint a0 = 0;
uint4 b0 = {0};
uint4 b4 = {0};
if (idx_lut < lut_size)
{
uint2 entry = Lut2s[idx_lut];
entry.x += offsetA;
entry.y += offsetB;
a0 = *(uint*)&A[entry.x];
if (b < szState)
{
b0 = *(uint4*)&B[entry.y + 0*szHeadState];
b4 = *(uint4*)&B[entry.y + 4*szHeadState];
}
}
__syncthreads();
*(uint* )&hShare[storA ] = a0;
*(uint4*)&hShare[storB + 0*stdB] = b0;
*(uint4*)&hShare[storB + 4*stdB] = b4;
__syncthreads();
fragmentA<OP_A,m8n32k16> fragA;
fragmentB<OP_N,m8n32k16> fragB;
fragA.load(hShare, loadA, stdA);
fragB.load(hShare, loadB, stdB);
fragC.mma_sync(fragA, fragB);
idx_lut += 2;
} while (++idx_lut2 < lut_size2);
// allow assembler to forget these registers in the main loop
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// use thread stride of 4 to allow use of shared stride of 68
// which minimizes shared bank conflicts on write.
uint txc = tid % 16;
uint tyc = tid / 16;
uint c = idx_N*64 + txc*4;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_A,OP_N,m8n32k16>::get_idx(tid, stdC, tid & 32);
uint offsetC = idx_B*szCtxHeadState + (idx_M*8 + tyc)*szHeadState + idx_H*szState + c;
__syncthreads();
fragC.store(hShare, storC, stdC);
__syncthreads();
if (N64 || c < szState)
{
for (int i = 0; i < 2; i++)
*(uint2*)&C[offsetC + szHeadState*i*4] = *(uint2*)&hShare[loadC + stdC*i*4];
}
}
else
{
uint txc = tid % 8;
uint tyc = tid / 8;
uint c = idx_N*64 + txc*8;
uint offsetC = idx_B*szCtxHeadState + (idx_M*8 + tyc)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
uint4 zero = {0};
*(uint4*)&C[offsetC + szHeadState*0] = zero;
}
}
}
// C = A * B.T
// Dims: M, N, K
// K64: K even mult of 64
// A is dense, B is dense, C is sparse
// 32x32x32 warp tile
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(256) hgemm_blocksparse_64x64x64_nt_dds(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
const uint stdA = 64 + 8;
const uint stdB = 64 + 8;
const uint stdC = 64*4 + 4;
__shared__ ehalf hShare[(stdA + stdB)*64];
float* fShare = (float*)hShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 8;
uint ty = tid / 8;
uint k = tx * 8;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA00 = idx_B*szCtxHeadState + (idx_M*64 + ty)*szHeadState + idx_H*szState + k;
uint offsetB00 = idx_B*szCtxHeadState + (idx_N*64 + ty)*szHeadState + idx_H*szState + k;
uint offsetA32 = offsetA00 + szHeadState*32;
uint offsetB32 = offsetB00 + szHeadState*32;
uint storA = ty*stdA + k;
uint storB = ty*stdB + k;
uint loadA = fragmentA<OP_N,m16n16k16>::get_idx(tid, stdA, (tid & 64)*stdA*32/64 + (tid & 128)*32/128 + 0*stdA);
uint loadB = fragmentB<OP_T,m16n16k16>::get_idx(tid, stdB, (tid & 32)*stdB*32/32 + (tid & 128)*32/128 + 64*stdA);
fragmentC<OP_N,OP_T,m16n16k16> fragC[2][2]; // m,n
uint loop = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint4 a00 = {0}, a32 = {0};
uint4 b00 = {0}, b32 = {0};
if (K64 || k < szState)
{
a00 = *(uint4*)&A[offsetA00];
a32 = *(uint4*)&A[offsetA32];
b00 = *(uint4*)&B[offsetB00];
b32 = *(uint4*)&B[offsetB32];
}
offsetA00 += 64;
offsetA32 += 64;
offsetB00 += 64;
offsetB32 += 64;
if (!K64)
k += 64;
__syncthreads();
*(uint4*)&hShare[storA + 0*stdA + 0*stdA] = a00;
*(uint4*)&hShare[storA + 32*stdA + 0*stdA] = a32;
*(uint4*)&hShare[storB + 0*stdB + 64*stdA] = b00;
*(uint4*)&hShare[storB + 32*stdB + 64*stdA] = b32;
__syncthreads();
fragmentA<OP_N,m16n16k16> fragA[2][2]; // m,k
fragmentB<OP_T,m16n16k16> fragB[2][2]; // n,k
for (int m = 0; m < 2; m++)
for (int k = 0; k < 2; k++)
fragA[m][k].load(hShare, loadA + m*16*stdA + k*16, stdA);
for (int n = 0; n < 2; n++)
for (int k = 0; k < 2; k++)
fragB[n][k].load(hShare, loadB + n*16*stdB + k*16, stdB);
for (int m = 0; m < 2; m++)
for (int n = 0; n < 2; n++)
for (int k = 0; k < 2; k++)
fragC[m][n].mma_sync(fragA[m][k], fragB[n][k]);
} while (++loop < loops);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
uint txc = tid % 16;
uint tyc = tid / 16;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_N,OP_T,m16n16k16>::get_idx(tid, stdC, (tid & 224));
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*64*64 + tid*4;
for (int m = 0; m < 2; m++)
{
__syncthreads();
for (int n = 0; n < 2; n++)
fragC[m][n].store(fShare, storC + n*16, stdC);
__syncthreads();
for (int i = 0; i < 2; i++)
{
float4 sum4 = ew_add(
*(float4*)&fShare[loadC + i*64 + 0*128],
*(float4*)&fShare[loadC + i*64 + 1*128]
);
store((CV*)(C + 64*(i*32 + m*16)), sum4);
}
}
}
// C = A * B.T
// Dims: M, N, K
// K64: K even mult of 64
// A is dense, B is dense, C is sparse
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(128) hgemm_blocksparse_32x32x64_nt_dds(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
const uint stdA = 72;
const uint stdB = 72;
const uint stdC = 132;
__shared__ ehalf hShare[(stdA + stdB)*32];
float* fShare = (float*)hShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 8;
uint ty = tid / 8;
uint k = tx * 8;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA00 = idx_B*szCtxHeadState + (idx_M*32 + ty)*szHeadState + idx_H*szState + k;
uint offsetB00 = idx_B*szCtxHeadState + (idx_N*32 + ty)*szHeadState + idx_H*szState + k;
uint offsetA16 = offsetA00 + szHeadState*16;
uint offsetB16 = offsetB00 + szHeadState*16;
uint storA = ty*stdA + k;
uint storB = ty*stdB + k;
uint loadA = fragmentA<OP_N,m16n16k16>::get_idx(tid, stdA, (tid & 96)/2);
uint loadB = fragmentB<OP_T,m16n16k16>::get_idx(tid, stdB, (tid & 96)/2 + stdA*32);
fragmentC<OP_N,OP_T,m16n16k16> fragC[2][2];
uint loop = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint4 a00 = {0}, a16 = {0};
uint4 b00 = {0}, b16 = {0};
if (K64 || k < szState)
{
a00 = *(uint4*)&A[offsetA00];
a16 = *(uint4*)&A[offsetA16];
b00 = *(uint4*)&B[offsetB00];
b16 = *(uint4*)&B[offsetB16];
}
offsetA00 += 64;
offsetA16 += 64;
offsetB00 += 64;
offsetB16 += 64;
if (!K64)
k += 64;
__syncthreads();
*(uint4*)&hShare[storA + 0*stdA + 0*stdA] = a00;
*(uint4*)&hShare[storA + 16*stdA + 0*stdA] = a16;
*(uint4*)&hShare[storB + 0*stdB + 32*stdA] = b00;
*(uint4*)&hShare[storB + 16*stdB + 32*stdA] = b16;
__syncthreads();
fragmentA<OP_N,m16n16k16> fragA[2];
fragmentB<OP_T,m16n16k16> fragB[2];
for (int i = 0; i < 2; i++)
{
fragA[i].load(hShare, loadA + stdA*i*16, stdA);
fragB[i].load(hShare, loadB + stdB*i*16, stdB);
}
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
fragC[i][j].mma_sync(fragA[i], fragB[j]);
} while (++loop < loops);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
tx = tid % 8;
ty = tid / 8;
uint loadC = ty*stdC + tx*4;
uint storC = fragmentC<OP_N,OP_T,m16n16k16>::get_idx(tid, stdC, (tid & 96));
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*32*32 + tid*4;
for (int i = 0; i < 2; i++)
{
__syncthreads();
for (int j = 0; j < 2; j++)
fragC[i][j].store(fShare, storC + j*16, stdC);
__syncthreads();
float4 sum4 = ew_add(
ew_add(
*(float4*)&fShare[loadC + 0],
*(float4*)&fShare[loadC + 32]),
ew_add(
*(float4*)&fShare[loadC + 64],
*(float4*)&fShare[loadC + 96]));
store((CV*)(C + i*4*128), sum4);
}
}
// C = A * B.T
// Dims: M, N, K
// K64: K even mult of 64
// dds: A is dense, B is dense, C is sparse
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(64) hgemm_blocksparse_16x16x64_nt_dds(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
const uint stdA = 72;
const uint stdB = 72;
const uint stdC = 48;
__shared__ ehalf hShare[(stdA + stdB)*16];
float* fShare = (float*)hShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 8;
uint ty = tid / 8;
uint k = tx * 8;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA0 = idx_B*szCtxHeadState + (idx_M*16 + ty)*szHeadState + idx_H*szState + k;
uint offsetB0 = idx_B*szCtxHeadState + (idx_N*16 + ty)*szHeadState + idx_H*szState + k;
uint offsetA8 = offsetA0 + szHeadState*8;
uint offsetB8 = offsetB0 + szHeadState*8;
uint storA = ty*stdA + k;
uint storB = ty*stdB + k;
uint loadA = fragmentA<OP_N,m16n16k16>::get_idx(tid, stdA, (tid & 32));
uint loadB = fragmentB<OP_T,m16n16k16>::get_idx(tid, stdB, (tid & 32) + 16*stdA);
fragmentC<OP_N,OP_T,m16n16k16> fragC;
uint loop = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint4 a0 = {0}, a8 = {0};
uint4 b0 = {0}, b8 = {0};
if (K64 || k < szState)
{
a0 = *(uint4*)&A[offsetA0];
a8 = *(uint4*)&A[offsetA8];
b0 = *(uint4*)&B[offsetB0];
b8 = *(uint4*)&B[offsetB8];
}
offsetA0 += 64;
offsetA8 += 64;
offsetB0 += 64;
offsetB8 += 64;
if (!K64)
k += 64;
__syncthreads();
*(uint4*)&hShare[storA + 0*stdA + 0*stdA] = a0;
*(uint4*)&hShare[storA + 8*stdA + 0*stdA] = a8;
*(uint4*)&hShare[storB + 0*stdB + 16*stdA] = b0;
*(uint4*)&hShare[storB + 8*stdB + 16*stdA] = b8;
__syncthreads();
fragmentA<OP_N,m16n16k16> fragA;
fragmentB<OP_T,m16n16k16> fragB;
#pragma unroll
for (uint j = 0; j < 2; j++)
{
fragA.load(hShare, loadA + j*16, stdA);
fragB.load(hShare, loadB + j*16, stdB);
fragC.mma_sync(fragA, fragB);
}
} while (++loop < loops);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
tx = tid % 4;
ty = tid / 4;
uint loadC = ty*stdC + tx*4;
uint storC = fragmentC<OP_N,OP_T,m16n16k16>::get_idx(tid, stdC, (tid & 32)/2);
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*16*16 + tid*4;
__syncthreads();
fragC.store(fShare, storC, stdC);
__syncthreads();
float4 sum4 = ew_add(
*(float4*)&fShare[loadC + 0],
*(float4*)&fShare[loadC + 16]);
store((CV*)C, sum4);
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(32) hgemm_blocksparse_8x8x64_nt_dds(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
const uint stdAB = 72;
const uint stdC = 8;
__shared__ ehalf hShare[stdAB*8*2];
float* fShare = (float*)hShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 8;
uint ty = tid / 8;
uint k = tx * 8;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA0 = idx_B*szCtxHeadState + (idx_M*8 + ty)*szHeadState + idx_H*szState + k;
uint offsetB0 = idx_B*szCtxHeadState + (idx_N*8 + ty)*szHeadState + idx_H*szState + k;
uint offsetA4 = offsetA0 + szHeadState*4;
uint offsetB4 = offsetB0 + szHeadState*4;
uint storAB = ty*stdAB + k;
uint loadA = fragmentA<OP_N,m8n8k16>::get_idx(tid, stdAB, 0*stdAB);
uint loadB = fragmentB<OP_T,m8n8k16>::get_idx(tid, stdAB, 8*stdAB);
fragmentC<OP_N,OP_T,m8n8k16> fragC;
uint loop = 0;
#pragma unroll 1
do
{
uint4 a0 = {0}, a4 = {0};
uint4 b0 = {0}, b4 = {0};
if (K64 || k < szState)
{
a0 = *(uint4*)&A[offsetA0];
a4 = *(uint4*)&A[offsetA4];
b0 = *(uint4*)&B[offsetB0];
b4 = *(uint4*)&B[offsetB4];
}
offsetA0 += 64;
offsetA4 += 64;
offsetB0 += 64;
offsetB4 += 64;
if (!K64)
k += 64;
*(uint4*)&hShare[storAB + 0*stdAB + 0*stdAB] = a0;
*(uint4*)&hShare[storAB + 4*stdAB + 0*stdAB] = a4;
*(uint4*)&hShare[storAB + 0*stdAB + 8*stdAB] = b0;
*(uint4*)&hShare[storAB + 4*stdAB + 8*stdAB] = b4;
fragmentA<OP_N,m8n8k16> fragA;
fragmentB<OP_T,m8n8k16> fragB;
#pragma unroll
for (uint j = 0; j < 4; j++)
{
fragA.load(hShare, loadA + j*16, stdAB);
fragB.load(hShare, loadB + j*16, stdAB);
fragC.mma_sync(fragA, fragB);
}
} while (++loop < loops);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
uint storC = fragmentC<OP_N,OP_T,m8n8k16>::get_idx(tid, stdC);
fragC.store(fShare, storC, stdC);
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*8*8 + tid*2;
store((CV*)C, *(float2*)&fShare[tid*2]);
}
#else // __CUDA_ARCH__ >= 700
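// The kernels above rely on the HMMA/wmma fragment types from gpu_hmma.h,
// which require sm_70+. For older architectures the same templates are
// reduced to trivial stubs (they just zero the first output element) so the
// file still compiles.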
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(256) hgemm_blocksparse_64x64x64_xn_sdd(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
*C = 0;
}
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(128) hgemm_blocksparse_32x64x32_xn_sdd(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
*C = 0;
}
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(64) hgemm_blocksparse_16x64x16_xn_sdd(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
*C = 0;
}
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(64) hgemm_blocksparse_8x64x8_xn_sdd(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
*C = 0;
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(256) hgemm_blocksparse_64x64x64_nt_dds(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
*C = 0;
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(128) hgemm_blocksparse_32x32x64_nt_dds(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
*C = 0;
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(64) hgemm_blocksparse_16x16x64_nt_dds(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
*C = 0;
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(32) hgemm_blocksparse_8x8x64_nt_dds(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadState, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
*C = 0;
}
#endif // __CUDA_ARCH__ >= 700
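// Host launcher for the hgemm_blocksparse_*_xn_sdd kernels above
// (block-sparse A, dense B, dense C). op == 1 launches the OP_N variant
// (C = A * B); any other value launches OP_T (C = A.T * B).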
bool blocksparse_transformer_xn(CUstream stream,
const uint2* lut,
const ehalf* a,
const ehalf* b,
ehalf* c,
uint block_size, uint blocks,
uint batch_dim, uint ctx_blks, uint head_dim, uint state_dim,
uint lut_heads, uint lut_dim, uint op, uint magic, uint shift, uint max_lut)
{
uint szState = state_dim;
uint szHeadState = head_dim * szState;
uint szCtxHeadState = ctx_blks * block_size * szHeadState;
uint szBlocksBlk = blocks * block_size * block_size;
uint szHeadBlocksBlk = head_dim * szBlocksBlk;
// if just one lut head, broadcast block-sparsity to all heads
uint szLut = lut_heads > 1 ? lut_dim : 0;
// compound gridDim.x with m and n coords
uint gridN = CEIL_DIV(state_dim, 64);
uint gridM = ctx_blks - 1;
uint gridX = ctx_blks * gridN;
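    // gridDim.x compounds both output tile coordinates (ctx block row x 64-wide
    // state column tile); the kernels split it back apart with the magic/shift
    // division and, for OP_N, remap idx_M = gridM - idx_M so the largest
    // (lower-triangular) reductions are scheduled first.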
uint shared = ((max_lut+1)/2)*2*8; // round up to nearest even, 8 bytes per entry
dim3 grid(gridX, batch_dim, head_dim);
if (op == 1) // NN
{
if (block_size == 8)
hgemm_blocksparse_8x64x8_xn_sdd<OP_N,false><<<grid, 64,shared,stream>>>(lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else if (block_size == 16)
hgemm_blocksparse_16x64x16_xn_sdd<OP_N,false><<<grid, 64,shared,stream>>>(lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else if (block_size == 32)
hgemm_blocksparse_32x64x32_xn_sdd<OP_N,false><<<grid,128,shared,stream>>>(lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else
hgemm_blocksparse_64x64x64_xn_sdd<OP_N,false><<<grid,256,shared,stream>>>(lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
}
else // TN
{
if (block_size == 8)
hgemm_blocksparse_8x64x8_xn_sdd<OP_T,false><<<grid, 64,shared,stream>>>(lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else if (block_size == 16)
hgemm_blocksparse_16x64x16_xn_sdd<OP_T,false><<<grid, 64,shared,stream>>>(lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else if (block_size == 32)
hgemm_blocksparse_32x64x32_xn_sdd<OP_T,false><<<grid,128,shared,stream>>>(lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else
hgemm_blocksparse_64x64x64_xn_sdd<OP_T,false><<<grid,256,shared,stream>>>(lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
}
return true;
}
template <typename CT, typename CV2, typename CV4>
bool blocksparse_transformer_nt(CUstream stream,
const uint2* lut,
const ehalf* a,
const ehalf* b,
CT* c,
uint block_size, uint blocks,
uint batch_dim, uint ctx_blks, uint head_dim, uint state_dim,
uint lut_heads, uint lut_dim)
{
uint szState = state_dim;
uint szHeadState = head_dim * szState;
uint szCtxHeadState = ctx_blks * block_size * szHeadState;
uint szBlocksBlk = blocks * block_size * block_size;
uint szHeadBlocksBlk = head_dim * szBlocksBlk;
// if just one lut head, broadcast block-sparsity to all heads
uint szLut = lut_heads > 1 ? lut_dim : 0;
uint loops = CEIL_DIV(state_dim, 64);
bool k64 = (state_dim & 63) == 0;
dim3 grid(blocks, batch_dim, head_dim);
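    // One CTA per non-zero block (x), batch element (y) and head (z). Each CTA
    // steps through the K dimension in 64-wide slices ('loops'); the K64
    // template flag drops the tail bounds check when state_dim is a multiple
    // of 64.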
if (block_size == 8)
{
if (k64)
hgemm_blocksparse_8x8x64_nt_dds<CT,CV2, true><<<grid, 32,0,stream>>>(lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
else
hgemm_blocksparse_8x8x64_nt_dds<CT,CV2,false><<<grid, 32,0,stream>>>(lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
}
else if (block_size == 16)
{
if (k64)
hgemm_blocksparse_16x16x64_nt_dds<CT,CV4, true><<<grid, 64,0,stream>>>(lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
else
hgemm_blocksparse_16x16x64_nt_dds<CT,CV4,false><<<grid, 64,0,stream>>>(lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
}
else if (block_size == 32)
hgemm_blocksparse_32x32x64_nt_dds<CT,CV4,false><<<grid,128,0,stream>>>(lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
else
hgemm_blocksparse_64x64x64_nt_dds<CT,CV4,false><<<grid,256,0,stream>>>(lut, a, b, c, szCtxHeadState, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
// cudaError_t error = cudaGetLastError();
// printf("%s\n%s\n", cudaGetErrorName(error), cudaGetErrorString(error));
return true;
}
template bool blocksparse_transformer_nt<ehalf,ehalf2,ehalf4>(CUstream stream, const uint2* lut, const ehalf* a, const ehalf* b, ehalf* c, uint block_size, uint blocks, uint batch_dim, uint ctx_blks, uint head_dim, uint state_dim, uint lut_heads, uint lut_dim);
template bool blocksparse_transformer_nt<bhalf,bhalf2,bhalf4>(CUstream stream, const uint2* lut, const ehalf* a, const ehalf* b, bhalf* c, uint block_size, uint blocks, uint batch_dim, uint ctx_blks, uint head_dim, uint state_dim, uint lut_heads, uint lut_dim);
template <uint U, uint BSIZE, typename MASKT>
__global__ void blocksparse_masked_softmax(
const uint2* __restrict__ Lut,
const MASKT* __restrict__ Mask,
const bhalf* __restrict__ X,
ehalf* Y,
uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init, uint use_mask)
{
__shared__ float Max[32];
__shared__ float Sum[32];
uint2* Lut2s = (uint2*)&Sum[32];
uint tid = threadIdx.x;
uint idx_Q = blockIdx.x / BSIZE; // Q dim
uint idx_q = blockIdx.x % BSIZE; // Q dim
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
Lut += idx_H * szLut;
Mask += idx_H * szMask + idx_q * blocks;
uint2 lut_head = Lut[idx_Q];
if (tid < 32)
{
// Allows non-power of 2 threads to work
Max[tid] = -FLT_MAX;
Sum[tid] = 0.0f;
}
// prefetch the lut data into shared
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += blockDim.x)
{
uint2 entry = Lut[i];
entry.y = use_mask ? (uint)__ldg(Mask + entry.x) : 0xffffffff;
entry.x *= BSIZE*BSIZE;
Lut2s[i] = entry;
//printf("%3d %3d %3d %08x\n", idx_Q, idx_q, i, entry.y);
}
__syncthreads();
uint lut_idx = (tid & (1024-BSIZE))*U/BSIZE;
uint tidx = tid % BSIZE;
uint mask_bit = 1 << tidx;
uint offset = idx_B*szBatch + idx_H*szHead + idx_q*BSIZE + tidx;
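    // Each group of BSIZE consecutive threads owns U consecutive lut entries
    // (blocks); tidx is this thread's column within the block row and mask_bit
    // selects that column in the per-block row mask. Masked-out or
    // out-of-range entries are loaded as -FLT_MAX so they reduce to exp() == 0.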
float xval[U];
#pragma unroll
for (int i = 0; i < U; i++)
{
uint2 entry = Lut2s[lut_idx + i];
uint offsetX = offset + entry.x;
bool in = lut_idx + i < lut_size;
float val = load(X + offsetX, 0, in);
xval[i] = in && (entry.y & mask_bit) != 0 ? val : -FLT_MAX;
}
// reduce within thread
float Xmax[U];
for (int i = 0; i < U; i++)
Xmax[i] = xval[i];
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
Xmax[i] = fmaxf(Xmax[i], Xmax[i+j]);
float xmax = Xmax[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Max[tid/32] = xmax;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
xmax = Max[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
// final reduction to shared
Max[tid] = xmax;
}
__syncthreads();
xmax = Max[0];
}
// compute exponent of softmax
float Xsum[U];
for (int i = 0; i < U; i++)
{
// use fast approx math: e**x == 2**(x * log2(e))
float exp = (xval[i] - xmax) * scale;
asm("ex2.approx.ftz.f32 %0, %0;" : "+f"(exp) :);
Xsum[i] = xval[i] = exp;
}
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
Xsum[i] = Xsum[i] + Xsum[i+j];
float exp_sum = Xsum[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = exp_sum;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
exp_sum = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
// final reduction to shared
Sum[tid] = exp_sum;
}
__syncthreads();
exp_sum = Sum[0];
}
float rcp_exp_sum = exp_sum;
asm("rcp.approx.ftz.f32 %0, %0;" : "+f"(rcp_exp_sum) :);
#pragma unroll
for (int i = 0; i < U; i++)
{
ehalf out;
asm("cvt.rn.f16.f32 %0, %1;" : "=h"(out.x) : "f"(xval[i] * rcp_exp_sum));
uint offsetY = offset + Lut2s[lut_idx + i].x;
if (lut_idx + i < lut_size)
__stg(Y + offsetY, out);
}
}
template <uint U, uint BSIZE, typename MASKT>
__global__ void blocksparse_masked_softmax_grad(
const uint2* __restrict__ Lut,
const MASKT* __restrict__ Mask,
const ehalf* __restrict__ DY,
const ehalf* __restrict__ Y,
ehalf* DX,
uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init, uint use_mask)
{
__shared__ float Sum[32];
uint2* Lut2s = (uint2*)&Sum[32];
uint tid = threadIdx.x;
uint idx_Q = blockIdx.x / BSIZE; // Q dim
uint idx_q = blockIdx.x % BSIZE; // Q dim
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut and/or mask
Lut += idx_H * szLut;
Mask += idx_H * szMask + idx_q * blocks;
uint2 lut_head = Lut[idx_Q];
if (tid < 32)
Sum[tid] = 0.0f;
// prefetch the lut data into shared
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += blockDim.x)
{
uint2 entry = Lut[i];
entry.y = use_mask ? (uint)__ldg(Mask + entry.x) : 0xffffffff;
entry.x *= BSIZE*BSIZE;
Lut2s[i] = entry;
}
__syncthreads();
uint lut_idx = (tid & (1024-BSIZE))*U/BSIZE;
uint tidx = tid % BSIZE;
uint mask_bit = 1 << tidx;
uint offset = idx_B*szBatch + idx_H*szHead + idx_q*BSIZE + tidx;
float dy[U], y[U];
#pragma unroll
for (int i = 0; i < U; i++)
{
uint2 entry = Lut2s[lut_idx + i];
uint offsetY = offset + entry.x;
bool in = lut_idx + i < lut_size && (entry.y & mask_bit) != 0;
dy[i] = load(DY + offsetY, 0, in);
y[i] = load(Y + offsetY, 0, in);
}
// compute dy * y
float dyy[U];
for (int i = 0; i < U; i++)
dyy[i] = dy[i] * y[i];
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
dyy[i] = dyy[i] + dyy[i+j];
float sum_dyy = dyy[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = sum_dyy;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
sum_dyy = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
// final reduction to shared
Sum[tid] = sum_dyy;
}
__syncthreads();
sum_dyy = Sum[0];
}
// dx = (dy - sum_dyy) * y * scale
#pragma unroll
for (int i = 0; i < U; i++)
{
float dx = (dy[i] - sum_dyy) * y[i] * scale;
ehalf out;
asm("cvt.rn.f16.f32 %0, %1;" : "=h"(out.x) : "f"(dx));
uint offsetX = offset + Lut2s[lut_idx + i].x;
if (lut_idx + i < lut_size)
__stg(DX + offsetX, out);
}
}
typedef unsigned long long uint64;
template <uint UNROLL>
__global__ void __launch_bounds__(1024,2) blocksparse_masked_softmax_64x64(
const uint2* __restrict__ Lut,
const uint64* __restrict__ Mask,
const bhalf* __restrict__ X,
ehalf* Y,
uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init, uint max_lut, uint use_mask)
{
__shared__ float Max[32];
__shared__ float Sum[32];
uint64* LutMask64 = (uint64*)&Sum[32];
uint* LutMask32 = (uint*)&Sum[32];
uint* LutOffset = (uint*)&LutMask64[max_lut];
uint tid = threadIdx.x;
uint idx_Q = blockIdx.x / 64;
uint idx_q = blockIdx.x % 64;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
Lut += idx_H * szLut;
Mask += idx_H * szMask + idx_q * blocks;
uint2 lut_head = Lut[idx_Q];
if (tid < 32)
{
// Allows non-power of 2 threads to work
Max[tid] = -FLT_MAX;
Sum[tid] = 0.0f;
}
// prefetch the lut data into shared
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < max_lut; i += blockDim.x)
{
uint64 mask = 0;
if (i < lut_size)
{
uint2 entry = Lut[i];
uint blk_id = entry.x;
LutOffset[i] = blk_id * 64*64;
mask = use_mask ? __ldg(Mask + blk_id) : 0xffffffffffffffff;
}
LutMask64[i] = mask;
}
__syncthreads();
uint lut_idx = (tid & (1024-32))*UNROLL/32;
uint tidx = (tid & 31)*2;
uint offset = idx_B*szBatch + idx_H*szHead + idx_q*64 + tidx + LutOffset[lut_idx];
X += offset;
bhalf2 xval[UNROLL];
#pragma unroll
for (uint i = 0; i < UNROLL; i++)
{
// nvcc/ptxas is really bad at generating sass that maximizes use of immediate offsets.
// This means way more registers are tied up in memory load addresses than are needed.
        // This kernel's performance is hugely dependent on efficient register use so give the compiler a hand:
asm (
"{ \n\t"
".reg .pred p; \n\t"
".reg .s64 X, offset; \n\t"
"setp.lt.u32 p, %3, %4; \n\t"
"mov.b64 offset, {%2, 0}; \n\t"
"add.s64 X, %1, offset; \n\t"
"mov.u32 %0, 0xff80ff80; \n\t" // bhalf2 -inf, -inf
"@p ld.global.nc.u32 %0, [X]; \n\t"
"}" :"=r"(xval[i].x) : "l"(X), "r"(i*64*64*2), "r"(lut_idx + i), "r"(lut_size));
}
// split the 64 bit mask by half warp
uint tid16 = (tid & 16)/16;
uint mask0 = 1 << (tidx - tid16*32);
uint mask1 = mask0 << 1;
#pragma unroll
for (int i = 0; i < UNROLL; i++)
{
uint mask32 = LutMask32[(lut_idx + i)*2 + tid16];
if ((mask32 & mask0) == 0)
xval[i].x = (xval[i].x & 0xffff0000) | 0x0000ff80; // 0x0000fc00
if ((mask32 & mask1) == 0)
xval[i].x = (xval[i].x & 0x0000ffff) | 0xff800000;
}
// reduce within thread
float Xmax[UNROLL];
for (int i = 0; i < UNROLL; i++)
Xmax[i] = ew_max(to_float(xval[i]));
float xmax = Xmax[0];
for (int i = 1; i < UNROLL; i++)
xmax = fmaxf(Xmax[i], xmax);
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Max[tid/32] = xmax;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
xmax = Max[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
// final reduction to shared
Max[tid] = xmax;
}
__syncthreads();
xmax = Max[0];
}
// subtract xmax and compute exponent
float exp_sum = 0;
for (int i = 0; i < UNROLL; i++)
{
// use fast approx math: e**x == 2**(x * log2(e))
float2 Xval = ew_mul(ew_sub(to_float(xval[i]), xmax), scale);
asm("ex2.approx.ftz.f32 %0, %0;" : "+f"(Xval.x) :);
asm("ex2.approx.ftz.f32 %0, %0;" : "+f"(Xval.y) :);
exp_sum += ew_sum(Xval);
xval[i] = to_bhalf(Xval);
}
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = exp_sum;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
exp_sum = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
// final reduction to shared
Sum[tid] = exp_sum;
}
__syncthreads();
exp_sum = Sum[0];
}
float rcp_exp_sum = exp_sum;
asm("rcp.approx.ftz.f32 %0, %0;" : "+f"(rcp_exp_sum) :);
Y += offset;
#pragma unroll
for (int i = 0; i < UNROLL; i++)
{
ehalf2 y = to_ehalf(ew_mul(to_float(xval[i]), rcp_exp_sum));
asm (
"{ \n\t"
".reg .pred p; \n\t"
".reg .s64 X, offset; \n\t"
"setp.lt.u32 p, %3, %4; \n\t"
"mov.b64 offset, {%1, 0}; \n\t"
"add.s64 X, %0, offset; \n\t"
"@p st.global.wb.u32 [X], %2; \n\t"
"}" :: "l"(Y), "r"(i*64*64*2), "r"(y.x), "r"(lut_idx + i), "r"(lut_size));
}
}
template <uint UNROLL>
__global__ void __launch_bounds__(1024,2) blocksparse_masked_softmax_64x64_grad(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ DY,
const ehalf* __restrict__ Y,
ehalf* DX,
uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init)
{
__shared__ float Sum[32];
uint* LutOffset = (uint*)&Sum[32];
uint tid = threadIdx.x;
uint idx_Q = blockIdx.x / 64;
uint idx_q = blockIdx.x % 64;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
Lut += idx_H * szLut;
uint2 lut_head = Lut[idx_Q];
if (tid < 32)
Sum[tid] = 0.0f;
// prefetch the lut data into shared
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += blockDim.x)
LutOffset[i] = Lut[i].x * 64*64;
__syncthreads();
uint lut_idx = (tid & (1024-32))*UNROLL/32;
uint tidx = (tid & 31)*2;
uint offset = idx_B*szBatch + idx_H*szHead + idx_q*64 + tidx + LutOffset[lut_idx];
DY += offset;
Y += offset;
ehalf2 dy[UNROLL], y[UNROLL];
#pragma unroll
for (uint i = 0; i < UNROLL; i++)
{
asm (
"{ \n\t"
".reg .pred p; \n\t"
".reg .s64 DY, Y, offset; \n\t"
"setp.lt.u32 p, %5, %6; \n\t"
"mov.b64 offset, {%4, 0}; \n\t"
"add.s64 DY, %2, offset; \n\t"
"add.s64 Y, %3, offset; \n\t"
"mov.u32 %0, 0; \n\t"
"mov.u32 %1, 0; \n\t"
"@p ld.global.nc.u32 %0, [DY]; \n\t"
"@p ld.global.nc.u32 %1, [Y]; \n\t"
"}" : "=r"(dy[i].x), "=r"(y[i].x) : "l"(DY), "l"(Y), "r"(i*64*64*2), "r"(lut_idx + i), "r"(lut_size));
}
// compute dy * y and start reduction
float sum_dyy = 0.0f;
for (int i = 0; i < UNROLL; i++)
sum_dyy += ew_sum(ew_mul(to_float(dy[i]), to_float(y[i])));
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = sum_dyy;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
sum_dyy = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
// final reduction to shared
Sum[tid] = sum_dyy;
}
__syncthreads();
sum_dyy = Sum[0];
}
DX += offset;
#pragma unroll
for (uint i = 0; i < UNROLL; i++)
{
// dx = (dy - sum_dyy) * y * scale
ehalf2 dx = to_ehalf(ew_mul(ew_mul(ew_sub(to_float(dy[i]), sum_dyy), to_float(y[i])), scale));
asm (
"{ \n\t"
".reg .pred p; \n\t"
".reg .s64 DX, offset; \n\t"
"setp.lt.u32 p, %3, %4; \n\t"
"mov.b64 offset, {%1, 0}; \n\t"
"add.s64 DX, %0, offset; \n\t"
"@p st.global.wb.u32 [DX], %2; \n\t"
"}" :: "l"(DX), "r"(i*64*64*2), "r"(dx.x), "r"(lut_idx + i), "r"(lut_size));
}
}
#define LOG2e 1.4426950408889634f
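// log2(e): folded into 'scale' below so the kernels can evaluate e**x as
// 2**(x * log2(e)) with a single ex2.approx instruction.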
typedef unsigned char uchar;
bool BlocksparseMaskedSoftmax(CUstream stream,
const uint2* lut,
const char* mask,
const bhalf* x,
ehalf* y,
uint block_size, uint blocks,
uint batch_dim, uint head_dim, uint ctx_blks,
uint lut_heads, uint lut_dim, uint max_lut,
uint mask_heads, float scale)
{
uint szLut = lut_heads > 1 ? lut_dim : 0;
uint szMask = mask_heads > 1 ? blocks * block_size : 0;
uint gridQ = ctx_blks * block_size;
uint szHead = blocks * block_size * block_size;
uint szBatch = head_dim * szHead;
uint maxK = max_lut * block_size;
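    // maxK = worst-case number of softmax elements in a query row; it selects
    // the unroll factor and thread count below.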
//cuMemsetD16Async((CUdeviceptr)c, 0, szBatch*batch_dim, stream);
// combine scaling with fast e**x compute
scale *= LOG2e;
dim3 grid(gridQ, batch_dim, head_dim);
if (block_size == 64)
{
// Unroll factor 8 (ctx_size up to 16K)
if (maxK > 1024*8)
{
uint threads = 1024;
uint shfl_init = 16;
uint lut_max = threads * 8 / 32;
uint shared = lut_max * 12;
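            // 12 bytes of shared memory per lut entry: an 8-byte block mask plus a
            // 4-byte block offset (LutMask64 / LutOffset in the kernel).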
blocksparse_masked_softmax_64x64<8><<<grid,threads,shared,stream>>>(lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
}
// Unroll factor of 4 is preferred (keeps these kernels under 32 registers for max occupancy)
else if (maxK >= 64*4)
{
uint threads = CEIL_DIV(maxK, 64*4) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
uint lut_max = threads * 4 / 32;
uint shared = lut_max * 12;
blocksparse_masked_softmax_64x64<4><<<grid,threads,shared,stream>>>(lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
}
// Unroll factor 1
else
{
uint threads = CEIL_DIV(maxK, 64) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
uint lut_max = threads * 1 / 32;
uint shared = lut_max * 12;
blocksparse_masked_softmax_64x64<1><<<grid,threads,shared,stream>>>(lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
}
}
else
{
uint shared = max_lut*8;
// Unroll factor 8 (ctx_size up to 8K)
if (maxK > 1024*4)
{
if (block_size == 8)
blocksparse_masked_softmax<8, 8, uchar><<<grid,1024,shared,stream>>>(lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
else if (block_size == 16)
blocksparse_masked_softmax<8,16,ushort><<<grid,1024,shared,stream>>>(lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
else
blocksparse_masked_softmax<8,32, uint><<<grid,1024,shared,stream>>>(lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
}
// Unroll factor of 4 is preferred
else if (maxK > 32*4)
{
uint threads = CEIL_DIV(maxK, 32*4) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
if (block_size == 8)
blocksparse_masked_softmax<4, 8, uchar><<<grid,threads,shared,stream>>>(lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else if (block_size == 16)
blocksparse_masked_softmax<4,16,ushort><<<grid,threads,shared,stream>>>(lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else
blocksparse_masked_softmax<4,32, uint><<<grid,threads,shared,stream>>>(lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
}
// Unroll factor 1
else
{
uint threads = CEIL_DIV(maxK, 32) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
if (block_size == 8)
blocksparse_masked_softmax<1, 8, uchar><<<grid,threads,shared,stream>>>(lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else if (block_size == 16)
blocksparse_masked_softmax<1,16,ushort><<<grid,threads,shared,stream>>>(lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else
blocksparse_masked_softmax<1,32, uint><<<grid,threads,shared,stream>>>(lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
}
}
return true;
}
bool BlocksparseMaskedSoftmaxGrad(CUstream stream,
const uint2* lut,
const char* mask,
const ehalf* dy,
const ehalf* y,
ehalf* dx,
uint block_size, uint blocks,
uint batch_dim, uint head_dim, uint ctx_blks,
uint lut_heads, uint lut_dim, uint max_lut,
uint mask_heads, float scale)
{
uint szLut = lut_heads > 1 ? lut_dim : 0;
uint szMask = mask_heads > 1 ? blocks * block_size : 0;
uint gridQ = ctx_blks * block_size;
uint szHead = blocks * block_size * block_size;
uint szBatch = head_dim * szHead;
uint maxK = max_lut * block_size;
//cuMemsetD16Async((CUdeviceptr)c, 0, szBatch*batch_dim, stream);
dim3 grid(gridQ, batch_dim, head_dim);
if (block_size == 64)
{
uint shared = max_lut*4;
// Unroll factor 8
if (maxK > 1024*8)
{
uint threads = 1024;
uint shfl_init = 16;
blocksparse_masked_softmax_64x64_grad<8><<<grid,threads,shared,stream>>>(lut, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init);
}
// Unroll factor of 4 is preferred
else if (maxK >= 64*4)
{
uint threads = CEIL_DIV(maxK, 64*4) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
blocksparse_masked_softmax_64x64_grad<4><<<grid,threads,shared,stream>>>(lut, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init);
}
// Unroll factor 1
else
{
uint threads = CEIL_DIV(maxK, 64) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
blocksparse_masked_softmax_64x64_grad<1><<<grid,threads,shared,stream>>>(lut, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init);
}
}
else
{
uint shared = max_lut*8;
// Unroll factor 8
if (maxK > 1024*4)
{
if (block_size == 8)
blocksparse_masked_softmax_grad<8, 8, uchar><<<grid,1024,shared,stream>>>(lut, (const uchar*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
else if (block_size == 16)
blocksparse_masked_softmax_grad<8,16,ushort><<<grid,1024,shared,stream>>>(lut, (const ushort*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
else
blocksparse_masked_softmax_grad<8,32, uint><<<grid,1024,shared,stream>>>(lut, (const uint*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
}
// Unroll factor of 4 is preferred
else if (maxK > 32*4)
{
uint threads = CEIL_DIV(maxK, 32*4) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
if (block_size == 8)
blocksparse_masked_softmax_grad<4, 8, uchar><<<grid,threads,shared,stream>>>(lut, (const uchar*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else if (block_size == 16)
blocksparse_masked_softmax_grad<4,16,ushort><<<grid,threads,shared,stream>>>(lut, (const ushort*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else
blocksparse_masked_softmax_grad<4,32, uint><<<grid,threads,shared,stream>>>(lut, (const uint*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
}
// Unroll factor 1
else
{
uint threads = CEIL_DIV(maxK, 32) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
if (block_size == 8)
blocksparse_masked_softmax_grad<1, 8, uchar><<<grid,threads,shared,stream>>>(lut, (const uchar*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else if (block_size == 16)
blocksparse_masked_softmax_grad<1,16,ushort><<<grid,threads,shared,stream>>>(lut, (const ushort*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else
blocksparse_masked_softmax_grad<1,32, uint><<<grid,threads,shared,stream>>>(lut, (const uint*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
}
}
return true;
}
#endif // GOOGLE_CUDA
|
8c7d303ac5700cdc8e8445b569df6c360c48cf45.hip | // !!! This is a file automatically generated by hipify!!!
/**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/* covariance.c: this file is part of PolyBench/C */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Array initialization. */
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
#include "covariance.h"
int threadsPerBlock = 2;
__global__ void my_kernel ( int m, int n, double float_n, double (*cov)[1200] , double (*data)[1200] , int length, int offset)
{
int i;
int j;
int k;
i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= length)
return;
i += offset;
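  // Each thread computes row i of the covariance matrix: only the upper
  // triangle (j >= i) is accumulated and the result is mirrored into cov[j][i].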
for (j = i; j < _PB_M; j++)
{
cov[i][j] = SCALAR_VAL(0.0);
for (k = 0; k < _PB_N; k++)
cov[i][j] += data[k][i] * data[k][j];
cov[i][j] /= (float_n - SCALAR_VAL(1.0));
cov[j][i] = cov[i][j];
}
}
static
void init_array (int m, int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,N,M,n,m))
{
int i, j;
*float_n = (DATA_TYPE)n;
for (i = 0; i < N; i++)
for (j = 0; j < M; j++)
data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(cov,M,M,m,m))
{
int i, j;
POLYBENCH_DUMP_START;
POLYBENCH_DUMP_BEGIN("cov");
for (i = 0; i < m; i++)
for (j = 0; j < m; j++) {
if ((i * m + j) % 20 == 0) fprintf (POLYBENCH_DUMP_TARGET, "\n");
fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, cov[i][j]);
}
POLYBENCH_DUMP_END("cov");
POLYBENCH_DUMP_FINISH;
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_covariance(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,N,M,n,m),
DATA_TYPE POLYBENCH_2D(cov,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m))
{
hipSetDevice(0);
hipSetDeviceFlags(hipDeviceScheduleBlockingSync);
hipStream_t *streams = (hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
for (int i = 0; i < nstreams; i++) {
hipStreamCreate(&(streams[i]));
}
hipEvent_t start_event, stop_event;
float time_elapsed;
int eventflags = hipEventBlockingSync;
hipEventCreateWithFlags(&start_event, eventflags);
hipEventCreateWithFlags(&stop_event, eventflags);
printf("%d\t%d\t%d\t", (((m)-1)-0 + 1), threadsPerBlock, nstreams);
double (*d_cov)[1200];
hipMalloc((void **)&d_cov, (((m)-1)+ 1)* sizeof (double [1200]));
double (*d_data)[1200];
hipMalloc((void **)&d_data, (((n)-1)+ 1)* sizeof (double [1200]));
int i, j, k;
for (j = 0; j < _PB_M; j++)
{
mean[j] = SCALAR_VAL(0.0);
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
for (i = 0; i < _PB_N; i++)
for (j = 0; j < _PB_M; j++)
data[i][j] -= mean[j];
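  // Split the m rows across nstreams streams: thread blocks are distributed as
  // evenly as possible (the first `refblocks` streams receive one extra block),
  // the padding threads of the last partial block are trimmed from the final
  // stream, and offset[] holds each stream's starting row.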
int ni = m;
int totalblocks = (ni +threadsPerBlock - 1) / threadsPerBlock;
int refblocks = totalblocks % nstreams;
int idlethreads = ni % threadsPerBlock ? threadsPerBlock - ni % threadsPerBlock : 0;
int blocksPerGrid[nstreams];
int threadsPerSubtask[nstreams];
int offset[nstreams];
for (int i = 0; i < nstreams; i++)
{
blocksPerGrid[i] = totalblocks / nstreams;
if (i < refblocks)
blocksPerGrid[i] ++;
threadsPerSubtask[i] = threadsPerBlock * blocksPerGrid[i];
}
threadsPerSubtask[nstreams - 1] -= idlethreads;
offset[0] = 0;
for (int i = 1; i < nstreams; i++)
offset[i] = offset[i-1] + threadsPerSubtask[i-1];
hipEventRecord(start_event, 0);
hipMemcpyAsync(d_data, data, (((n)-1)+ 1)* sizeof (double [1200]), hipMemcpyHostToDevice, streams[0]);
for (int i = 0; i < nstreams; i++)
{
hipLaunchKernelGGL(( my_kernel), dim3(blocksPerGrid[i]), dim3(threadsPerBlock),0, streams[i], m, n, float_n, d_cov, d_data, threadsPerSubtask[i], offset[i]);
}
hipMemcpyAsync(cov, d_cov, (((m)-1)+ 1)* sizeof (double [1200]),hipMemcpyDeviceToHost, streams[0]);
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&time_elapsed, start_event, stop_event);
printf("%f\n", time_elapsed);
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
if (argc > 1)
threadsPerBlock = atoi(argv[1]);
if (argc > 2)
nstreams = atoi(argv[2]);
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,N,M,n,m);
POLYBENCH_2D_ARRAY_DECL(cov,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_covariance (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(cov),
POLYBENCH_ARRAY(mean));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(cov)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(cov);
POLYBENCH_FREE_ARRAY(mean);
return 0;
}
| 8c7d303ac5700cdc8e8445b569df6c360c48cf45.cu | /**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/* covariance.c: this file is part of PolyBench/C */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Array initialization. */
#include <cuda.h>
#include <cuda_runtime_api.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
#include "covariance.h"
int threadsPerBlock = 2;
__global__ void my_kernel ( int m, int n, double float_n, double (*cov)[1200] , double (*data)[1200] , int length, int offset)
{
int i;
int j;
int k;
i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= length)
return;
i += offset;
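  // Each thread computes row i of the covariance matrix: only the upper
  // triangle (j >= i) is accumulated and the result is mirrored into cov[j][i].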
for (j = i; j < _PB_M; j++)
{
cov[i][j] = SCALAR_VAL(0.0);
for (k = 0; k < _PB_N; k++)
cov[i][j] += data[k][i] * data[k][j];
cov[i][j] /= (float_n - SCALAR_VAL(1.0));
cov[j][i] = cov[i][j];
}
}
static
void init_array (int m, int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,N,M,n,m))
{
int i, j;
*float_n = (DATA_TYPE)n;
for (i = 0; i < N; i++)
for (j = 0; j < M; j++)
data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(cov,M,M,m,m))
{
int i, j;
POLYBENCH_DUMP_START;
POLYBENCH_DUMP_BEGIN("cov");
for (i = 0; i < m; i++)
for (j = 0; j < m; j++) {
if ((i * m + j) % 20 == 0) fprintf (POLYBENCH_DUMP_TARGET, "\n");
fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, cov[i][j]);
}
POLYBENCH_DUMP_END("cov");
POLYBENCH_DUMP_FINISH;
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_covariance(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,N,M,n,m),
DATA_TYPE POLYBENCH_2D(cov,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m))
{
cudaSetDevice(0);
cudaSetDeviceFlags(cudaDeviceBlockingSync);
cudaStream_t *streams = (cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
for (int i = 0; i < nstreams; i++) {
cudaStreamCreate(&(streams[i]));
}
cudaEvent_t start_event, stop_event;
float time_elapsed;
int eventflags = cudaEventBlockingSync;
cudaEventCreateWithFlags(&start_event, eventflags);
cudaEventCreateWithFlags(&stop_event, eventflags);
printf("%d\t%d\t%d\t", (((m)-1)-0 + 1), threadsPerBlock, nstreams);
double (*d_cov)[1200];
cudaMalloc((void **)&d_cov, (((m)-1)+ 1)* sizeof (double [1200]));
double (*d_data)[1200];
cudaMalloc((void **)&d_data, (((n)-1)+ 1)* sizeof (double [1200]));
int i, j, k;
for (j = 0; j < _PB_M; j++)
{
mean[j] = SCALAR_VAL(0.0);
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
for (i = 0; i < _PB_N; i++)
for (j = 0; j < _PB_M; j++)
data[i][j] -= mean[j];
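  // Split the m rows across nstreams streams: thread blocks are distributed as
  // evenly as possible (the first `refblocks` streams receive one extra block),
  // the padding threads of the last partial block are trimmed from the final
  // stream, and offset[] holds each stream's starting row.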
int ni = m;
int totalblocks = (ni +threadsPerBlock - 1) / threadsPerBlock;
int refblocks = totalblocks % nstreams;
int idlethreads = ni % threadsPerBlock ? threadsPerBlock - ni % threadsPerBlock : 0;
int blocksPerGrid[nstreams];
int threadsPerSubtask[nstreams];
int offset[nstreams];
for (int i = 0; i < nstreams; i++)
{
blocksPerGrid[i] = totalblocks / nstreams;
if (i < refblocks)
blocksPerGrid[i] ++;
threadsPerSubtask[i] = threadsPerBlock * blocksPerGrid[i];
}
threadsPerSubtask[nstreams - 1] -= idlethreads;
offset[0] = 0;
for (int i = 1; i < nstreams; i++)
offset[i] = offset[i-1] + threadsPerSubtask[i-1];
cudaEventRecord(start_event, 0);
cudaMemcpyAsync(d_data, data, (((n)-1)+ 1)* sizeof (double [1200]), cudaMemcpyHostToDevice, streams[0]);
for (int i = 0; i < nstreams; i++)
{
my_kernel<<<blocksPerGrid[i], threadsPerBlock,0, streams[i]>>>(m, n, float_n, d_cov, d_data, threadsPerSubtask[i], offset[i]);
}
cudaMemcpyAsync(cov, d_cov, (((m)-1)+ 1)* sizeof (double [1200]),cudaMemcpyDeviceToHost, streams[0]);
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&time_elapsed, start_event, stop_event);
printf("%f\n", time_elapsed);
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
if (argc > 1)
threadsPerBlock = atoi(argv[1]);
if (argc > 2)
nstreams = atoi(argv[2]);
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,N,M,n,m);
POLYBENCH_2D_ARRAY_DECL(cov,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_covariance (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(cov),
POLYBENCH_ARRAY(mean));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(cov)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(cov);
POLYBENCH_FREE_ARRAY(mean);
return 0;
}
|
d412e147cbcaf88a327a426f026b10f696f06c40.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) 2012 by Jörn Dinkla, www.dinkla.com, All rights reserved.
*/
#include "kernel_copy3.h"
__global__
void kernel_copy_kernel3(const uchar4* d_input, uchar4* d_output, const CExtent extent) {
	// Determine the thread identity
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (extent.inBounds(x, y)) {
const int idx = extent.index(x, y);
		const uchar4 value = d_input[idx]; // Fetch the value
		d_output[idx] = value; // And write it back
}
}
void kernel_copy3(const CExecConfig& config, const uchar4* d_input, uchar4* d_output, const CExtent& extent) {
hipLaunchKernelGGL(( kernel_copy_kernel3), dim3(config.grid),dim3(config.threads), 0, 0, d_input, d_output, extent);
}
| d412e147cbcaf88a327a426f026b10f696f06c40.cu | /*
* Copyright (c) 2012 by Jörn Dinkla, www.dinkla.com, All rights reserved.
*/
#include "kernel_copy3.h"
__global__
void kernel_copy_kernel3(const uchar4* d_input, uchar4* d_output, const CExtent extent) {
	// Determine the thread identity
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (extent.inBounds(x, y)) {
const int idx = extent.index(x, y);
		const uchar4 value = d_input[idx]; // Fetch the value
		d_output[idx] = value; // And write it back
}
}
void kernel_copy3(const CExecConfig& config, const uchar4* d_input, uchar4* d_output, const CExtent& extent) {
kernel_copy_kernel3<<<config.grid,config.threads>>>(d_input, d_output, extent);
}
|
10f0aa3420b8dbc91320fc7d58b23940427aa9f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<stdio.h>
#include<stdlib.h>
#include <math.h>
#include <Windows.h>
#include <time.h>
#include <assert.h>
#define CUDA_CALL(x) { const hipError_t a = (x); if(a != hipSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", hipGetErrorString(a), a, __LINE__); hipDeviceReset(); assert(0);}}
typedef float TIMER_T;
#define USE_CPU_TIMER 1
#define USE_GPU_TIMER 1
#define IN
#define OUT
#define INOUT
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
hipEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(hipEventCreate(&cuda_timer_start));
CUDA_CALL(hipEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL(hipEventDestroy(cuda_timer_start));
CUDA_CALL(hipEventDestroy(cuda_timer_stop));
}
inline void start_device_timer()
{
hipEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
hipEventRecord(cuda_timer_stop, CUDA_STREAM_0);
hipEventSynchronize(cuda_timer_stop);
hipEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
#define N_SIZE (1 << 26) // total number of data elements
#define NF_SIZE (1 << 6) // Nf (half-window) size
#define NO_SHARED 0 // flag: run the kernel without shared memory
#define SHARED 1 // flag: run the kernel with shared memory
#define BLOCK_SIZE (1 << 6) // CUDA kernel thread block size
#define BLOCK_WIDTH (1 << 3)
#define BLOCK_HEIGHT (BLOCK_SIZE / BLOCK_WIDTH)
#define N_ITERATION (1 << 0) // number of times the experiment is repeated
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
int N;
int Nf;
int *h_ArrayElements;
int *h_SumOfArrayElements_CPU;
int *h_SumOfArrayElements_GPU_No_Shared;
int *h_SumOfArrayElements_GPU_Shared;
hipError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf, int Shared_flag);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
//  Kernel that computes, for each index, the sum of the array elements from index - Nf to index + Nf.
//  This kernel does not use shared memory.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void Sum_n_elements_Kernel_No_shared(IN int *d_ArrayElements, OUT int *d_SumOfArrayElements, int N, int Nf) {
const unsigned block_id = blockIdx.y * gridDim.x + blockIdx.x;
const unsigned thread_id = threadIdx.y * blockDim.x + threadIdx.x;
const unsigned id = block_id * BLOCK_SIZE + thread_id;
int sum = 0;
for (int i = -Nf; i <= Nf; i++) {
if (id + i >= N || id + i < 0) continue;
sum += d_ArrayElements[id + i];
}
d_SumOfArrayElements[id] = sum;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
//  Kernel that computes, for each index, the sum of the array elements from index - Nf to index + Nf.
//  This kernel uses shared memory.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ int shared_buffer[];
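// Dynamically sized shared tile: BLOCK_SIZE elements plus an Nf-wide halo on each
// side (the byte count is supplied as the third kernel launch parameter).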
__global__ void Sum_n_elements_Kernel_shared(IN int *d_ArrayElements, OUT int *d_SumOfArrayElements, int N, int Nf) {
const unsigned block_id = blockIdx.y * gridDim.x + blockIdx.x;
const unsigned thread_id = threadIdx.y * blockDim.x + threadIdx.x;
const unsigned id = block_id * BLOCK_SIZE + thread_id;
/*Todo*/
	//1: stage the block's elements plus an Nf-wide halo on each side into shared memory
	//   (thread 0 loads the left halo, the last thread the right halo, out-of-range entries are zero)
if (thread_id == 0) {
for (int i = 0; i < Nf; i++) {
if (id + i < Nf) shared_buffer[i] = 0;
else shared_buffer[i] = d_ArrayElements[id + i - Nf];
}
}
if (thread_id == BLOCK_SIZE - 1) {
for (int i = 0; i < Nf; i++) {
/*
if (id + i < Nf) shared_buffer[i] = 0;
else shared_buffer[i] = d_ArrayElements[id + i - Nf];
*/
if (id + i >= N-1) shared_buffer[BLOCK_SIZE + Nf + i] =0;
else shared_buffer[BLOCK_SIZE + Nf + i] = d_ArrayElements[id + i + 1];
}
}
shared_buffer[thread_id + Nf] = d_ArrayElements[id];
	//2: wait until the whole tile is resident in shared memory
__syncthreads();
int sum = 0;
	//3: accumulate the 2*Nf+1 window entirely from shared memory
for (int i = 0; i <= 2 * Nf; i++) {
sum += shared_buffer[thread_id + i];
}
	//4: write the windowed sum back to global memory
d_SumOfArrayElements[id] = sum;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
//  Reference C code that computes, for each index, the sum of the array elements from index - Nf to index + Nf.
//  Its results are compared with the GPU kernels' output to verify correctness.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void Sum_n_elements_CPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_CPU, int Nf) {
int i, j, sum;
for (i = 0; i < N; i++) {
sum = 0;
for (j = -Nf; j <= Nf; j++) {
if (i + j >= N || i + j < 0) continue;
sum += p_ArrayElements[i + j];
}
p_SumOfElements_CPU[i] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
//  Reads the generated bin file.
//  The first 4 bytes hold the element count N, the next 4 bytes hold Nf, followed by N int values.
//  The data are integers in the range -100 ~ 100.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void read_bin_file() {
printf("***Binary File Read Start!!\n");
FILE *fp = fopen("gen.bin", "rb");
fread(&N, sizeof(int), 1, fp);
fread(&Nf, sizeof(int), 1, fp);
h_ArrayElements = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_CPU = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_GPU_No_Shared = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_GPU_Shared = (int *)malloc(N * sizeof(int));
fread(h_ArrayElements, sizeof(int), N, fp);
fclose(fp);
printf("***Binary File Read End!!\n\n");
}
void init_bin_file(IN int n, IN int nf) {
printf("***Binary File Create Start!!\n");
srand((unsigned)time(NULL));
FILE *fp = fopen("gen.bin", "wb");
fwrite(&n, sizeof(int), 1, fp);
fwrite(&nf, sizeof(int), 1, fp);
int i, input;
for (i = 0; i < n; i++) {
input = (int)((float)rand() / RAND_MAX * 200 - 100);
fwrite(&input, sizeof(int), 1, fp);
}
fclose(fp);
printf("***Binary File Create End!!\n\n");
}
int main()
{
int i;
init_bin_file(N_SIZE, NF_SIZE);
read_bin_file();
TIMER_T CPU_time = 0.0f, GPU_time_NO_SHARED = 0.0f, GPU_time_SHARED = 0.0f;
for (i = 0; i < N_ITERATION; i++) {
CHECK_TIME_START;
Sum_n_elements_CPU(h_ArrayElements, h_SumOfArrayElements_CPU, Nf);
CHECK_TIME_END(compute_time);
CPU_time += compute_time;
Sum_n_elements_GPU(h_ArrayElements, h_SumOfArrayElements_GPU_No_Shared, Nf, NO_SHARED);
GPU_time_NO_SHARED += device_time;
Sum_n_elements_GPU(h_ArrayElements, h_SumOfArrayElements_GPU_Shared, Nf, SHARED);
GPU_time_SHARED += device_time;
}
for (i = 0; i < N; i++) {
if (h_SumOfArrayElements_CPU[i] != h_SumOfArrayElements_GPU_No_Shared[i] || h_SumOfArrayElements_CPU[i] != h_SumOfArrayElements_GPU_Shared[i]) {
printf("%d : CPU : %d,\tGPU no shared : %d,\tGPU shared : %d\n", i, h_SumOfArrayElements_CPU[i], h_SumOfArrayElements_GPU_No_Shared[i], h_SumOfArrayElements_GPU_Shared[i]);
break;
}
}
if (i == N)
printf("***Kernel execution Success!!\n\n");
printf("***CPU compute time : %.3f ms\n", CPU_time / N_ITERATION);
printf("***GPU NO SHARED compute time : %.3f ms\n", GPU_time_NO_SHARED / N_ITERATION);
printf("***GPU SHARED compute time : %.3f ms\n", GPU_time_SHARED / N_ITERATION);
free(h_ArrayElements);
free(h_SumOfArrayElements_CPU);
free(h_SumOfArrayElements_GPU_No_Shared);
free(h_SumOfArrayElements_GPU_Shared);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
//  Prepares the device buffers and selects the device before launching the kernel.
//  Pass either the NO_SHARED or the SHARED macro as Shared_flag;
//  the kernel variant matching that flag is executed.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
hipError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf, int Shared_flag) {
hipError_t cudaStatus;
CUDA_CALL(hipSetDevice(0));
int *d_ArrayElements, *d_SumOfElements;
size_t mem_size;
mem_size = N * sizeof(int);
CUDA_CALL(hipMalloc(&d_ArrayElements, mem_size));
CUDA_CALL(hipMalloc(&d_SumOfElements, mem_size));
CUDA_CALL(hipMemcpy(d_ArrayElements, p_ArrayElements, mem_size, hipMemcpyHostToDevice));
dim3 blockDIm(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 gridDim(N / BLOCK_SIZE);
CHECK_TIME_INIT_GPU();
CHECK_TIME_START_GPU();
switch (Shared_flag)
{
case NO_SHARED:
Sum_n_elements_Kernel_No_shared << <gridDim, blockDIm >> > (d_ArrayElements, d_SumOfElements, N, Nf);
break;
case SHARED:
Sum_n_elements_Kernel_shared << <gridDim, blockDIm, sizeof(int)* (BLOCK_SIZE + 2 * Nf) >> > (d_ArrayElements, d_SumOfElements, N, Nf);
break;
}
CUDA_CALL(cudaStatus = hipDeviceSynchronize());
CHECK_TIME_END_GPU(device_time);
CHECK_TIME_DEST_GPU();
CUDA_CALL(hipMemcpy(p_SumOfElements_GPU, d_SumOfElements, mem_size, hipMemcpyDeviceToHost));
hipFree(d_ArrayElements);
hipFree(d_SumOfElements);
return cudaStatus;
}
| 10f0aa3420b8dbc91320fc7d58b23940427aa9f4.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<stdio.h>
#include<stdlib.h>
#include <math.h>
#include <Windows.h>
#include <time.h>
#include <assert.h>
#define CUDA_CALL(x) { const cudaError_t a = (x); if(a != cudaSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", cudaGetErrorString(a), a, __LINE__); cudaDeviceReset(); assert(0);}}
typedef float TIMER_T;
#define USE_CPU_TIMER 1
#define USE_GPU_TIMER 1
#define IN
#define OUT
#define INOUT
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
cudaEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(cudaEventCreate(&cuda_timer_start));
CUDA_CALL(cudaEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL(cudaEventDestroy(cuda_timer_start));
CUDA_CALL(cudaEventDestroy(cuda_timer_stop));
}
inline void start_device_timer()
{
cudaEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
cudaEventRecord(cuda_timer_stop, CUDA_STREAM_0);
cudaEventSynchronize(cuda_timer_stop);
cudaEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
#define N_SIZE (1 << 26) // total number of data elements
#define NF_SIZE (1 << 6) // Nf (half-window) size
#define NO_SHARED 0 // flag: run the kernel without shared memory
#define SHARED 1 // flag: run the kernel with shared memory
#define BLOCK_SIZE (1 << 6) // CUDA kernel thread block size
#define BLOCK_WIDTH (1 << 3)
#define BLOCK_HEIGHT (BLOCK_SIZE / BLOCK_WIDTH)
#define N_ITERATION (1 << 0) // number of times the experiment is repeated
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
int N;
int Nf;
int *h_ArrayElements;
int *h_SumOfArrayElements_CPU;
int *h_SumOfArrayElements_GPU_No_Shared;
int *h_SumOfArrayElements_GPU_Shared;
cudaError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf, int Shared_flag);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
//  Kernel that computes, for each index, the sum of the array elements from index - Nf to index + Nf.
//  This kernel does not use shared memory.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void Sum_n_elements_Kernel_No_shared(IN int *d_ArrayElements, OUT int *d_SumOfArrayElements, int N, int Nf) {
const unsigned block_id = blockIdx.y * gridDim.x + blockIdx.x;
const unsigned thread_id = threadIdx.y * blockDim.x + threadIdx.x;
const unsigned id = block_id * BLOCK_SIZE + thread_id;
int sum = 0;
for (int i = -Nf; i <= Nf; i++) {
if (id + i >= N || id + i < 0) continue;
sum += d_ArrayElements[id + i];
}
d_SumOfArrayElements[id] = sum;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
//  Kernel that computes, for each index, the sum of the array elements from index - Nf to index + Nf.
//  This kernel uses shared memory.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ int shared_buffer[];
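// Dynamically sized shared tile: BLOCK_SIZE elements plus an Nf-wide halo on each
// side (the byte count is supplied as the third kernel launch parameter).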
__global__ void Sum_n_elements_Kernel_shared(IN int *d_ArrayElements, OUT int *d_SumOfArrayElements, int N, int Nf) {
const unsigned block_id = blockIdx.y * gridDim.x + blockIdx.x;
const unsigned thread_id = threadIdx.y * blockDim.x + threadIdx.x;
const unsigned id = block_id * BLOCK_SIZE + thread_id;
/*Todo*/
	//1: stage the block's elements plus an Nf-wide halo on each side into shared memory
	//   (thread 0 loads the left halo, the last thread the right halo, out-of-range entries are zero)
if (thread_id == 0) {
for (int i = 0; i < Nf; i++) {
if (id + i < Nf) shared_buffer[i] = 0;
else shared_buffer[i] = d_ArrayElements[id + i - Nf];
}
}
if (thread_id == BLOCK_SIZE - 1) {
for (int i = 0; i < Nf; i++) {
/*
if (id + i < Nf) shared_buffer[i] = 0;
else shared_buffer[i] = d_ArrayElements[id + i - Nf];
*/
if (id + i >= N-1) shared_buffer[BLOCK_SIZE + Nf + i] =0;
else shared_buffer[BLOCK_SIZE + Nf + i] = d_ArrayElements[id + i + 1];
}
}
shared_buffer[thread_id + Nf] = d_ArrayElements[id];
	//2: wait until the whole tile is resident in shared memory
__syncthreads();
int sum = 0;
	//3: accumulate the 2*Nf+1 window entirely from shared memory
for (int i = 0; i <= 2 * Nf; i++) {
sum += shared_buffer[thread_id + i];
}
	//4: write the windowed sum back to global memory
d_SumOfArrayElements[id] = sum;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
//  Reference C code that computes, for each index, the sum of the array elements from index - Nf to index + Nf.
//  Its results are compared with the GPU kernels' output to verify correctness.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void Sum_n_elements_CPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_CPU, int Nf) {
int i, j, sum;
for (i = 0; i < N; i++) {
sum = 0;
for (j = -Nf; j <= Nf; j++) {
if (i + j >= N || i + j < 0) continue;
sum += p_ArrayElements[i + j];
}
p_SumOfElements_CPU[i] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
//  Reads the generated bin file.
//  The first 4 bytes hold the element count N, the next 4 bytes hold Nf, followed by N int values.
//  The data are integers in the range -100 ~ 100.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void read_bin_file() {
printf("***Binary File Read Start!!\n");
FILE *fp = fopen("gen.bin", "rb");
fread(&N, sizeof(int), 1, fp);
fread(&Nf, sizeof(int), 1, fp);
h_ArrayElements = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_CPU = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_GPU_No_Shared = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_GPU_Shared = (int *)malloc(N * sizeof(int));
fread(h_ArrayElements, sizeof(int), N, fp);
fclose(fp);
printf("***Binary File Read End!!\n\n");
}
void init_bin_file(IN int n, IN int nf) {
printf("***Binary File Create Start!!\n");
srand((unsigned)time(NULL));
FILE *fp = fopen("gen.bin", "wb");
fwrite(&n, sizeof(int), 1, fp);
fwrite(&nf, sizeof(int), 1, fp);
int i, input;
for (i = 0; i < n; i++) {
input = (int)((float)rand() / RAND_MAX * 200 - 100);
fwrite(&input, sizeof(int), 1, fp);
}
fclose(fp);
printf("***Binary File Create End!!\n\n");
}
int main()
{
int i;
init_bin_file(N_SIZE, NF_SIZE);
read_bin_file();
TIMER_T CPU_time = 0.0f, GPU_time_NO_SHARED = 0.0f, GPU_time_SHARED = 0.0f;
for (i = 0; i < N_ITERATION; i++) {
CHECK_TIME_START;
Sum_n_elements_CPU(h_ArrayElements, h_SumOfArrayElements_CPU, Nf);
CHECK_TIME_END(compute_time);
CPU_time += compute_time;
Sum_n_elements_GPU(h_ArrayElements, h_SumOfArrayElements_GPU_No_Shared, Nf, NO_SHARED);
GPU_time_NO_SHARED += device_time;
Sum_n_elements_GPU(h_ArrayElements, h_SumOfArrayElements_GPU_Shared, Nf, SHARED);
GPU_time_SHARED += device_time;
}
for (i = 0; i < N; i++) {
if (h_SumOfArrayElements_CPU[i] != h_SumOfArrayElements_GPU_No_Shared[i] || h_SumOfArrayElements_CPU[i] != h_SumOfArrayElements_GPU_Shared[i]) {
printf("%d : CPU : %d,\tGPU no shared : %d,\tGPU shared : %d\n", i, h_SumOfArrayElements_CPU[i], h_SumOfArrayElements_GPU_No_Shared[i], h_SumOfArrayElements_GPU_Shared[i]);
break;
}
}
if (i == N)
printf("***Kernel execution Success!!\n\n");
printf("***CPU compute time : %.3f ms\n", CPU_time / N_ITERATION);
printf("***GPU NO SHARED compute time : %.3f ms\n", GPU_time_NO_SHARED / N_ITERATION);
printf("***GPU SHARED compute time : %.3f ms\n", GPU_time_SHARED / N_ITERATION);
free(h_ArrayElements);
free(h_SumOfArrayElements_CPU);
free(h_SumOfArrayElements_GPU_No_Shared);
free(h_SumOfArrayElements_GPU_Shared);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
//  Prepares the device buffers and selects the device before launching the kernel.
//  Pass either the NO_SHARED or the SHARED macro as Shared_flag;
//  the kernel variant matching that flag is executed.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
cudaError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf, int Shared_flag) {
cudaError_t cudaStatus;
CUDA_CALL(cudaSetDevice(0));
int *d_ArrayElements, *d_SumOfElements;
size_t mem_size;
mem_size = N * sizeof(int);
CUDA_CALL(cudaMalloc(&d_ArrayElements, mem_size));
CUDA_CALL(cudaMalloc(&d_SumOfElements, mem_size));
CUDA_CALL(cudaMemcpy(d_ArrayElements, p_ArrayElements, mem_size, cudaMemcpyHostToDevice));
dim3 blockDIm(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 gridDim(N / BLOCK_SIZE);
CHECK_TIME_INIT_GPU();
CHECK_TIME_START_GPU();
switch (Shared_flag)
{
case NO_SHARED:
Sum_n_elements_Kernel_No_shared << <gridDim, blockDIm >> > (d_ArrayElements, d_SumOfElements, N, Nf);
break;
case SHARED:
Sum_n_elements_Kernel_shared << <gridDim, blockDIm, sizeof(int)* (BLOCK_SIZE + 2 * Nf) >> > (d_ArrayElements, d_SumOfElements, N, Nf);
break;
}
CUDA_CALL(cudaStatus = cudaDeviceSynchronize());
CHECK_TIME_END_GPU(device_time);
CHECK_TIME_DEST_GPU();
CUDA_CALL(cudaMemcpy(p_SumOfElements_GPU, d_SumOfElements, mem_size, cudaMemcpyDeviceToHost));
cudaFree(d_ArrayElements);
cudaFree(d_SumOfElements);
return cudaStatus;
}
|
c1772af49518e7b1b0313c07f8dcbc86039ba599.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) OpenMMLab. All rights reserved
#include "pytorch_cuda_helper.hpp"
#include "roi_align_rotated_cuda_kernel.cuh"
void ROIAlignRotatedForwardCUDAKernelLauncher(
const at::Tensor features, const at::Tensor rois, const float spatial_scale,
const int sample_num, const bool aligned, const bool clockwise,
const int channels, const int height, const int width, const int num_rois,
const int pooled_height, const int pooled_width, at::Tensor output) {
const int output_size = num_rois * pooled_height * pooled_width * channels;
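  // The launch sizes the grid so that roughly one thread handles each output
  // element (roi, channel, pooled bin).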
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
features.type(), "ROIAlignRotatedLaucherForward", ([&] {
const scalar_t *bottom_data = features.data<scalar_t>();
const scalar_t *rois_data = rois.data<scalar_t>();
scalar_t *top_data = output.data<scalar_t>();
hipLaunchKernelGGL(( roi_align_rotated_forward_cuda_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0,
output_size, bottom_data, rois_data, scalar_t(spatial_scale),
sample_num, aligned, clockwise, channels, height, width,
pooled_height, pooled_width, top_data);
}));
AT_CUDA_CHECK(hipGetLastError());
}
void ROIAlignRotatedBackwardCUDAKernelLauncher(
const at::Tensor top_grad, const at::Tensor rois, const float spatial_scale,
const int sample_num, const bool aligned, const bool clockwise,
const int channels, const int height, const int width, const int num_rois,
const int pooled_height, const int pooled_width, at::Tensor bottom_grad) {
const int output_size = num_rois * pooled_height * pooled_width * channels;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
top_grad.type(), "ROIAlignLaucherBackward", ([&] {
const scalar_t *top_diff = top_grad.data<scalar_t>();
const scalar_t *rois_data = rois.data<scalar_t>();
scalar_t *bottom_diff = bottom_grad.data<scalar_t>();
hipLaunchKernelGGL(( roi_align_rotated_backward_cuda_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0,
output_size, top_diff, rois_data, spatial_scale, sample_num,
aligned, clockwise, channels, height, width, pooled_height,
pooled_width, bottom_diff);
}));
AT_CUDA_CHECK(hipGetLastError());
}
| c1772af49518e7b1b0313c07f8dcbc86039ba599.cu | // Copyright (c) OpenMMLab. All rights reserved
#include "pytorch_cuda_helper.hpp"
#include "roi_align_rotated_cuda_kernel.cuh"
void ROIAlignRotatedForwardCUDAKernelLauncher(
const at::Tensor features, const at::Tensor rois, const float spatial_scale,
const int sample_num, const bool aligned, const bool clockwise,
const int channels, const int height, const int width, const int num_rois,
const int pooled_height, const int pooled_width, at::Tensor output) {
const int output_size = num_rois * pooled_height * pooled_width * channels;
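  // The launch sizes the grid so that roughly one thread handles each output
  // element (roi, channel, pooled bin).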
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
features.type(), "ROIAlignRotatedLaucherForward", ([&] {
const scalar_t *bottom_data = features.data<scalar_t>();
const scalar_t *rois_data = rois.data<scalar_t>();
scalar_t *top_data = output.data<scalar_t>();
roi_align_rotated_forward_cuda_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
output_size, bottom_data, rois_data, scalar_t(spatial_scale),
sample_num, aligned, clockwise, channels, height, width,
pooled_height, pooled_width, top_data);
}));
AT_CUDA_CHECK(cudaGetLastError());
}
void ROIAlignRotatedBackwardCUDAKernelLauncher(
const at::Tensor top_grad, const at::Tensor rois, const float spatial_scale,
const int sample_num, const bool aligned, const bool clockwise,
const int channels, const int height, const int width, const int num_rois,
const int pooled_height, const int pooled_width, at::Tensor bottom_grad) {
const int output_size = num_rois * pooled_height * pooled_width * channels;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
top_grad.type(), "ROIAlignLaucherBackward", ([&] {
const scalar_t *top_diff = top_grad.data<scalar_t>();
const scalar_t *rois_data = rois.data<scalar_t>();
scalar_t *bottom_diff = bottom_grad.data<scalar_t>();
roi_align_rotated_backward_cuda_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
output_size, top_diff, rois_data, spatial_scale, sample_num,
aligned, clockwise, channels, height, width, pooled_height,
pooled_width, bottom_diff);
}));
AT_CUDA_CHECK(cudaGetLastError());
}
|
1a46e1a752d400a6e41b03a2a8aeb5c1f497f78c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
#include <cmath>
#include "const.h"
#include "structures.h"
#include "global.h"
//user function
__device__ void initialize_variables_kernel_gpu(
double* variables) {
for(int j = 0; j < NVAR; j++) {
variables[j] = ff_variable_cuda[j];
}
}
// CUDA kernel function
__global__ void op_cuda_initialize_variables_kernel(
double *arg0,
int set_size ) {
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
initialize_variables_kernel_gpu(arg0+n*5);
}
}
//host stub function
void op_par_loop_initialize_variables_kernel(char const *name, op_set set,
op_arg arg0){
int nargs = 1;
op_arg args[1];
args[0] = arg0;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(0);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[0].name = name;
OP_kernels[0].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: initialize_variables_kernel");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_0
int nthread = OP_BLOCK_SIZE_0;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
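    // The grid size is fixed at 200 blocks; the kernel's grid-stride loop
    // covers the whole set regardless of its size.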
hipLaunchKernelGGL(( op_cuda_initialize_variables_kernel), dim3(nblocks),dim3(nthread), 0, 0,
(double *) arg0.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[0].time += wall_t2 - wall_t1;
OP_kernels[0].transfer += (float)set->size * arg0.size * 2.0f;
}
| 1a46e1a752d400a6e41b03a2a8aeb5c1f497f78c.cu | //
// auto-generated by op2.py
//
#include <cmath>
#include "const.h"
#include "structures.h"
#include "global.h"
//user function
__device__ void initialize_variables_kernel_gpu(
double* variables) {
for(int j = 0; j < NVAR; j++) {
variables[j] = ff_variable_cuda[j];
}
}
// CUDA kernel function
__global__ void op_cuda_initialize_variables_kernel(
double *arg0,
int set_size ) {
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
initialize_variables_kernel_gpu(arg0+n*5);
}
}
//host stub function
void op_par_loop_initialize_variables_kernel(char const *name, op_set set,
op_arg arg0){
int nargs = 1;
op_arg args[1];
args[0] = arg0;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(0);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[0].name = name;
OP_kernels[0].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: initialize_variables_kernel");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_0
int nthread = OP_BLOCK_SIZE_0;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
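    // The grid size is fixed at 200 blocks; the kernel's grid-stride loop
    // covers the whole set regardless of its size.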
op_cuda_initialize_variables_kernel<<<nblocks,nthread>>>(
(double *) arg0.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[0].time += wall_t2 - wall_t1;
OP_kernels[0].transfer += (float)set->size * arg0.size * 2.0f;
}
|
9fe2846f0873cfc8edb3f09f28a1e489906d4e27.hip | // !!! This is a file automatically generated by hipify!!!
#include <Python.h>
#include <iostream>
#include "theano_mod_helper.h"
#include "cuda_ndarray.cuh"
#include <math.h>
#include <numpy/arrayobject.h>
#include <numpy/arrayscalars.h>
//////////////////////
//// Support Code
//////////////////////
namespace {
struct __struct_compiled_op_a50996554e9a8a54e6687d74af498ccb {
PyObject* __ERROR;
PyObject* storage_V3;
PyObject* storage_V5;
PyObject* storage_V7;
PyObject* storage_V9;
PyObject* storage_V1;
__struct_compiled_op_a50996554e9a8a54e6687d74af498ccb() {
// This is only somewhat safe because we:
// 1) Are not a virtual class
// 2) Do not use any virtual classes in the members
// 3) Deal with mostly POD and pointers
// If this changes, we would have to revise this, but for
// now I am tired of chasing segfaults because
// initialization code had an error and some pointer has
// a junk value.
memset(this, 0, sizeof(*this));
}
~__struct_compiled_op_a50996554e9a8a54e6687d74af498ccb(void) {
cleanup();
}
int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V9, PyObject* storage_V1) {
Py_XINCREF(storage_V3);
Py_XINCREF(storage_V5);
Py_XINCREF(storage_V7);
Py_XINCREF(storage_V9);
Py_XINCREF(storage_V1);
this->storage_V3 = storage_V3;
this->storage_V5 = storage_V5;
this->storage_V7 = storage_V7;
this->storage_V9 = storage_V9;
this->storage_V1 = storage_V1;
this->__ERROR = __ERROR;
return 0;
}
void cleanup(void) {
__label_1:
double __DUMMY_1;
__label_3:
double __DUMMY_3;
__label_5:
double __DUMMY_5;
__label_7:
double __DUMMY_7;
__label_9:
double __DUMMY_9;
__label_12:
double __DUMMY_12;
Py_XDECREF(this->storage_V3);
Py_XDECREF(this->storage_V5);
Py_XDECREF(this->storage_V7);
Py_XDECREF(this->storage_V9);
Py_XDECREF(this->storage_V1);
}
int run(void) {
int __failure = 0;
PyObject* py_V1;
CudaNdarray * V1;
PyObject* py_V3;
CudaNdarray * V3;
PyObject* py_V5;
PyArrayObject* V5;
typedef npy_int64 dtype_V5;
PyObject* py_V7;
PyArrayObject* V7;
typedef npy_int64 dtype_V7;
PyObject* py_V9;
PyArrayObject* V9;
typedef npy_int64 dtype_V9;
{
py_V1 = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
if (py_V1 == Py_None)
{
V1 = NULL;
}
else
{
assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V1))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
V1 = (CudaNdarray*)py_V1;
//std::cerr << "c_extract " << V1 << '\n';
if (V1->nd != 3)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 3",
V1->nd);
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract " << V1 << " nd check passed\n";
assert(V1);
Py_INCREF(py_V1);
}
else if (py_V1 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract done " << V1 << '\n';
}
{
py_V3 = PyList_GET_ITEM(storage_V3, 0);
{Py_XINCREF(py_V3);}
assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V3))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
V3 = (CudaNdarray*)py_V3;
//std::cerr << "c_extract " << V3 << '\n';
if (V3->nd != 3)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 3",
V3->nd);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V3)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V3)[0], 0);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V3 << "checking bcast 0 <" << V3->str<< ">\n";
//std::cerr << "c_extract " << V3->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V3)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V3)[0], 0);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V3)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V3)[1], 1);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V3 << "checking bcast 1 <" << V3->str<< ">\n";
//std::cerr << "c_extract " << V3->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V3)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V3)[1], 1);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "bcast check 1 passed\n";
if (CudaNdarray_HOST_DIMS(V3)[2] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V3)[2], 2);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "dim check 2 passed\n";
//std::cerr << "c_extract " << V3 << "checking bcast 2 <" << V3->str<< ">\n";
//std::cerr << "c_extract " << V3->str[2] << "\n";
if (CudaNdarray_HOST_STRIDES(V3)[2])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V3)[2], 2);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "bcast check 2 passed\n";
assert(V3);
Py_INCREF(py_V3);
}
else if (py_V3 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract done " << V3 << '\n';
{
py_V5 = PyList_GET_ITEM(storage_V5, 0);
{Py_XINCREF(py_V5);}
V5 = NULL;
if (py_V5 == Py_None) {
// We can either fail here or set V5 to NULL and rely on Ops
// using tensors to handle the NULL case, but if they fail to do so
// they'll end up with nasty segfaults, so this is public service.
PyErr_SetString(PyExc_ValueError, "expected an ndarray, not None");
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;}
}
if (!PyArray_Check(py_V5)) {
PyErr_SetString(PyExc_ValueError, "expected an ndarray");
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;}
}
// We expect NPY_INT64
if (!PyArray_ISALIGNED((PyArrayObject*) py_V5)) {
PyArrayObject * tmp = (PyArrayObject*) py_V5;
PyErr_Format(PyExc_NotImplementedError,
"expected an aligned array of type %ld "
"(NPY_INT64), got non-aligned array of type %ld"
" with %ld dimensions, with 3 last dims "
"%ld, %ld, %ld"
" and 3 last strides %ld %ld, %ld.",
(long int) NPY_INT64,
(long int) PyArray_TYPE((PyArrayObject*) py_V5),
(long int) PyArray_NDIM(tmp),
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-1] : -1,
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-1] : -1
);
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;}
}
// This is a TypeError to be consistent with DEBUG_MODE
// Note: DEBUG_MODE also tells the name of the container
if (PyArray_TYPE((PyArrayObject*) py_V5) != NPY_INT64) {
PyErr_Format(PyExc_TypeError,
"expected type_num %d (NPY_INT64) got %d",
NPY_INT64, PyArray_TYPE((PyArrayObject*) py_V5));
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;}
}
V5 = (PyArrayObject*)(py_V5);
Py_XINCREF(V5);
{
py_V7 = PyList_GET_ITEM(storage_V7, 0);
{Py_XINCREF(py_V7);}
V7 = NULL;
if (py_V7 == Py_None) {
// We can either fail here or set V7 to NULL and rely on Ops
// using tensors to handle the NULL case, but if they fail to do so
// they'll end up with nasty segfaults, so this is public service.
PyErr_SetString(PyExc_ValueError, "expected an ndarray, not None");
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;}
}
if (!PyArray_Check(py_V7)) {
PyErr_SetString(PyExc_ValueError, "expected an ndarray");
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;}
}
// We expect NPY_INT64
if (!PyArray_ISALIGNED((PyArrayObject*) py_V7)) {
PyArrayObject * tmp = (PyArrayObject*) py_V7;
PyErr_Format(PyExc_NotImplementedError,
"expected an aligned array of type %ld "
"(NPY_INT64), got non-aligned array of type %ld"
" with %ld dimensions, with 3 last dims "
"%ld, %ld, %ld"
" and 3 last strides %ld %ld, %ld.",
(long int) NPY_INT64,
(long int) PyArray_TYPE((PyArrayObject*) py_V7),
(long int) PyArray_NDIM(tmp),
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-1] : -1,
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-1] : -1
);
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;}
}
// This is a TypeError to be consistent with DEBUG_MODE
// Note: DEBUG_MODE also tells the name of the container
if (PyArray_TYPE((PyArrayObject*) py_V7) != NPY_INT64) {
PyErr_Format(PyExc_TypeError,
"expected type_num %d (NPY_INT64) got %d",
NPY_INT64, PyArray_TYPE((PyArrayObject*) py_V7));
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;}
}
V7 = (PyArrayObject*)(py_V7);
Py_XINCREF(V7);
{
py_V9 = PyList_GET_ITEM(storage_V9, 0);
{Py_XINCREF(py_V9);}
V9 = NULL;
if (py_V9 == Py_None) {
// We can either fail here or set V9 to NULL and rely on Ops
// using tensors to handle the NULL case, but if they fail to do so
// they'll end up with nasty segfaults, so this is public service.
PyErr_SetString(PyExc_ValueError, "expected an ndarray, not None");
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;}
}
if (!PyArray_Check(py_V9)) {
PyErr_SetString(PyExc_ValueError, "expected an ndarray");
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;}
}
// We expect NPY_INT64
if (!PyArray_ISALIGNED((PyArrayObject*) py_V9)) {
PyArrayObject * tmp = (PyArrayObject*) py_V9;
PyErr_Format(PyExc_NotImplementedError,
"expected an aligned array of type %ld "
"(NPY_INT64), got non-aligned array of type %ld"
" with %ld dimensions, with 3 last dims "
"%ld, %ld, %ld"
" and 3 last strides %ld %ld, %ld.",
(long int) NPY_INT64,
(long int) PyArray_TYPE((PyArrayObject*) py_V9),
(long int) PyArray_NDIM(tmp),
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-1] : -1,
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-1] : -1
);
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;}
}
// This is a TypeError to be consistent with DEBUG_MODE
// Note: DEBUG_MODE also tells the name of the container
if (PyArray_TYPE((PyArrayObject*) py_V9) != NPY_INT64) {
PyErr_Format(PyExc_TypeError,
"expected type_num %d (NPY_INT64) got %d",
NPY_INT64, PyArray_TYPE((PyArrayObject*) py_V9));
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;}
}
V9 = (PyArrayObject*)(py_V9);
Py_XINCREF(V9);
{
// Op class GpuAlloc
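        // Allocates (or reuses) a 3-D CudaNdarray of shape (V5, V7, V9) and fills it
        // with the broadcast value V3; the generated `1 &&` guard indicates the fill
        // value is a known zero, so a memset fast path is used when the output is
        // C-contiguous, otherwise a broadcasting copy from V3 is performed.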
int dims[3];
dims[0] = PyInt_AsLong((PyObject*)V5);
dims[1] = PyInt_AsLong((PyObject*)V7);
dims[2] = PyInt_AsLong((PyObject*)V9);
if(V1==NULL
||CudaNdarray_HOST_DIMS(V1)[0]!=dims[0]||CudaNdarray_HOST_DIMS(V1)[1]!=dims[1]||CudaNdarray_HOST_DIMS(V1)[2]!=dims[2]){
Py_XDECREF(V1);
V1 = (CudaNdarray*)CudaNdarray_New();
if (!V1)
{
// exception already set
{
__failure = 11;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_11;};
}
if (CudaNdarray_alloc_contiguous(V1, 3, dims))
{
// exception already set
Py_XDECREF(V1);
V1 = NULL;
{
__failure = 11;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_11;};
}
}
if (1 && CudaNdarray_is_c_contiguous(V1))
{
hipError_t err = hipMemset(V1->devdata, 0,
CudaNdarray_SIZE(V1) * 4);
if (hipSuccess != err)
{
PyErr_Format(PyExc_MemoryError,
"GpuAlloc: Error memsetting %ld"
" bytes of device memory. %s",
(long)(CudaNdarray_SIZE(V1) * 4),
hipGetErrorString(err));
Py_XDECREF(V1);
V1 = NULL;
{
__failure = 11;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_11;};
}
}
else if (CudaNdarray_CopyFromCudaNdarray(V1, V3, true))
{
// exception already set
Py_XDECREF(V1);
V1 = NULL;
{
__failure = 11;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_11;};
}
__label_11:
double __DUMMY_11;
}
__label_10:
if (V9) {
Py_XDECREF(V9);
}
{Py_XDECREF(py_V9);}
double __DUMMY_10;
}
__label_8:
if (V7) {
Py_XDECREF(V7);
}
{Py_XDECREF(py_V7);}
double __DUMMY_8;
}
__label_6:
if (V5) {
Py_XDECREF(V5);
}
{Py_XDECREF(py_V5);}
double __DUMMY_6;
}
__label_4:
//std::cerr << "cleanup " << py_V3 << " " << V3 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
if (V3)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt));
Py_XDECREF(V3);
}
//std::cerr << "cleanup done" << py_V3 << "\n";
{Py_XDECREF(py_V3);}
double __DUMMY_4;
}
__label_2:
if (!__failure) {
//std::cerr << "sync\n";
if (NULL == V1) {
// failure: sync None to storage
Py_XDECREF(py_V1);
py_V1 = Py_None;
Py_INCREF(py_V1);
}
else
{
if (py_V1 != (PyObject*)V1)
{
Py_XDECREF(py_V1);
py_V1 = (PyObject*)V1;
Py_INCREF(py_V1);
}
assert(py_V1->ob_refcnt);
}
PyObject* old = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
PyList_SET_ITEM(storage_V1, 0, py_V1);
{Py_XDECREF(old);}
}
//std::cerr << "cleanup " << py_V1 << " " << V1 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
if (V1)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt));
Py_XDECREF(V1);
}
//std::cerr << "cleanup done" << py_V1 << "\n";
{Py_XDECREF(py_V1);}
double __DUMMY_2;
}
if (__failure) {
// When there is a failure, this code puts the exception
// in __ERROR.
PyObject* err_type = NULL;
PyObject* err_msg = NULL;
PyObject* err_traceback = NULL;
PyErr_Fetch(&err_type, &err_msg, &err_traceback);
if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);}
if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);}
if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);}
PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0);
PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1);
PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2);
PyList_SET_ITEM(__ERROR, 0, err_type);
PyList_SET_ITEM(__ERROR, 1, err_msg);
PyList_SET_ITEM(__ERROR, 2, err_traceback);
{Py_XDECREF(old_err_type);}
{Py_XDECREF(old_err_msg);}
{Py_XDECREF(old_err_traceback);}
}
// The failure code is returned to index what code block failed.
return __failure;
}
};
}
static int __struct_compiled_op_a50996554e9a8a54e6687d74af498ccb_executor(__struct_compiled_op_a50996554e9a8a54e6687d74af498ccb* self) {
return self->run();
}
static void __struct_compiled_op_a50996554e9a8a54e6687d74af498ccb_destructor(void* executor, void* self) {
delete ((__struct_compiled_op_a50996554e9a8a54e6687d74af498ccb*)self);
}
//////////////////////
//// Functions
//////////////////////
static PyObject * instantiate(PyObject * self, PyObject *argtuple) {
assert(PyTuple_Check(argtuple));
if (6 != PyTuple_Size(argtuple)){
PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 6, got %i", (int)PyTuple_Size(argtuple));
return NULL;
}
__struct_compiled_op_a50996554e9a8a54e6687d74af498ccb* struct_ptr = new __struct_compiled_op_a50996554e9a8a54e6687d74af498ccb();
if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4),PyTuple_GET_ITEM(argtuple, 5) ) != 0) {
delete struct_ptr;
return NULL;
}
PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_a50996554e9a8a54e6687d74af498ccb_executor), struct_ptr, __struct_compiled_op_a50996554e9a8a54e6687d74af498ccb_destructor);
return thunk; }
//////////////////////
//// Module init
//////////////////////
static PyMethodDef MyMethods[] = {
{"instantiate", instantiate, METH_VARARGS, "undocumented"} ,
{NULL, NULL, 0, NULL}
};
PyMODINIT_FUNC inita50996554e9a8a54e6687d74af498ccb(void){
import_array();
(void) Py_InitModule("a50996554e9a8a54e6687d74af498ccb", MyMethods);
}
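// ---------------------------------------------------------------------------
// Illustrative sketch only -- NOT part of the Theano-generated module above.
// It condenses the goto-based GpuAlloc block in run() into a single helper so
// the control flow is easier to follow: (re)allocate a 3-d CudaNdarray of the
// requested shape, zero-fill it with hipMemset when it is C-contiguous (run()
// memsets to 0 in that case, so this compiled instance's fill value is zero),
// and otherwise broadcast-copy from the 1x1x1 value. The helper name is an
// assumption; it only reuses calls that already appear in run() above.
static int gpu_alloc_sketch(CudaNdarray** out, CudaNdarray* value,
                            int d0, int d1, int d2)
{
    int dims[3] = {d0, d1, d2};
    if (*out == NULL
        || CudaNdarray_HOST_DIMS(*out)[0] != dims[0]
        || CudaNdarray_HOST_DIMS(*out)[1] != dims[1]
        || CudaNdarray_HOST_DIMS(*out)[2] != dims[2])
    {
        Py_XDECREF(*out);
        *out = (CudaNdarray*)CudaNdarray_New();
        if (!*out)
            return -1;                              // exception already set
        if (CudaNdarray_alloc_contiguous(*out, 3, dims))
        {
            Py_XDECREF(*out);
            *out = NULL;
            return -1;                              // exception already set
        }
    }
    if (CudaNdarray_is_c_contiguous(*out))
    {
        // Contiguous output: zero-fill directly (4 bytes per float32 element).
        return (hipMemset((*out)->devdata, 0, CudaNdarray_SIZE(*out) * 4) == hipSuccess) ? 0 : -1;
    }
    // Non-contiguous output: broadcast the 1x1x1 value into it instead.
    return CudaNdarray_CopyFromCudaNdarray(*out, value, true) ? -1 : 0;
}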
| 9fe2846f0873cfc8edb3f09f28a1e489906d4e27.cu | #include <Python.h>
#include <iostream>
#include "theano_mod_helper.h"
#include "cuda_ndarray.cuh"
#include <math.h>
#include <numpy/arrayobject.h>
#include <numpy/arrayscalars.h>
//////////////////////
//// Support Code
//////////////////////
namespace {
struct __struct_compiled_op_a50996554e9a8a54e6687d74af498ccb {
PyObject* __ERROR;
PyObject* storage_V3;
PyObject* storage_V5;
PyObject* storage_V7;
PyObject* storage_V9;
PyObject* storage_V1;
__struct_compiled_op_a50996554e9a8a54e6687d74af498ccb() {
// This is only somewhat safe because we:
// 1) Are not a virtual class
// 2) Do not use any virtual classes in the members
// 3) Deal with mostly POD and pointers
// If this changes, we would have to revise this, but for
// now I am tired of chasing segfaults because
// initialization code had an error and some pointer has
// a junk value.
memset(this, 0, sizeof(*this));
}
~__struct_compiled_op_a50996554e9a8a54e6687d74af498ccb(void) {
cleanup();
}
int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V9, PyObject* storage_V1) {
Py_XINCREF(storage_V3);
Py_XINCREF(storage_V5);
Py_XINCREF(storage_V7);
Py_XINCREF(storage_V9);
Py_XINCREF(storage_V1);
this->storage_V3 = storage_V3;
this->storage_V5 = storage_V5;
this->storage_V7 = storage_V7;
this->storage_V9 = storage_V9;
this->storage_V1 = storage_V1;
this->__ERROR = __ERROR;
return 0;
}
void cleanup(void) {
__label_1:
double __DUMMY_1;
__label_3:
double __DUMMY_3;
__label_5:
double __DUMMY_5;
__label_7:
double __DUMMY_7;
__label_9:
double __DUMMY_9;
__label_12:
double __DUMMY_12;
Py_XDECREF(this->storage_V3);
Py_XDECREF(this->storage_V5);
Py_XDECREF(this->storage_V7);
Py_XDECREF(this->storage_V9);
Py_XDECREF(this->storage_V1);
}
int run(void) {
int __failure = 0;
PyObject* py_V1;
CudaNdarray * V1;
PyObject* py_V3;
CudaNdarray * V3;
PyObject* py_V5;
PyArrayObject* V5;
typedef npy_int64 dtype_V5;
PyObject* py_V7;
PyArrayObject* V7;
typedef npy_int64 dtype_V7;
PyObject* py_V9;
PyArrayObject* V9;
typedef npy_int64 dtype_V9;
{
py_V1 = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
if (py_V1 == Py_None)
{
V1 = NULL;
}
else
{
assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V1))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
V1 = (CudaNdarray*)py_V1;
//std::cerr << "c_extract " << V1 << '\n';
if (V1->nd != 3)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 3",
V1->nd);
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract " << V1 << " nd check passed\n";
assert(V1);
Py_INCREF(py_V1);
}
else if (py_V1 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract done " << V1 << '\n';
}
{
py_V3 = PyList_GET_ITEM(storage_V3, 0);
{Py_XINCREF(py_V3);}
assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V3))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
V3 = (CudaNdarray*)py_V3;
//std::cerr << "c_extract " << V3 << '\n';
if (V3->nd != 3)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 3",
V3->nd);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V3)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V3)[0], 0);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V3 << "checking bcast 0 <" << V3->str<< ">\n";
//std::cerr << "c_extract " << V3->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V3)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V3)[0], 0);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V3)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V3)[1], 1);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V3 << "checking bcast 1 <" << V3->str<< ">\n";
//std::cerr << "c_extract " << V3->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V3)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V3)[1], 1);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "bcast check 1 passed\n";
if (CudaNdarray_HOST_DIMS(V3)[2] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V3)[2], 2);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "dim check 2 passed\n";
//std::cerr << "c_extract " << V3 << "checking bcast 2 <" << V3->str<< ">\n";
//std::cerr << "c_extract " << V3->str[2] << "\n";
if (CudaNdarray_HOST_STRIDES(V3)[2])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V3)[2], 2);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "bcast check 2 passed\n";
assert(V3);
Py_INCREF(py_V3);
}
else if (py_V3 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract done " << V3 << '\n';
{
py_V5 = PyList_GET_ITEM(storage_V5, 0);
{Py_XINCREF(py_V5);}
V5 = NULL;
if (py_V5 == Py_None) {
// We can either fail here or set V5 to NULL and rely on Ops
// using tensors to handle the NULL case, but if they fail to do so
// they'll end up with nasty segfaults, so this is public service.
PyErr_SetString(PyExc_ValueError, "expected an ndarray, not None");
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;}
}
if (!PyArray_Check(py_V5)) {
PyErr_SetString(PyExc_ValueError, "expected an ndarray");
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;}
}
// We expect NPY_INT64
if (!PyArray_ISALIGNED((PyArrayObject*) py_V5)) {
PyArrayObject * tmp = (PyArrayObject*) py_V5;
PyErr_Format(PyExc_NotImplementedError,
"expected an aligned array of type %ld "
"(NPY_INT64), got non-aligned array of type %ld"
" with %ld dimensions, with 3 last dims "
"%ld, %ld, %ld"
" and 3 last strides %ld %ld, %ld.",
(long int) NPY_INT64,
(long int) PyArray_TYPE((PyArrayObject*) py_V5),
(long int) PyArray_NDIM(tmp),
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-1] : -1,
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-1] : -1
);
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;}
}
// This is a TypeError to be consistent with DEBUG_MODE
// Note: DEBUG_MODE also tells the name of the container
if (PyArray_TYPE((PyArrayObject*) py_V5) != NPY_INT64) {
PyErr_Format(PyExc_TypeError,
"expected type_num %d (NPY_INT64) got %d",
NPY_INT64, PyArray_TYPE((PyArrayObject*) py_V5));
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;}
}
V5 = (PyArrayObject*)(py_V5);
Py_XINCREF(V5);
{
py_V7 = PyList_GET_ITEM(storage_V7, 0);
{Py_XINCREF(py_V7);}
V7 = NULL;
if (py_V7 == Py_None) {
// We can either fail here or set V7 to NULL and rely on Ops
// using tensors to handle the NULL case, but if they fail to do so
// they'll end up with nasty segfaults, so this is public service.
PyErr_SetString(PyExc_ValueError, "expected an ndarray, not None");
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;}
}
if (!PyArray_Check(py_V7)) {
PyErr_SetString(PyExc_ValueError, "expected an ndarray");
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;}
}
// We expect NPY_INT64
if (!PyArray_ISALIGNED((PyArrayObject*) py_V7)) {
PyArrayObject * tmp = (PyArrayObject*) py_V7;
PyErr_Format(PyExc_NotImplementedError,
"expected an aligned array of type %ld "
"(NPY_INT64), got non-aligned array of type %ld"
" with %ld dimensions, with 3 last dims "
"%ld, %ld, %ld"
" and 3 last strides %ld %ld, %ld.",
(long int) NPY_INT64,
(long int) PyArray_TYPE((PyArrayObject*) py_V7),
(long int) PyArray_NDIM(tmp),
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-1] : -1,
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-1] : -1
);
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;}
}
// This is a TypeError to be consistent with DEBUG_MODE
// Note: DEBUG_MODE also tells the name of the container
if (PyArray_TYPE((PyArrayObject*) py_V7) != NPY_INT64) {
PyErr_Format(PyExc_TypeError,
"expected type_num %d (NPY_INT64) got %d",
NPY_INT64, PyArray_TYPE((PyArrayObject*) py_V7));
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;}
}
V7 = (PyArrayObject*)(py_V7);
Py_XINCREF(V7);
{
py_V9 = PyList_GET_ITEM(storage_V9, 0);
{Py_XINCREF(py_V9);}
V9 = NULL;
if (py_V9 == Py_None) {
// We can either fail here or set V9 to NULL and rely on Ops
// using tensors to handle the NULL case, but if they fail to do so
// they'll end up with nasty segfaults, so this is public service.
PyErr_SetString(PyExc_ValueError, "expected an ndarray, not None");
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;}
}
if (!PyArray_Check(py_V9)) {
PyErr_SetString(PyExc_ValueError, "expected an ndarray");
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;}
}
// We expect NPY_INT64
if (!PyArray_ISALIGNED((PyArrayObject*) py_V9)) {
PyArrayObject * tmp = (PyArrayObject*) py_V9;
PyErr_Format(PyExc_NotImplementedError,
"expected an aligned array of type %ld "
"(NPY_INT64), got non-aligned array of type %ld"
" with %ld dimensions, with 3 last dims "
"%ld, %ld, %ld"
" and 3 last strides %ld %ld, %ld.",
(long int) NPY_INT64,
(long int) PyArray_TYPE((PyArrayObject*) py_V9),
(long int) PyArray_NDIM(tmp),
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-1] : -1,
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-1] : -1
);
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;}
}
// This is a TypeError to be consistent with DEBUG_MODE
// Note: DEBUG_MODE also tells the name of the container
if (PyArray_TYPE((PyArrayObject*) py_V9) != NPY_INT64) {
PyErr_Format(PyExc_TypeError,
"expected type_num %d (NPY_INT64) got %d",
NPY_INT64, PyArray_TYPE((PyArrayObject*) py_V9));
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;}
}
V9 = (PyArrayObject*)(py_V9);
Py_XINCREF(V9);
{
// Op class GpuAlloc
int dims[3];
dims[0] = PyInt_AsLong((PyObject*)V5);
dims[1] = PyInt_AsLong((PyObject*)V7);
dims[2] = PyInt_AsLong((PyObject*)V9);
if(V1==NULL
||CudaNdarray_HOST_DIMS(V1)[0]!=dims[0]||CudaNdarray_HOST_DIMS(V1)[1]!=dims[1]||CudaNdarray_HOST_DIMS(V1)[2]!=dims[2]){
Py_XDECREF(V1);
V1 = (CudaNdarray*)CudaNdarray_New();
if (!V1)
{
// exception already set
{
__failure = 11;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_11;};
}
if (CudaNdarray_alloc_contiguous(V1, 3, dims))
{
// exception already set
Py_XDECREF(V1);
V1 = NULL;
{
__failure = 11;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_11;};
}
}
if (1 && CudaNdarray_is_c_contiguous(V1))
{
cudaError_t err = cudaMemset(V1->devdata, 0,
CudaNdarray_SIZE(V1) * 4);
if (cudaSuccess != err)
{
PyErr_Format(PyExc_MemoryError,
"GpuAlloc: Error memsetting %ld"
" bytes of device memory. %s",
(long)(CudaNdarray_SIZE(V1) * 4),
cudaGetErrorString(err));
Py_XDECREF(V1);
V1 = NULL;
{
__failure = 11;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_11;};
}
}
else if (CudaNdarray_CopyFromCudaNdarray(V1, V3, true))
{
// exception already set
Py_XDECREF(V1);
V1 = NULL;
{
__failure = 11;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_11;};
}
__label_11:
double __DUMMY_11;
}
__label_10:
if (V9) {
Py_XDECREF(V9);
}
{Py_XDECREF(py_V9);}
double __DUMMY_10;
}
__label_8:
if (V7) {
Py_XDECREF(V7);
}
{Py_XDECREF(py_V7);}
double __DUMMY_8;
}
__label_6:
if (V5) {
Py_XDECREF(V5);
}
{Py_XDECREF(py_V5);}
double __DUMMY_6;
}
__label_4:
//std::cerr << "cleanup " << py_V3 << " " << V3 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
if (V3)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt));
Py_XDECREF(V3);
}
//std::cerr << "cleanup done" << py_V3 << "\n";
{Py_XDECREF(py_V3);}
double __DUMMY_4;
}
__label_2:
if (!__failure) {
//std::cerr << "sync\n";
if (NULL == V1) {
// failure: sync None to storage
Py_XDECREF(py_V1);
py_V1 = Py_None;
Py_INCREF(py_V1);
}
else
{
if (py_V1 != (PyObject*)V1)
{
Py_XDECREF(py_V1);
py_V1 = (PyObject*)V1;
Py_INCREF(py_V1);
}
assert(py_V1->ob_refcnt);
}
PyObject* old = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
PyList_SET_ITEM(storage_V1, 0, py_V1);
{Py_XDECREF(old);}
}
//std::cerr << "cleanup " << py_V1 << " " << V1 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
if (V1)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt));
Py_XDECREF(V1);
}
//std::cerr << "cleanup done" << py_V1 << "\n";
{Py_XDECREF(py_V1);}
double __DUMMY_2;
}
if (__failure) {
// When there is a failure, this code puts the exception
// in __ERROR.
PyObject* err_type = NULL;
PyObject* err_msg = NULL;
PyObject* err_traceback = NULL;
PyErr_Fetch(&err_type, &err_msg, &err_traceback);
if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);}
if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);}
if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);}
PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0);
PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1);
PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2);
PyList_SET_ITEM(__ERROR, 0, err_type);
PyList_SET_ITEM(__ERROR, 1, err_msg);
PyList_SET_ITEM(__ERROR, 2, err_traceback);
{Py_XDECREF(old_err_type);}
{Py_XDECREF(old_err_msg);}
{Py_XDECREF(old_err_traceback);}
}
// The failure code is returned to index what code block failed.
return __failure;
}
};
}
static int __struct_compiled_op_a50996554e9a8a54e6687d74af498ccb_executor(__struct_compiled_op_a50996554e9a8a54e6687d74af498ccb* self) {
return self->run();
}
static void __struct_compiled_op_a50996554e9a8a54e6687d74af498ccb_destructor(void* executor, void* self) {
delete ((__struct_compiled_op_a50996554e9a8a54e6687d74af498ccb*)self);
}
//////////////////////
//// Functions
//////////////////////
static PyObject * instantiate(PyObject * self, PyObject *argtuple) {
assert(PyTuple_Check(argtuple));
if (6 != PyTuple_Size(argtuple)){
PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 6, got %i", (int)PyTuple_Size(argtuple));
return NULL;
}
__struct_compiled_op_a50996554e9a8a54e6687d74af498ccb* struct_ptr = new __struct_compiled_op_a50996554e9a8a54e6687d74af498ccb();
if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4),PyTuple_GET_ITEM(argtuple, 5) ) != 0) {
delete struct_ptr;
return NULL;
}
PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_a50996554e9a8a54e6687d74af498ccb_executor), struct_ptr, __struct_compiled_op_a50996554e9a8a54e6687d74af498ccb_destructor);
return thunk; }
//////////////////////
//// Module init
//////////////////////
static PyMethodDef MyMethods[] = {
{"instantiate", instantiate, METH_VARARGS, "undocumented"} ,
{NULL, NULL, 0, NULL}
};
PyMODINIT_FUNC inita50996554e9a8a54e6687d74af498ccb(void){
import_array();
(void) Py_InitModule("a50996554e9a8a54e6687d74af498ccb", MyMethods);
}
|
13a40c35869c4146b0aa967e88ed27ff34cc6346.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Azzam Haidar
@author Tingxing Dong
@generated from magmablas/zpotf2_kernels_old.cu, normal z -> s, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
#include "magma_templates.h"
#define REAL
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ float shared_data[];
// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ float dble_shared_data[];
/******************************************************************************/
__global__ void sdot_kernel_batched(
int n, float **x_array, int incx, int offset,
magma_int_t *info_array, int gbstep)
{
int tx = threadIdx.x;
float *x = x_array[blockIdx.z]+offset;
float *sdata = dble_shared_data;
float res = MAGMA_S_ZERO;
if (tx < n) {
res = x[tx*incx];
}
sdata[tx] = MAGMA_S_REAL(res * MAGMA_S_CONJ(res));
__syncthreads();
for (int s = blockDim.x/2; s > 32; s >>= 1 ) {
if (tx < s) {
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if (tx < 32) {
volatile float* smem = sdata;
smem[tx] += smem[tx+32];
smem[tx] += smem[tx+16];
smem[tx] += smem[tx+8];
smem[tx] += smem[tx+4];
smem[tx] += smem[tx+2];
smem[tx] += smem[tx+1];
}
if (tx == 0) {
float xreal = MAGMA_S_REAL(x[n*incx]);
x[n*incx] = MAGMA_S_MAKE(sqrt(xreal - sdata[0]), 0);
if (xreal <= MAGMA_D_ZERO) {
info_array[blockIdx.z] = offset + gbstep + 1;
}
}
}
/******************************************************************************/
void magma_spotf2_sdot_batched(magma_int_t n, float **x_array, magma_int_t incx, magma_int_t offset, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
/*
Specialized Sdot
1) performs sdot sum = x[0:n-1]*conj(x[0:n-1])
2) updates x[n] = sqrt(x[n]-sum);
*/
if (n > MAX_NTHREADS) {
fprintf( stderr, "%s: n = %lld > %lld is not supported\n",
__func__, (long long) n, (long long) MAX_NTHREADS );
}
int threadSize;
if (n <= 1024 && n > 512) {
threadSize = 1024;
}
else if (n <= 512 && n > 256 ) {
threadSize = 512;
}
else if (n <= 256 && n > 128) {
threadSize = 256;
}
else if (n <= 128 && n > 64) {
threadSize = 128;
}
else {
threadSize = 64;
}
dim3 grid(1, 1, batchCount);
size_t shmem = threadSize * sizeof(float);
hipLaunchKernelGGL(( sdot_kernel_batched)
, dim3(grid), dim3(threadSize), shmem, queue->cuda_stream() ,
n, x_array, incx, offset, info_array, gbstep);
}
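// ---------------------------------------------------------------------------
// Illustrative sketch only -- NOT part of the original MAGMA source. A serial,
// single-vector reference for the specialized dot above: it forms
// sum = x[0:n-1] * conj(x[0:n-1]) and overwrites x[n] with sqrt(x[n] - sum),
// flagging a non-positive raw diagonal exactly as the kernel does. The helper
// name and the plain CPU formulation are assumptions for illustration.
static void spotf2_sdot_serial_sketch(magma_int_t n, float *x, magma_int_t incx,
                                      magma_int_t offset, magma_int_t *info,
                                      magma_int_t gbstep)
{
    // x is assumed to already point at column "offset", as in the kernel.
    float sum = 0;
    for (magma_int_t i = 0; i < n; ++i)
        sum += x[i*incx] * x[i*incx];        // MAGMA_S_CONJ is the identity in real precision
    float xreal = x[n*incx];
    x[n*incx] = sqrt(xreal - sum);
    if (xreal <= MAGMA_D_ZERO)               // same raw-diagonal check as the kernel
        *info = offset + gbstep + 1;
}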
/******************************************************************************/
__global__ void sscal_kernel_batched(
int n, float **x_array, int incx, int offset,
magma_int_t *info_array)
{
// checkinfo to avoid computation of the singular matrix
if (info_array[blockIdx.z] != 0 ) return;
int id = threadIdx.x;
float *x = x_array[blockIdx.z]+offset;
__shared__ float factor;
if (threadIdx.x == 0) {
factor = MAGMA_S_MAKE(1.0/MAGMA_S_REAL(x[0]), 0.0);
}
__syncthreads();
if ( id < n && id > 0) {
x[id*incx] = x[id*incx] * factor;
//printf("x=%f", x[id*incx]);
}
}
/******************************************************************************/
void magma_spotf2_sscal_batched(
magma_int_t n, float **x_array, magma_int_t incx,
magma_int_t offset, magma_int_t *info_array,
magma_int_t batchCount, magma_queue_t queue)
{
/*
    Specialized Sscal: performs x[1:n-1] / x[0]
*/
dim3 grid(1, 1, batchCount);
dim3 threads(n, 1, 1);
hipLaunchKernelGGL(( sscal_kernel_batched)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, x_array, incx, offset, info_array);
}
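// ---------------------------------------------------------------------------
// Illustrative sketch only -- NOT part of the original MAGMA source. Serial,
// single-vector reference for the specialized scal above: x[1:n-1] is divided
// by x[0] (the freshly computed diagonal). The helper name is an assumption.
static void spotf2_sscal_serial_sketch(magma_int_t n, float *x, magma_int_t incx)
{
    float factor = 1.0f / x[0];
    for (magma_int_t i = 1; i < n; ++i)
        x[i*incx] *= factor;
}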
/******************************************************************************/
__global__ void slacgv_kernel_batched(int n, float **x_array, int incx, int offset)
{
int id = threadIdx.x;
float *x = x_array[blockIdx.z]+offset;
if ( id < n ) {
x[id*incx] = MAGMA_S_CONJ(x[id*incx]);
}
}
/***************************************************************************//**
Purpose
-------
SLACGV conjugates a real vector of length N.
Arguments
---------
N (input) INTEGER
The length of the vector X. N >= 0.
X (input/output) REAL array, dimension
(1+(N-1)*abs(INCX))
On entry, the vector of length N to be conjugated.
On exit, X is overwritten with conjg(X).
INCX (input) INTEGER
The spacing between successive elements of X.
@ingroup magma_lacgv_batched
*******************************************************************************/
void magma_slacgv_batched(
magma_int_t n, float **x_array, magma_int_t incx,
magma_int_t offset, magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(n, 1, 1);
hipLaunchKernelGGL(( slacgv_kernel_batched)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, x_array, incx, offset);
}
/******************************************************************************/
static __device__ void spotf2_device(int m, int n,
float *A, int lda,
float alpha,
float beta, magma_int_t *info, int gbstep)
{
/*
    Each thread block loads the entire A into shared memory,
    factorizes it, and copies it back. n must be small enough to fit in shared memory.
    n is checked against the macro POTF2_TILE_SIZE before the kernel is launched.
*/
// checkinfo to avoid computation of the singular matrix
if (*info != 0 ) return;
int tx = threadIdx.x;
float *sdata_A = shared_data;
__shared__ float factor;
__shared__ float sum[POTF2_TILE_SIZE];
// load A into sdata_A
if (tx < m)
{
for (int i=0; i < n; i++)
{
sdata_A[tx + i * m] = A[tx + i * lda];
}
}
__syncthreads();
for (int iter=0; iter < n; iter++)
{
float res = MAGMA_D_ZERO;
float res1 = MAGMA_S_ZERO;
//1) performs sdot sum = A[iter, 0:iter-1]*conj(A[iter, 0:iter-1])
//2) updates A[iter,iter] = sqrt(A[iter,iter]-sum);
if (tx < iter)
{
res = MAGMA_S_REAL (sdata_A[iter + tx * m] * MAGMA_S_CONJ(sdata_A[iter + tx * m]));
sum[tx] = res;
}
else
{
sum[tx] = 0.0;
}
__syncthreads();
magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum); //tried on K40: if m=32 n=32 the overall spotf2_device routine time is 60ms n=16 time=25 n=8 time=20ms
//magma_sum_reduce_n(iter, tx, sum); //tried on K40: if m=32 n=32 the time went from 61ms to 70ms when switching to reduce_n. n=16 time=28.
//magma_sum_reduce_inlined(iter, tx, sum); //tried on K40: similar to magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum);
__shared__ float xreal;
if (tx == 0) {
xreal = MAGMA_S_REAL(sdata_A[iter + iter * m]);
sdata_A[iter + iter * m] = MAGMA_S_MAKE(sqrt(xreal - sum[0]), 0);
if (xreal <= MAGMA_D_ZERO) {
*info = iter + gbstep + 1;
}
}
__syncthreads();
if (xreal <= MAGMA_D_ZERO) return;
__syncthreads();
//slacgv conjugates a real vector of length iter. //TODO
#ifdef COMPLEX
if (tx < iter)
{
sdata_A[iter + tx * m] = MAGMA_S_CONJ(sdata_A[iter + tx * m]);
}
__syncthreads();
#endif
// sgemv
// Compute elements iter:n-1 of column iter = A(iter:n,0:iter-1) * A(iter-1,0:iter-1) (row).
if (tx < m && tx > iter)
{
for (int j=0; j < iter; j++)
{
res1 += sdata_A[tx + j * m] * sdata_A[iter + j * m]; // TODO move the slacgv conj to be done automatically here implicitly.
}
sdata_A [tx + iter * m] = alpha * res1 + sdata_A [tx + iter * m] * beta;
}
__syncthreads();
//slacgv conjugates a real vector of length iter.
#ifdef COMPLEX
if (tx < iter)
{
sdata_A[iter + tx * m] = MAGMA_S_CONJ(sdata_A[iter + tx * m]);
}
__syncthreads();
#endif
// sscal perform A[iter:n-1, iter]/A[iter,iter];
if (tx == 0) {
factor = MAGMA_S_MAKE(1.0/MAGMA_S_REAL(sdata_A[iter + iter * m]), 0.0);
}
__syncthreads();
if ( tx < m && tx > iter) {
sdata_A[ tx + iter * m ] *= factor;
}
__syncthreads();
}// end of iter
//copy sdata_A to A
if (tx < m)
{
for (int i=0; i < n; i++)
{
A[tx + i * lda] = sdata_A[tx + i * m];
}
}
}
/******************************************************************************/
__global__ void spotf2_kernel_batched(int m, int n,
float **dA_array, int lda,
float alpha,
float beta,
magma_int_t *info_array, int gbstep)
{
/*
    Each thread block loads the entire dA_array[blockIdx.z] into shared memory,
    factorizes it, and copies it back. n must be small enough to fit in shared memory.
    n is checked against the macro POTF2_TILE_SIZE before the kernel is launched.
*/
int batchid = blockIdx.z;
spotf2_device(m, n, dA_array[batchid], lda, alpha, beta, &(info_array[batchid]), gbstep);
}
/******************************************************************************/
__global__ void spotf2_kernel(int m, int n,
float *dA, int lda,
float alpha,
float beta,
magma_int_t *info)
{
spotf2_device(m, n, dA, lda, alpha, beta, info, 0);
}
/***************************************************************************//**
Purpose
-------
spotf2 computes the Cholesky factorization of a real symmetric
positive definite matrix A.
The factorization has the form
A = U**H * U, if UPLO = MagmaUpper, or
A = L * L**H, if UPLO = MagmaLower,
where U is an upper triangular matrix and L is lower triangular.
This is the unblocked version of the algorithm, calling Level 2 BLAS.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies whether the upper or lower triangular part of the
symmetric matrix A is stored.
- = MagmaUpper: Upper triangular
- = MagmaLower: Lower triangular
@param[in]
m INTEGER
The number of rows of the matrix A.
@param[in]
n INTEGER
The order of the matrix A. N >= 0 and N <= 512.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array A, dimension (lda,n)
On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading
n by n upper triangular part of A contains the upper
triangular part of the matrix A, and the strictly lower
triangular part of A is not referenced. If UPLO = MagmaLower, the
leading n by n lower triangular part of A contains the lower
triangular part of the matrix A, and the strictly upper
triangular part of A is not referenced.
\n
On exit, if INFO = 0, the factor U or L from the Cholesky
factorization A = U**H * U or A = L * L**H.
@param[in]
lda INTEGER
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
info_array INTEGER array, dimension (batchCount).
Each is the info parameter for the corresponding matrix A
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, the leading minor of order k is not
positive definite, and the factorization could not be
completed.
@param[in]
gbstep INTEGER
Internal use, global step.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_potf2_batched
*******************************************************************************/
extern "C" magma_int_t
magma_spotf2_tile_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
float **dA_array, magma_int_t lda,
magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t arginfo = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower) {
arginfo = -1;
} else if (m < 0 || n < 0 || m > POTF2_TILE_SIZE || n > POTF2_TILE_SIZE) {
arginfo = -2;
} else if (lda < max(1,m)) {
arginfo = -4;
} else if (m < n) {
arginfo = -10;
}
if (uplo == MagmaUpper) {
fprintf( stderr, "%s: uplo=upper is not yet implemented\n", __func__ );
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
// Quick return if possible
if (m == 0 || n == 0) {
return arginfo;
}
float alpha = MAGMA_S_NEG_ONE;
float beta = MAGMA_S_ONE;
dim3 dimGrid(1, 1, batchCount);
dim3 threads(POTF2_TILE_SIZE, 1);
size_t shmem = sizeof(float)*m*n; // + sizeof(float)*(POTF2_TILE_SIZE+1);
hipLaunchKernelGGL(( spotf2_kernel_batched)
, dim3(dimGrid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, dA_array, lda, alpha, beta, info_array, gbstep);
return arginfo;
}
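// ---------------------------------------------------------------------------
// Illustrative sketch only -- NOT part of the original MAGMA source. A minimal
// serial reference for the unblocked lower Cholesky factorization that
// spotf2_device applies to each tile of the batch (column-major storage,
// real precision, so conj() drops out). The helper name is an assumption.
static magma_int_t spotf2_serial_lower_sketch(magma_int_t n, float *a, magma_int_t lda)
{
    for (magma_int_t j = 0; j < n; ++j) {
        // a(j,j) = sqrt( a(j,j) - sum_{k<j} a(j,k)^2 )
        float sum = 0;
        for (magma_int_t k = 0; k < j; ++k)
            sum += a[j + k*lda] * a[j + k*lda];
        float d = a[j + j*lda] - sum;
        if (d <= MAGMA_D_ZERO)
            return j + 1;                    // leading minor not positive definite
                                             // (the kernel flags the raw diagonal instead)
        a[j + j*lda] = sqrt(d);
        // a(i,j) = ( a(i,j) - sum_{k<j} a(i,k)*a(j,k) ) / a(j,j),  for i > j
        for (magma_int_t i = j + 1; i < n; ++i) {
            float s = 0;
            for (magma_int_t k = 0; k < j; ++k)
                s += a[i + k*lda] * a[j + k*lda];
            a[i + j*lda] = (a[i + j*lda] - s) / a[j + j*lda];
        }
    }
    return 0;                                // success
}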
| 13a40c35869c4146b0aa967e88ed27ff34cc6346.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Azzam Haidar
@author Tingxing Dong
@generated from magmablas/zpotf2_kernels_old.cu, normal z -> s, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
#include "magma_templates.h"
#define REAL
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ float shared_data[];
// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ float dble_shared_data[];
/******************************************************************************/
__global__ void sdot_kernel_batched(
int n, float **x_array, int incx, int offset,
magma_int_t *info_array, int gbstep)
{
int tx = threadIdx.x;
float *x = x_array[blockIdx.z]+offset;
float *sdata = dble_shared_data;
float res = MAGMA_S_ZERO;
if (tx < n) {
res = x[tx*incx];
}
sdata[tx] = MAGMA_S_REAL(res * MAGMA_S_CONJ(res));
__syncthreads();
for (int s = blockDim.x/2; s > 32; s >>= 1 ) {
if (tx < s) {
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if (tx < 32) {
volatile float* smem = sdata;
smem[tx] += smem[tx+32];
smem[tx] += smem[tx+16];
smem[tx] += smem[tx+8];
smem[tx] += smem[tx+4];
smem[tx] += smem[tx+2];
smem[tx] += smem[tx+1];
}
if (tx == 0) {
float xreal = MAGMA_S_REAL(x[n*incx]);
x[n*incx] = MAGMA_S_MAKE(sqrt(xreal - sdata[0]), 0);
if (xreal <= MAGMA_D_ZERO) {
info_array[blockIdx.z] = offset + gbstep + 1;
}
}
}
/******************************************************************************/
void magma_spotf2_sdot_batched(magma_int_t n, float **x_array, magma_int_t incx, magma_int_t offset, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
/*
Specialized Sdot
1) performs sdot sum = x[0:n-1]*conj(x[0:n-1])
2) updates x[n] = sqrt(x[n]-sum);
*/
if (n > MAX_NTHREADS) {
fprintf( stderr, "%s: n = %lld > %lld is not supported\n",
__func__, (long long) n, (long long) MAX_NTHREADS );
}
int threadSize;
if (n <= 1024 && n > 512) {
threadSize = 1024;
}
else if (n <= 512 && n > 256 ) {
threadSize = 512;
}
else if (n <= 256 && n > 128) {
threadSize = 256;
}
else if (n <= 128 && n > 64) {
threadSize = 128;
}
else {
threadSize = 64;
}
dim3 grid(1, 1, batchCount);
size_t shmem = threadSize * sizeof(float);
sdot_kernel_batched
<<< grid, threadSize, shmem, queue->cuda_stream() >>>
(n, x_array, incx, offset, info_array, gbstep);
}
/******************************************************************************/
__global__ void sscal_kernel_batched(
int n, float **x_array, int incx, int offset,
magma_int_t *info_array)
{
// checkinfo to avoid computation of the singular matrix
if (info_array[blockIdx.z] != 0 ) return;
int id = threadIdx.x;
float *x = x_array[blockIdx.z]+offset;
__shared__ float factor;
if (threadIdx.x == 0) {
factor = MAGMA_S_MAKE(1.0/MAGMA_S_REAL(x[0]), 0.0);
}
__syncthreads();
if ( id < n && id > 0) {
x[id*incx] = x[id*incx] * factor;
//printf("x=%f", x[id*incx]);
}
}
/******************************************************************************/
void magma_spotf2_sscal_batched(
magma_int_t n, float **x_array, magma_int_t incx,
magma_int_t offset, magma_int_t *info_array,
magma_int_t batchCount, magma_queue_t queue)
{
/*
    Specialized Sscal: performs x[1:n-1] / x[0]
*/
dim3 grid(1, 1, batchCount);
dim3 threads(n, 1, 1);
sscal_kernel_batched
<<< grid, threads, 0, queue->cuda_stream() >>>
(n, x_array, incx, offset, info_array);
}
/******************************************************************************/
__global__ void slacgv_kernel_batched(int n, float **x_array, int incx, int offset)
{
int id = threadIdx.x;
float *x = x_array[blockIdx.z]+offset;
if ( id < n ) {
x[id*incx] = MAGMA_S_CONJ(x[id*incx]);
}
}
/***************************************************************************//**
Purpose
-------
SLACGV conjugates a real vector of length N.
Arguments
---------
N (input) INTEGER
The length of the vector X. N >= 0.
X (input/output) REAL array, dimension
(1+(N-1)*abs(INCX))
On entry, the vector of length N to be conjugated.
On exit, X is overwritten with conjg(X).
INCX (input) INTEGER
The spacing between successive elements of X.
@ingroup magma_lacgv_batched
*******************************************************************************/
void magma_slacgv_batched(
magma_int_t n, float **x_array, magma_int_t incx,
magma_int_t offset, magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(n, 1, 1);
slacgv_kernel_batched
<<< grid, threads, 0, queue->cuda_stream() >>>
(n, x_array, incx, offset);
}
/******************************************************************************/
static __device__ void spotf2_device(int m, int n,
float *A, int lda,
float alpha,
float beta, magma_int_t *info, int gbstep)
{
/*
    Each thread block loads the entire A into shared memory,
    factorizes it, and copies it back. n must be small enough to fit in shared memory.
    n is checked against the macro POTF2_TILE_SIZE before the kernel is launched.
*/
// checkinfo to avoid computation of the singular matrix
if (*info != 0 ) return;
int tx = threadIdx.x;
float *sdata_A = shared_data;
__shared__ float factor;
__shared__ float sum[POTF2_TILE_SIZE];
// load A into sdata_A
if (tx < m)
{
for (int i=0; i < n; i++)
{
sdata_A[tx + i * m] = A[tx + i * lda];
}
}
__syncthreads();
for (int iter=0; iter < n; iter++)
{
float res = MAGMA_D_ZERO;
float res1 = MAGMA_S_ZERO;
//1) performs sdot sum = A[iter, 0:iter-1]*conj(A[iter, 0:iter-1])
//2) updates A[iter,iter] = sqrt(A[iter,iter]-sum);
if (tx < iter)
{
res = MAGMA_S_REAL (sdata_A[iter + tx * m] * MAGMA_S_CONJ(sdata_A[iter + tx * m]));
sum[tx] = res;
}
else
{
sum[tx] = 0.0;
}
__syncthreads();
magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum); //tried on K40: if m=32 n=32 the overall spotf2_device routine time is 60ms n=16 time=25 n=8 time=20ms
//magma_sum_reduce_n(iter, tx, sum); //tried on K40: if m=32 n=32 the time went from 61ms to 70ms when switching to reduce_n. n=16 time=28.
//magma_sum_reduce_inlined(iter, tx, sum); //tried on K40: similar to magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum);
__shared__ float xreal;
if (tx == 0) {
xreal = MAGMA_S_REAL(sdata_A[iter + iter * m]);
sdata_A[iter + iter * m] = MAGMA_S_MAKE(sqrt(xreal - sum[0]), 0);
if (xreal <= MAGMA_D_ZERO) {
*info = iter + gbstep + 1;
}
}
__syncthreads();
if (xreal <= MAGMA_D_ZERO) return;
__syncthreads();
//slacgv conjugates a real vector of length iter. //TODO
#ifdef COMPLEX
if (tx < iter)
{
sdata_A[iter + tx * m] = MAGMA_S_CONJ(sdata_A[iter + tx * m]);
}
__syncthreads();
#endif
// sgemv
// Compute elements iter:n-1 of column iter = A(iter:n,0:iter-1) * A(iter-1,0:iter-1) (row).
if (tx < m && tx > iter)
{
for (int j=0; j < iter; j++)
{
res1 += sdata_A[tx + j * m] * sdata_A[iter + j * m]; // TODO move the slacgv conj to be done automatically here implicitly.
}
sdata_A [tx + iter * m] = alpha * res1 + sdata_A [tx + iter * m] * beta;
}
__syncthreads();
//slacgv conjugates a real vector of length iter.
#ifdef COMPLEX
if (tx < iter)
{
sdata_A[iter + tx * m] = MAGMA_S_CONJ(sdata_A[iter + tx * m]);
}
__syncthreads();
#endif
// sscal perform A[iter:n-1, iter]/A[iter,iter];
if (tx == 0) {
factor = MAGMA_S_MAKE(1.0/MAGMA_S_REAL(sdata_A[iter + iter * m]), 0.0);
}
__syncthreads();
if ( tx < m && tx > iter) {
sdata_A[ tx + iter * m ] *= factor;
}
__syncthreads();
}// end of iter
//copy sdata_A to A
if (tx < m)
{
for (int i=0; i < n; i++)
{
A[tx + i * lda] = sdata_A[tx + i * m];
}
}
}
/******************************************************************************/
__global__ void spotf2_kernel_batched(int m, int n,
float **dA_array, int lda,
float alpha,
float beta,
magma_int_t *info_array, int gbstep)
{
/*
    Each thread block loads the entire dA_array[blockIdx.z] into shared memory,
    factorizes it, and copies it back. n must be small enough to fit in shared memory.
    n is checked against the macro POTF2_TILE_SIZE before the kernel is launched.
*/
int batchid = blockIdx.z;
spotf2_device(m, n, dA_array[batchid], lda, alpha, beta, &(info_array[batchid]), gbstep);
}
/******************************************************************************/
__global__ void spotf2_kernel(int m, int n,
float *dA, int lda,
float alpha,
float beta,
magma_int_t *info)
{
spotf2_device(m, n, dA, lda, alpha, beta, info, 0);
}
/***************************************************************************//**
Purpose
-------
spotf2 computes the Cholesky factorization of a real symmetric
positive definite matrix A.
The factorization has the form
A = U**H * U, if UPLO = MagmaUpper, or
A = L * L**H, if UPLO = MagmaLower,
where U is an upper triangular matrix and L is lower triangular.
This is the unblocked version of the algorithm, calling Level 2 BLAS.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies whether the upper or lower triangular part of the
symmetric matrix A is stored.
- = MagmaUpper: Upper triangular
- = MagmaLower: Lower triangular
@param[in]
m INTEGER
The number of rows of the matrix A.
@param[in]
n INTEGER
The order of the matrix A. N >= 0 and N <= 512.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array A, dimension (lda,n)
On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading
n by n upper triangular part of A contains the upper
triangular part of the matrix A, and the strictly lower
triangular part of A is not referenced. If UPLO = MagmaLower, the
leading n by n lower triangular part of A contains the lower
triangular part of the matrix A, and the strictly upper
triangular part of A is not referenced.
\n
On exit, if INFO = 0, the factor U or L from the Cholesky
factorization A = U**H * U or A = L * L**H.
@param[in]
lda INTEGER
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
info_array INTEGER array, dimension (batchCount).
Each is the info parameter for the corresponding matrix A
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, the leading minor of order k is not
positive definite, and the factorization could not be
completed.
@param[in]
gbstep INTEGER
Internal use, global step.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_potf2_batched
*******************************************************************************/
extern "C" magma_int_t
magma_spotf2_tile_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
float **dA_array, magma_int_t lda,
magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t arginfo = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower) {
arginfo = -1;
} else if (m < 0 || n < 0 || m > POTF2_TILE_SIZE || n > POTF2_TILE_SIZE) {
arginfo = -2;
} else if (lda < max(1,m)) {
arginfo = -4;
} else if (m < n) {
arginfo = -10;
}
if (uplo == MagmaUpper) {
fprintf( stderr, "%s: uplo=upper is not yet implemented\n", __func__ );
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
// Quick return if possible
if (m == 0 || n == 0) {
return arginfo;
}
float alpha = MAGMA_S_NEG_ONE;
float beta = MAGMA_S_ONE;
dim3 dimGrid(1, 1, batchCount);
dim3 threads(POTF2_TILE_SIZE, 1);
size_t shmem = sizeof(float)*m*n; // + sizeof(float)*(POTF2_TILE_SIZE+1);
spotf2_kernel_batched
<<< dimGrid, threads, shmem, queue->cuda_stream() >>>
(m, n, dA_array, lda, alpha, beta, info_array, gbstep);
return arginfo;
}
|
2d62f26632ff8d924c977c9922717e27ba22cd15.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel.h"
#include "bboxUtils.h"
#define CUBLAS_CHECK(condition) \
do \
{ \
hipblasStatus_t status = condition; \
if (status != HIPBLAS_STATUS_SUCCESS) \
{ \
printf("%s %d CUBLAS FAIL %s\n", __FILE__, __LINE__, cublasGetErrorString(status)); \
} \
} while (0)
namespace nvinfer1
{
namespace plugin
{
size_t normalizePluginWorkspaceSize(bool acrossSpatial, int C, int H, int W)
{
if (acrossSpatial)
return sizeof(float) * C * H * W;
else
return (size_t) 0;
}
} // namespace plugin
} // namespace nvinfer1
size_t normalizePluginWorkspaceSize(bool acrossSpatial, int C, int H, int W)
{
if (acrossSpatial)
return sizeof(float) * C * H * W;
else
return (size_t) 0;
}
template <unsigned nthds_per_cta>
__launch_bounds__(nthds_per_cta)
__global__ void normalizeNotAcrossSpatialKernel(
const bool channelShared,
const int N,
const int C,
const int H,
const int W,
const float eps,
const float* scale,
float* inputData,
float* outputData)
{
const int dim = C * H * W;
const int spatialDim = H * W;
const int tile = 32;
const int numTile = (spatialDim + tile - 1) / tile;
for (int n = blockIdx.x; n < N * numTile; n += gridDim.x)
{
float* input = inputData + (n / numTile) * dim;
float* output = outputData + (n / numTile) * dim;
__shared__ float sum[tile];
float localsum = 0.0F;
for (int i = threadIdx.x; i < tile; i += nthds_per_cta)
{
sum[i] = 0.0F;
}
__syncthreads();
for (int i = threadIdx.x; i < C * tile; i += nthds_per_cta)
{
int row = i / tile;
int col = (n % numTile) * tile + i % tile;
float data = 0.0F;
if (col < spatialDim)
data = input[row * spatialDim + col];
localsum += data * data;
}
atomicAdd(&sum[threadIdx.x & 31], localsum);
__syncthreads();
for (int i = threadIdx.x; i < C * tile; i += nthds_per_cta)
{
int row = i / tile;
int col = (n % numTile) * tile + i % tile;
if (col < spatialDim)
{
int offset = row * spatialDim + col;
output[offset] = input[offset] / sqrt(sum[threadIdx.x & 31] + eps);
}
}
if (channelShared)
{
for (int i = threadIdx.x; i < C * tile; i += nthds_per_cta)
{
int row = i / tile;
int col = (n % numTile) * tile + i % tile;
if (col < spatialDim)
output[row * spatialDim + col] *= scale[0];
}
}
else
{
for (int i = threadIdx.x; i < C * tile; i += nthds_per_cta)
{
int row = i / tile;
int col = (n % numTile) * tile + i % tile;
if (col < spatialDim)
output[row * spatialDim + col] *= scale[row];
}
}
}
}
pluginStatus_t normalizeNotAcrossSpatialGpu(
hipStream_t stream,
const bool channelShared,
const int N,
const int C,
const int H,
const int W,
const float eps,
const void* scale,
const void* inputData,
void* outputData)
{
const int BS = 128;
const int GS = 256;
// assumes warp size == 32
ASSERT(BS % 32 == 0);
hipLaunchKernelGGL(( normalizeNotAcrossSpatialKernel<BS>), dim3(GS), dim3(BS), 0, stream, channelShared, N, C, H, W, eps,
(const float*) scale,
(float*) inputData,
(float*) outputData);
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
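// ---------------------------------------------------------------------------
// Illustrative sketch only -- NOT part of the original plugin source. A serial
// reference for the kernel launched above: at every spatial position of every
// sample, the C channel values are divided by the L2 norm taken across
// channels (plus eps) and then multiplied by a shared or per-channel scale.
// The helper name and plain CPU form are assumptions for illustration.
static void normalizeNotAcrossSpatialSketch(bool channelShared, int N, int C, int H, int W,
                                            float eps, const float* scale,
                                            const float* in, float* out)
{
    const int spatialDim = H * W;
    for (int n = 0; n < N; ++n)
        for (int s = 0; s < spatialDim; ++s)
        {
            float sum = 0.0F;
            for (int c = 0; c < C; ++c)
            {
                const float v = in[(n * C + c) * spatialDim + s];
                sum += v * v;
            }
            const float inv = 1.0F / sqrt(sum + eps);
            for (int c = 0; c < C; ++c)
                out[(n * C + c) * spatialDim + s] =
                    in[(n * C + c) * spatialDim + s] * inv * (channelShared ? scale[0] : scale[c]);
        }
}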
__global__ void squareKernel(
const int n,
const float* x,
float* y)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n; i += gridDim.x * blockDim.x)
{
y[i] = x[i] * x[i];
}
}
__global__ void scalChannelKernel(
const int n,
const int spatialDim,
const float* inputData,
const float* scale,
float* outputData)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n; i += gridDim.x * blockDim.x)
{
        // scale factors are independent across different channels
// scale[i / spatialDim]: find the right scale factor for specific channels
outputData[i] = inputData[i] / scale[i / spatialDim];
}
}
namespace nvinfer1
{
namespace plugin
{
pluginStatus_t normalizeInference(
hipStream_t stream,
hipblasHandle_t handle,
const bool acrossSpatial,
const bool channelShared,
const int N,
const int C,
const int H,
const int W,
const float eps,
const void* scale,
const void* inputData,
void* outputData,
void* workspace)
{
const int dim = C * H * W;
    // Normalization is conducted for each sample from the batch independently
if (acrossSpatial)
{
float* input = (float*) const_cast<void*>(inputData);
float* output = (float*) outputData;
float* buffer = (float*) workspace;
for (int n = 0; n < N; ++n)
{
// Take the square of each element in the input
hipLaunchKernelGGL(( squareKernel), dim3((dim + 511) / 512), dim3(512), 0, stream, dim, input, buffer);
float normsqr = 0.0F;
// Sum up all the squared elements
CUBLAS_CHECK(hipblasSasum(handle, dim, buffer, 1, &normsqr));
// Make a copy of the input to the output
CUBLAS_CHECK(hipblasScopy(handle, dim, input, 1, output, 1));
// Calculate the inverse of the square root of the sum
// Use eps to prevent being divided by zero
normsqr = 1 / sqrt(normsqr + eps);
// Scale all the outputs by normsqr
CUBLAS_CHECK(hipblasSscal(handle, dim, &normsqr, output, 1));
// If channel shared is true, scale all the outputs
if (channelShared)
{
CUBLAS_CHECK(hipblasSscal(handle, dim, (float*) scale, output, 1));
}
// Use different scale factors for different channels
else
{
// scale the output according to channels
hipLaunchKernelGGL(( scalChannelKernel), dim3((dim + 511) / 512), dim3(512), 0, stream, dim, H * W, output, (float*) scale, output);
}
// Move cursors
input += dim;
output += dim;
}
return STATUS_SUCCESS;
}
// Normalization ignoring the batch
else
{
return normalizeNotAcrossSpatialGpu(stream, channelShared, N, C, H, W, eps, scale, inputData, outputData);
}
}
} // namespace plugin
} // namespace nvinfer1
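// ---------------------------------------------------------------------------
// Illustrative sketch only -- NOT part of the original plugin source. A serial
// reference for the acrossSpatial branch of normalizeInference above: each
// sample is scaled by 1/sqrt(sum of squares over its whole C*H*W block + eps),
// then multiplied by scale[0] when the scale is shared, or divided by the
// per-channel scale (mirroring scalChannelKernel) otherwise. The helper name
// is an assumption for illustration.
static void normalizeAcrossSpatialSketch(bool channelShared, int N, int C, int H, int W,
                                         float eps, const float* scale,
                                         const float* in, float* out)
{
    const int dim = C * H * W;
    const int spatialDim = H * W;
    for (int n = 0; n < N; ++n)
    {
        float normsqr = 0.0F;                              // squareKernel + hipblasSasum
        for (int i = 0; i < dim; ++i)
            normsqr += in[n * dim + i] * in[n * dim + i];
        const float inv = 1.0F / sqrt(normsqr + eps);      // 1 / sqrt(sum + eps)
        for (int i = 0; i < dim; ++i)
        {
            const float v = in[n * dim + i] * inv;         // hipblasScopy + hipblasSscal
            out[n * dim + i] = channelShared ? v * scale[0]
                                             : v / scale[i / spatialDim];
        }
    }
}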
pluginStatus_t normalizeInference(
hipStream_t stream,
hipblasHandle_t handle,
const bool acrossSpatial,
const bool channelShared,
const int N,
const int C,
const int H,
const int W,
const float eps,
const void* scale,
const void* inputData,
void* outputData,
void* workspace)
{
const int dim = C * H * W;
    // Normalization is conducted for each sample from the batch independently
if (acrossSpatial)
{
float* input = (float*) const_cast<void*>(inputData);
float* output = (float*) outputData;
float* buffer = (float*) workspace;
for (int n = 0; n < N; ++n)
{
// Take the square of each element in the input
hipLaunchKernelGGL(( squareKernel), dim3((dim + 511) / 512), dim3(512), 0, stream, dim, input, buffer);
float normsqr = 0.0F;
// Sum up all the squared elements
CUBLAS_CHECK(hipblasSasum(handle, dim, buffer, 1, &normsqr));
// Make a copy of the input to the output
CUBLAS_CHECK(hipblasScopy(handle, dim, input, 1, output, 1));
// Calculate the inverse of the square root of the sum
// Use eps to prevent being divided by zero
normsqr = 1 / sqrt(normsqr + eps);
// Scale all the outputs by normsqr
CUBLAS_CHECK(hipblasSscal(handle, dim, &normsqr, output, 1));
// If channel shared is true, scale all the outputs
if (channelShared)
{
CUBLAS_CHECK(hipblasSscal(handle, dim, (float*) scale, output, 1));
}
// Use different scale factors for different channels
else
{
// scale the output according to channels
hipLaunchKernelGGL(( scalChannelKernel), dim3((dim + 511) / 512), dim3(512), 0, stream, dim, H * W, output, (float*) scale, output);
}
// Move cursors
input += dim;
output += dim;
}
return STATUS_SUCCESS;
}
// Normalization ignoring the batch
else
{
return normalizeNotAcrossSpatialGpu(stream, channelShared, N, C, H, W, eps, scale, inputData, outputData);
}
}
| 2d62f26632ff8d924c977c9922717e27ba22cd15.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel.h"
#include "bboxUtils.h"
#define CUBLAS_CHECK(condition) \
do \
{ \
cublasStatus_t status = condition; \
if (status != CUBLAS_STATUS_SUCCESS) \
{ \
printf("%s %d CUBLAS FAIL %s\n", __FILE__, __LINE__, cublasGetErrorString(status)); \
} \
} while (0)
namespace nvinfer1
{
namespace plugin
{
size_t normalizePluginWorkspaceSize(bool acrossSpatial, int C, int H, int W)
{
if (acrossSpatial)
return sizeof(float) * C * H * W;
else
return (size_t) 0;
}
} // namespace plugin
} // namespace nvinfer1
size_t normalizePluginWorkspaceSize(bool acrossSpatial, int C, int H, int W)
{
if (acrossSpatial)
return sizeof(float) * C * H * W;
else
return (size_t) 0;
}
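// Normalizes each spatial position across channels (each block handles a 32-column tile of one sample), then applies the shared or per-channel scale factor.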
template <unsigned nthds_per_cta>
__launch_bounds__(nthds_per_cta)
__global__ void normalizeNotAcrossSpatialKernel(
const bool channelShared,
const int N,
const int C,
const int H,
const int W,
const float eps,
const float* scale,
float* inputData,
float* outputData)
{
const int dim = C * H * W;
const int spatialDim = H * W;
const int tile = 32;
const int numTile = (spatialDim + tile - 1) / tile;
for (int n = blockIdx.x; n < N * numTile; n += gridDim.x)
{
float* input = inputData + (n / numTile) * dim;
float* output = outputData + (n / numTile) * dim;
__shared__ float sum[tile];
float localsum = 0.0F;
for (int i = threadIdx.x; i < tile; i += nthds_per_cta)
{
sum[i] = 0.0F;
}
__syncthreads();
for (int i = threadIdx.x; i < C * tile; i += nthds_per_cta)
{
int row = i / tile;
int col = (n % numTile) * tile + i % tile;
float data = 0.0F;
if (col < spatialDim)
data = input[row * spatialDim + col];
localsum += data * data;
}
atomicAdd(&sum[threadIdx.x & 31], localsum);
__syncthreads();
for (int i = threadIdx.x; i < C * tile; i += nthds_per_cta)
{
int row = i / tile;
int col = (n % numTile) * tile + i % tile;
if (col < spatialDim)
{
int offset = row * spatialDim + col;
output[offset] = input[offset] / sqrt(sum[threadIdx.x & 31] + eps);
}
}
if (channelShared)
{
for (int i = threadIdx.x; i < C * tile; i += nthds_per_cta)
{
int row = i / tile;
int col = (n % numTile) * tile + i % tile;
if (col < spatialDim)
output[row * spatialDim + col] *= scale[0];
}
}
else
{
for (int i = threadIdx.x; i < C * tile; i += nthds_per_cta)
{
int row = i / tile;
int col = (n % numTile) * tile + i % tile;
if (col < spatialDim)
output[row * spatialDim + col] *= scale[row];
}
}
}
}
pluginStatus_t normalizeNotAcrossSpatialGpu(
cudaStream_t stream,
const bool channelShared,
const int N,
const int C,
const int H,
const int W,
const float eps,
const void* scale,
const void* inputData,
void* outputData)
{
const int BS = 128;
const int GS = 256;
// assumes warp size == 32
ASSERT(BS % 32 == 0);
normalizeNotAcrossSpatialKernel<BS><<<GS, BS, 0, stream>>>(channelShared, N, C, H, W, eps,
(const float*) scale,
(float*) inputData,
(float*) outputData);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
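// Element-wise square: y[i] = x[i] * x[i]; builds the squared-norm buffer for the across-spatial path.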
__global__ void squareKernel(
const int n,
const float* x,
float* y)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n; i += gridDim.x * blockDim.x)
{
y[i] = x[i] * x[i];
}
}
__global__ void scalChannelKernel(
const int n,
const int spatialDim,
const float* inputData,
const float* scale,
float* outputData)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n; i += gridDim.x * blockDim.x)
{
        // scale factors are independent across different channels
// scale[i / spatialDim]: find the right scale factor for specific channels
outputData[i] = inputData[i] / scale[i / spatialDim];
}
}
namespace nvinfer1
{
namespace plugin
{
pluginStatus_t normalizeInference(
cudaStream_t stream,
cublasHandle_t handle,
const bool acrossSpatial,
const bool channelShared,
const int N,
const int C,
const int H,
const int W,
const float eps,
const void* scale,
const void* inputData,
void* outputData,
void* workspace)
{
const int dim = C * H * W;
    // Normalization is conducted for each sample from the batch independently
if (acrossSpatial)
{
float* input = (float*) const_cast<void*>(inputData);
float* output = (float*) outputData;
float* buffer = (float*) workspace;
for (int n = 0; n < N; ++n)
{
// Take the square of each element in the input
squareKernel<<<(dim + 511) / 512, 512, 0, stream>>>(dim, input, buffer);
float normsqr = 0.0F;
// Sum up all the squared elements
CUBLAS_CHECK(cublasSasum(handle, dim, buffer, 1, &normsqr));
// Make a copy of the input to the output
CUBLAS_CHECK(cublasScopy(handle, dim, input, 1, output, 1));
// Calculate the inverse of the square root of the sum
            // Use eps to prevent division by zero
normsqr = 1 / sqrt(normsqr + eps);
// Scale all the outputs by normsqr
CUBLAS_CHECK(cublasSscal(handle, dim, &normsqr, output, 1));
// If channel shared is true, scale all the outputs
if (channelShared)
{
CUBLAS_CHECK(cublasSscal(handle, dim, (float*) scale, output, 1));
}
// Use different scale factors for different channels
else
{
// scale the output according to channels
scalChannelKernel<<<(dim + 511) / 512, 512, 0, stream>>>(dim, H * W, output, (float*) scale, output);
}
// Move cursors
input += dim;
output += dim;
}
return STATUS_SUCCESS;
}
// Normalization ignoring the batch
else
{
return normalizeNotAcrossSpatialGpu(stream, channelShared, N, C, H, W, eps, scale, inputData, outputData);
}
}
} // namespace plugin
} // namespace nvinfer1
pluginStatus_t normalizeInference(
cudaStream_t stream,
cublasHandle_t handle,
const bool acrossSpatial,
const bool channelShared,
const int N,
const int C,
const int H,
const int W,
const float eps,
const void* scale,
const void* inputData,
void* outputData,
void* workspace)
{
const int dim = C * H * W;
    // Normalization is conducted for each sample from the batch independently
if (acrossSpatial)
{
float* input = (float*) const_cast<void*>(inputData);
float* output = (float*) outputData;
float* buffer = (float*) workspace;
for (int n = 0; n < N; ++n)
{
// Take the square of each element in the input
squareKernel<<<(dim + 511) / 512, 512, 0, stream>>>(dim, input, buffer);
float normsqr = 0.0F;
// Sum up all the squared elements
CUBLAS_CHECK(cublasSasum(handle, dim, buffer, 1, &normsqr));
// Make a copy of the input to the output
CUBLAS_CHECK(cublasScopy(handle, dim, input, 1, output, 1));
// Calculate the inverse of the square root of the sum
            // Use eps to prevent division by zero
normsqr = 1 / sqrt(normsqr + eps);
// Scale all the outputs by normsqr
CUBLAS_CHECK(cublasSscal(handle, dim, &normsqr, output, 1));
// If channel shared is true, scale all the outputs
if (channelShared)
{
CUBLAS_CHECK(cublasSscal(handle, dim, (float*) scale, output, 1));
}
// Use different scale factors for different channels
else
{
// scale the output according to channels
scalChannelKernel<<<(dim + 511) / 512, 512, 0, stream>>>(dim, H * W, output, (float*) scale, output);
}
// Move cursors
input += dim;
output += dim;
}
return STATUS_SUCCESS;
}
// Normalization ignoring the batch
else
{
return normalizeNotAcrossSpatialGpu(stream, channelShared, N, C, H, W, eps, scale, inputData, outputData);
}
}
|
f865e61c731f4628f25fc359e7a792f28529d065.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/sort.h>
#include <thrust/adjacent_difference.h>
#include <thrust/device_ptr.h>
#include <thrust/remove.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/unique.h>
#include <hip/hip_runtime.h>
#include <algorithm>
#include <vector>
#include <numeric>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/coalesce_impl.cuh"
#include "include/hip/hip_fp16.h"
__global__ void FlattenIndicesKernel(int64_t *flatten_input_indices, const size_t indices_num, const size_t values_num,
const int64_t *input_indices, const int64_t *input_shape) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < values_num; pos += blockDim.x * gridDim.x) {
int64_t temp = 0;
int64_t temp2 = 0;
if (pos < values_num) {
for (int x = 0; x < indices_num; x++) {
if (x != indices_num - 1) {
temp2 = input_indices[pos + (x * values_num)];
for (int j = (x + 1); j < indices_num; j++) {
temp2 *= input_shape[j];
}
temp += temp2;
temp2 = 0;
} else {
temp += input_indices[pos + (x * values_num)];
}
}
flatten_input_indices[pos] = temp;
}
}
}
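// Coalesces duplicates: threads [0, newNnz) sum the values of each group of equal flattened indices, while threads [newNnz, 2*newNnz) gather the multi-dimensional output indices of each group.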
template <typename T>
__global__ void CoalesceKernel(int64_t *origin_indices, int64_t newNnz, int64_t *unique_indices,
const size_t indices_num, const size_t values_num, const int64_t *input_indices,
const T *input_values, int64_t *output_indices, T *output_value) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < indices_num * values_num;
pos += blockDim.x * gridDim.x) {
if (pos < newNnz) {
output_value[pos] = 0;
const int begin = unique_indices[pos];
const int end = (pos < newNnz - 1) ? unique_indices[pos + 1] : values_num;
for (int row = begin; row < end; row++) {
output_value[pos] += input_values[origin_indices[row]];
}
output_indices[pos] = input_indices[origin_indices[unique_indices[pos]]];
} else if (pos < (newNnz * 2)) {
for (int x = 0; x < indices_num; x++) {
output_indices[(pos - newNnz) + (x * newNnz)] =
input_indices[origin_indices[unique_indices[pos - newNnz]] + x * values_num];
}
}
}
}
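// Input validation: sets the flag to 1 for negative indices, 2 for non-positive shape entries, and 3 for indices that exceed their dimension.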
__global__ void CoalesceKernelCheck(const int64_t *indices, const int64_t *input_shape, const size_t indices_num,
size_t values_num, int *ret_flag) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < indices_num * values_num; i += gridDim.x * blockDim.x) {
if (indices[i] < 0) {
*ret_flag = 1;
return;
}
int shape_pos = i / values_num;
if (input_shape[shape_pos] <= 0) {
*ret_flag = 2;
return;
}
if (indices[i] >= input_shape[shape_pos]) {
*ret_flag = 3;
return;
}
}
}
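// Host driver: validates the inputs, flattens and sorts the indices, collapses duplicates with unique_by_key, launches the coalescing kernel, and returns the new number of non-zeros (or -1 on invalid input).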
template <typename T>
int Coalesce(int64_t *origin_indices, int64_t *unique_indices, const size_t shape_elements, const size_t indices_num,
const size_t values_num, int *ret_flag_host, int64_t *flatten_input_indices, const int64_t *input_indices,
const T *input_values, const int64_t *input_shape, int64_t *output_indices, T *output_value,
int64_t *output_shape, const uint32_t &device_id, hipStream_t cuda_stream) {
size_t allelement = indices_num * values_num;
int *ret_flag_device = nullptr;
(void)hipMalloc(&ret_flag_device, sizeof(int));
(void)hipMemset(ret_flag_device, 0, sizeof(int));
hipLaunchKernelGGL(( CoalesceKernelCheck), dim3(CUDA_BLOCKS(device_id, allelement)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream,
input_indices, input_shape, indices_num, values_num, ret_flag_device);
(void)hipMemcpy(ret_flag_host, ret_flag_device, sizeof(int), hipMemcpyDeviceToHost);
(void)hipFree(ret_flag_device);
if (*ret_flag_host != 0) {
return -1;
}
auto policy = thrust::hip::par.on(cuda_stream);
thrust::copy(thrust::device_pointer_cast(input_shape), thrust::device_pointer_cast(input_shape) + shape_elements,
thrust::device_pointer_cast(output_shape));
hipLaunchKernelGGL(( FlattenIndicesKernel), dim3(CUDA_BLOCKS(device_id, values_num)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream,
flatten_input_indices, indices_num, values_num, input_indices, input_shape);
thrust::counting_iterator<int64_t> countIterO(0);
thrust::counting_iterator<int64_t> countIterI(0);
thrust::copy(policy, countIterI, countIterI + values_num, origin_indices);
thrust::sort_by_key(policy, thrust::device_pointer_cast(flatten_input_indices),
thrust::device_pointer_cast(flatten_input_indices) + values_num,
thrust::device_pointer_cast(origin_indices));
thrust::copy(policy, countIterO, countIterO + values_num, unique_indices);
thrust::pair<thrust::device_ptr<int64_t>, thrust::device_ptr<int64_t>> newEnd;
newEnd = thrust::unique_by_key(policy, thrust::device_pointer_cast(flatten_input_indices),
thrust::device_pointer_cast(flatten_input_indices) + values_num,
thrust::device_pointer_cast(unique_indices));
int64_t newNnz = newEnd.first - thrust::device_pointer_cast(flatten_input_indices);
hipLaunchKernelGGL(( CoalesceKernel), dim3(CUDA_BLOCKS(device_id, allelement)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream,
origin_indices, newNnz, unique_indices, indices_num, values_num, input_indices, input_values, output_indices,
output_value);
int output_size = newNnz;
return output_size;
}
template CUDA_LIB_EXPORT int Coalesce<float>(int64_t *origin_indices, int64_t *unique_indices,
const size_t shape_elements, const size_t indices_num,
const size_t values_num, int *ret_flag_host,
int64_t *flatten_input_indices, const int64_t *input_indices,
const float *input_values, const int64_t *input_shape,
int64_t *output_indices, float *output_value, int64_t *output_shape,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT int Coalesce<half>(int64_t *origin_indices, int64_t *unique_indices,
const size_t shape_elements, const size_t indices_num,
const size_t values_num, int *ret_flag_host, int64_t *flatten_input_indices,
const int64_t *input_indices, const half *input_values,
const int64_t *input_shape, int64_t *output_indices, half *output_value,
int64_t *output_shape, const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT int Coalesce<double>(int64_t *origin_indices, int64_t *unique_indices,
const size_t shape_elements, const size_t indices_num,
const size_t values_num, int *ret_flag_host,
int64_t *flatten_input_indices, const int64_t *input_indices,
const double *input_values, const int64_t *input_shape,
int64_t *output_indices, double *output_value, int64_t *output_shape,
const uint32_t &device_id, hipStream_t cuda_stream);
| f865e61c731f4628f25fc359e7a792f28529d065.cu | /**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/sort.h>
#include <thrust/adjacent_difference.h>
#include <thrust/device_ptr.h>
#include <thrust/remove.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/unique.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <vector>
#include <numeric>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/coalesce_impl.cuh"
#include "include/cuda_fp16.h"
__global__ void FlattenIndicesKernel(int64_t *flatten_input_indices, const size_t indices_num, const size_t values_num,
const int64_t *input_indices, const int64_t *input_shape) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < values_num; pos += blockDim.x * gridDim.x) {
int64_t temp = 0;
int64_t temp2 = 0;
if (pos < values_num) {
for (int x = 0; x < indices_num; x++) {
if (x != indices_num - 1) {
temp2 = input_indices[pos + (x * values_num)];
for (int j = (x + 1); j < indices_num; j++) {
temp2 *= input_shape[j];
}
temp += temp2;
temp2 = 0;
} else {
temp += input_indices[pos + (x * values_num)];
}
}
flatten_input_indices[pos] = temp;
}
}
}
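// Coalesces duplicates: threads [0, newNnz) sum the values of each group of equal flattened indices, while threads [newNnz, 2*newNnz) gather the multi-dimensional output indices of each group.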
template <typename T>
__global__ void CoalesceKernel(int64_t *origin_indices, int64_t newNnz, int64_t *unique_indices,
const size_t indices_num, const size_t values_num, const int64_t *input_indices,
const T *input_values, int64_t *output_indices, T *output_value) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < indices_num * values_num;
pos += blockDim.x * gridDim.x) {
if (pos < newNnz) {
output_value[pos] = 0;
const int begin = unique_indices[pos];
const int end = (pos < newNnz - 1) ? unique_indices[pos + 1] : values_num;
for (int row = begin; row < end; row++) {
output_value[pos] += input_values[origin_indices[row]];
}
output_indices[pos] = input_indices[origin_indices[unique_indices[pos]]];
} else if (pos < (newNnz * 2)) {
for (int x = 0; x < indices_num; x++) {
output_indices[(pos - newNnz) + (x * newNnz)] =
input_indices[origin_indices[unique_indices[pos - newNnz]] + x * values_num];
}
}
}
}
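// Input validation: sets the flag to 1 for negative indices, 2 for non-positive shape entries, and 3 for indices that exceed their dimension.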
__global__ void CoalesceKernelCheck(const int64_t *indices, const int64_t *input_shape, const size_t indices_num,
size_t values_num, int *ret_flag) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < indices_num * values_num; i += gridDim.x * blockDim.x) {
if (indices[i] < 0) {
*ret_flag = 1;
return;
}
int shape_pos = i / values_num;
if (input_shape[shape_pos] <= 0) {
*ret_flag = 2;
return;
}
if (indices[i] >= input_shape[shape_pos]) {
*ret_flag = 3;
return;
}
}
}
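// Host driver: validates the inputs, flattens and sorts the indices, collapses duplicates with unique_by_key, launches the coalescing kernel, and returns the new number of non-zeros (or -1 on invalid input).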
template <typename T>
int Coalesce(int64_t *origin_indices, int64_t *unique_indices, const size_t shape_elements, const size_t indices_num,
const size_t values_num, int *ret_flag_host, int64_t *flatten_input_indices, const int64_t *input_indices,
const T *input_values, const int64_t *input_shape, int64_t *output_indices, T *output_value,
int64_t *output_shape, const uint32_t &device_id, cudaStream_t cuda_stream) {
size_t allelement = indices_num * values_num;
int *ret_flag_device = nullptr;
(void)cudaMalloc(&ret_flag_device, sizeof(int));
(void)cudaMemset(ret_flag_device, 0, sizeof(int));
CoalesceKernelCheck<<<CUDA_BLOCKS(device_id, allelement), CUDA_THREADS(device_id), 0, cuda_stream>>>(
input_indices, input_shape, indices_num, values_num, ret_flag_device);
(void)cudaMemcpy(ret_flag_host, ret_flag_device, sizeof(int), cudaMemcpyDeviceToHost);
(void)cudaFree(ret_flag_device);
if (*ret_flag_host != 0) {
return -1;
}
auto policy = thrust::cuda::par.on(cuda_stream);
thrust::copy(thrust::device_pointer_cast(input_shape), thrust::device_pointer_cast(input_shape) + shape_elements,
thrust::device_pointer_cast(output_shape));
FlattenIndicesKernel<<<CUDA_BLOCKS(device_id, values_num), CUDA_THREADS(device_id), 0, cuda_stream>>>(
flatten_input_indices, indices_num, values_num, input_indices, input_shape);
thrust::counting_iterator<int64_t> countIterO(0);
thrust::counting_iterator<int64_t> countIterI(0);
thrust::copy(policy, countIterI, countIterI + values_num, origin_indices);
thrust::sort_by_key(policy, thrust::device_pointer_cast(flatten_input_indices),
thrust::device_pointer_cast(flatten_input_indices) + values_num,
thrust::device_pointer_cast(origin_indices));
thrust::copy(policy, countIterO, countIterO + values_num, unique_indices);
thrust::pair<thrust::device_ptr<int64_t>, thrust::device_ptr<int64_t>> newEnd;
newEnd = thrust::unique_by_key(policy, thrust::device_pointer_cast(flatten_input_indices),
thrust::device_pointer_cast(flatten_input_indices) + values_num,
thrust::device_pointer_cast(unique_indices));
int64_t newNnz = newEnd.first - thrust::device_pointer_cast(flatten_input_indices);
CoalesceKernel<<<CUDA_BLOCKS(device_id, allelement), CUDA_THREADS(device_id), 0, cuda_stream>>>(
origin_indices, newNnz, unique_indices, indices_num, values_num, input_indices, input_values, output_indices,
output_value);
int output_size = newNnz;
return output_size;
}
template CUDA_LIB_EXPORT int Coalesce<float>(int64_t *origin_indices, int64_t *unique_indices,
const size_t shape_elements, const size_t indices_num,
const size_t values_num, int *ret_flag_host,
int64_t *flatten_input_indices, const int64_t *input_indices,
const float *input_values, const int64_t *input_shape,
int64_t *output_indices, float *output_value, int64_t *output_shape,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT int Coalesce<half>(int64_t *origin_indices, int64_t *unique_indices,
const size_t shape_elements, const size_t indices_num,
const size_t values_num, int *ret_flag_host, int64_t *flatten_input_indices,
const int64_t *input_indices, const half *input_values,
const int64_t *input_shape, int64_t *output_indices, half *output_value,
int64_t *output_shape, const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT int Coalesce<double>(int64_t *origin_indices, int64_t *unique_indices,
const size_t shape_elements, const size_t indices_num,
const size_t values_num, int *ret_flag_host,
int64_t *flatten_input_indices, const int64_t *input_indices,
const double *input_values, const int64_t *input_shape,
int64_t *output_indices, double *output_value, int64_t *output_shape,
const uint32_t &device_id, cudaStream_t cuda_stream);
|
e6de34cb24c57bddc4d0510ea6415cf3471f8d1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated c Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
/*********************************************************/
/*
* Swap diagonal blocks of two matrices.
* For more detail see the description below.
*/
__global__ void
magmagpu_cswapdblk(int nb,
magmaFloatComplex *dA1, int ldda1, int inca1,
magmaFloatComplex *dA2, int ldda2, int inca2 )
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
dA1 += tx + bx * nb * (ldda1 + inca1);
dA2 += tx + bx * nb * (ldda2 + inca2);
magmaFloatComplex tmp;
#pragma unroll
for( int i = 0; i < nb; i++ ){
tmp = dA1[i*ldda1];
dA1[i*ldda1] = dA2[i*ldda2];
dA2[i*ldda2] = tmp;
}
}
extern "C" void
magmablas_cswapdblk(magma_int_t n, magma_int_t nb,
magmaFloatComplex *dA1, magma_int_t ldda1, magma_int_t inca1,
magmaFloatComplex *dA2, magma_int_t ldda2, magma_int_t inca2 )
{
/* -- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
Purpose
=======
This is an auxiliary MAGMA routine. It swaps diagonal blocks
of size nb x nb between matrices dA1 and dA2 on the GPU.
The number of blocks swapped is (n-1)/nb. For i = 1 .. (n-1)/nb matrices
dA1 + i * nb * (ldda1 + inca1) and
dA2 + i * nb * (ldda2 + inca2) are swapped.
*/
magma_int_t blocksize = nb;
dim3 blocks( (n-1) / blocksize, 1, 1);
hipLaunchKernelGGL(( magmagpu_cswapdblk), dim3(blocks), dim3(blocksize), 0, magma_stream , nb,
dA1, ldda1, inca1,
dA2, ldda2, inca2 );
}
| e6de34cb24c57bddc4d0510ea6415cf3471f8d1e.cu | /*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated c Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
/*********************************************************/
/*
* Swap diagonal blocks of two matrices.
* For more detail see the description below.
*/
__global__ void
magmagpu_cswapdblk(int nb,
magmaFloatComplex *dA1, int ldda1, int inca1,
magmaFloatComplex *dA2, int ldda2, int inca2 )
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
dA1 += tx + bx * nb * (ldda1 + inca1);
dA2 += tx + bx * nb * (ldda2 + inca2);
magmaFloatComplex tmp;
#pragma unroll
for( int i = 0; i < nb; i++ ){
tmp = dA1[i*ldda1];
dA1[i*ldda1] = dA2[i*ldda2];
dA2[i*ldda2] = tmp;
}
}
extern "C" void
magmablas_cswapdblk(magma_int_t n, magma_int_t nb,
magmaFloatComplex *dA1, magma_int_t ldda1, magma_int_t inca1,
magmaFloatComplex *dA2, magma_int_t ldda2, magma_int_t inca2 )
{
/* -- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
Purpose
=======
This is an auxiliary MAGMA routine. It swaps diagonal blocks
of size nb x nb between matrices dA1 and dA2 on the GPU.
The number of blocks swapped is (n-1)/nb. For i = 1 .. (n-1)/nb matrices
dA1 + i * nb * (ldda1 + inca1) and
dA2 + i * nb * (ldda2 + inca2) are swapped.
*/
magma_int_t blocksize = nb;
dim3 blocks( (n-1) / blocksize, 1, 1);
magmagpu_cswapdblk<<< blocks, blocksize, 0, magma_stream >>>( nb,
dA1, ldda1, inca1,
dA2, ldda2, inca2 );
}
|
c63d9eafb3d65eaf16c5c29fca8ff45f7f436282.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
const int WARP_SIZE = 32;
// The maximum number of threads in a block
const int MAX_BLOCK_SIZE = 512;
// Number of threads in a block given an input size up to MAX_BLOCK_SIZE
static int getNumThreads(int nElem) {
int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE };
for (int i = 0; i != 5; ++i) {
if (nElem <= threadSizes[i]) {
return threadSizes[i];
}
}
return MAX_BLOCK_SIZE;
}
// Returns the index of the most significant 1 bit in `val`.
__device__ __forceinline__ int getMSB(int val) {
return 31 - __clz(val);
}
template <typename Dtype, typename Acctype>
struct Float2 {
Acctype v1, v2;
__device__ Float2() {}
__device__ Float2(Dtype v1, Dtype v2) : v1(ScalarConvert<Dtype, Acctype>::to(v1)), v2(ScalarConvert<Dtype, Acctype>::to(v2)) {}
__device__ Float2(Dtype v) : v1(ScalarConvert<Dtype, Acctype>::to(v)), v2(ScalarConvert<Dtype, Acctype>::to(v)) {}
__device__ Float2(int v) : v1(ScalarConvert<int, Acctype>::to(v)), v2(ScalarConvert<int, Acctype>::to(v)) {}
__device__ Float2& operator+=(const Float2& a) {
v1 += a.v1;
v2 += a.v2;
return *this;
}
};
template <typename Dtype, typename Acctype, typename DeviceTensor3>
struct SumOp {
__device__ SumOp(const DeviceTensor3 t) : tensor(t) {}
__device__ __forceinline__ Acctype operator()(int batch, int plane, int n) {
return ScalarConvert<Dtype, Acctype>::to(tensor[batch][plane][n]);
}
const DeviceTensor3 tensor;
};
template <typename Dtype, typename Acctype, typename DeviceTensor3>
struct VarOp {
__device__ VarOp(Acctype m, const DeviceTensor3 t) : mean(m), tensor(t) {}
__device__ __forceinline__ Acctype operator()(int batch, int plane, int n) {
Dtype val = tensor[batch][plane][n];
return (val - mean) * (val - mean);
}
const Acctype mean;
const DeviceTensor3 tensor;
};
template <typename Dtype, typename Acctype, typename DeviceTensor3>
struct GradOp {
__device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g)
: mean(m), input(i), gradOutput(g) {}
__device__ __forceinline__ Float2<Dtype, Acctype> operator()(int batch, int plane, int n) {
Dtype g = gradOutput[batch][plane][n];
Dtype c = ScalarConvert<Acctype, Dtype>::to(input[batch][plane][n] - mean);
return Float2<Dtype, Acctype>(g, g * c);
}
const Acctype mean;
const DeviceTensor3 input;
const DeviceTensor3 gradOutput;
};
// Sum across all threads within a warp
template <typename T>
static __device__ __forceinline__ T warpSum(T val) {
#if __CUDA_ARCH__ >= 300
for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE);
}
#else
__shared__ T values[MAX_BLOCK_SIZE];
values[threadIdx.x] = val;
__threadfence_block();
const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE;
for (int i = 1; i < WARP_SIZE; i++) {
val += values[base + ((i + threadIdx.x) % WARP_SIZE)];
}
#endif
return val;
}
template <typename Dtype, typename Acctype>
static __device__ __forceinline__ Float2<Dtype, Acctype> warpSum(Float2<Dtype, Acctype> value) {
value.v1 = warpSum(value.v1);
value.v2 = warpSum(value.v2);
return value;
}
// Sum across (batch, x/y/z) applying Op() pointwise
template<typename T, typename Op, typename DeviceTensor3>
__device__ T reduce(Op op, DeviceTensor3 tensor, int plane) {
T sum = (T)0;
for (int batch = 0; batch < tensor.getSize(0); ++batch) {
for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) {
sum += op(batch, plane, x);
}
}
// sum over NumThreads within a warp
sum = warpSum(sum);
// 'transpose', and reduce within warp again
__shared__ T shared[32];
__syncthreads();
if (threadIdx.x % WARP_SIZE == 0) {
shared[threadIdx.x / WARP_SIZE] = sum;
}
if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
// zero out the other entries in shared
shared[threadIdx.x] = (T)0;
}
__syncthreads();
if (threadIdx.x / WARP_SIZE == 0) {
sum = warpSum(shared[threadIdx.x]);
if (threadIdx.x == 0) {
shared[0] = sum;
}
}
__syncthreads();
// Everyone picks it up, should be broadcast into the whole gradInput
return shared[0];
}
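// Inference path: one block per plane (channel) normalizes the input with the running mean/variance and applies the optional affine weight and bias.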
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3>
__global__ void BatchNormalizationUpdateOutputInference_kernel(
const DeviceTensor3 input,
DeviceTensor3 output,
DeviceTensor1 runningMean,
DeviceTensor1 runningVar,
const DeviceTensor1 weight,
const DeviceTensor1 bias,
Acctype epsilon) {
int plane = blockIdx.x;
Acctype invstd = Acctype(1) / sqrt(runningVar[plane].ldg() + epsilon);
Acctype mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane].ldg());
Acctype gamma = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane].ldg()) : Acctype(1);
Acctype beta = bias.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(bias[plane].ldg()) : Acctype(0);
// Write normalized and update the output
for (int batch = 0; batch < input.getSize(0); batch++) {
for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
Dtype inp = input[batch][plane][x].ldg();
output[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gamma * (inp - mean) * invstd + beta);
}
}
}
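// One block per plane: reduces the input over (batch, spatial) to the per-channel mean.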
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3>
__global__ void BatchNormalizationMean_kernel(
const DeviceTensor3 input,
DeviceTensor1 out_mean) {
int plane = blockIdx.x;
int N = input.getSize(0) * input.getSize(2);
Acctype norm = Acctype(1) / N;
Acctype mean = reduce<Acctype>(SumOp<Dtype, Acctype, DeviceTensor3>(input), input, plane) * norm;
if (threadIdx.x == 0) {
out_mean[plane] = ScalarConvert<Acctype, Dtype>::to(mean);
}
}
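// One block per plane: given the precomputed mean, reduces the squared deviations to the biased per-channel variance.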
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3>
__global__ void BatchNormalizationVar_kernel(
const DeviceTensor3 input,
const DeviceTensor1 in_mean,
DeviceTensor1 out_var) {
int plane = blockIdx.x;
int N = input.getSize(0) * input.getSize(2);
Acctype norm = Acctype(1) / N;
Acctype mean = ScalarConvert<Dtype, Acctype>::to(in_mean[plane]);
Acctype var = reduce<Acctype>(VarOp<Dtype, Acctype, DeviceTensor3>(mean, input), input, plane) * norm;
if (threadIdx.x == 0) {
out_var[plane] = ScalarConvert<Acctype, Dtype>::to(var);
}
}
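// Training path: normalizes with the externally provided saveMean/saveVar (the in-kernel reduction is left commented out), updates the running statistics with momentum, and applies the optional affine transform.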
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3>
__global__ void BatchNormalizationUpdateOutput_kernelhaha(
const DeviceTensor3 input,
DeviceTensor3 output,
const DeviceTensor1 weight,
const DeviceTensor1 bias,
const Acctype epsilon,
const Acctype momentum,
DeviceTensor1 runningMean,
DeviceTensor1 runningVar,
DeviceTensor1 saveMean,
DeviceTensor1 saveVar) {
int plane = blockIdx.x;
int N = input.getSize(0) * input.getSize(2);
// Compute the mean and variance across (batch, x/y/z)
/* Acctype norm = Acctype(1) / N;
Acctype mean = reduce<Acctype>(SumOp<Dtype, Acctype, DeviceTensor3>(input), input, plane) * norm;
__syncthreads();
Acctype varN = reduce<Acctype>(VarOp<Dtype, Acctype, DeviceTensor3>(mean, input), input, plane);
Acctype invStd = 0;
if (varN != Acctype(0) || epsilon != Acctype(0)) {
invStd = 1 / sqrt(varN * norm + epsilon);
} */
Acctype mean = ScalarConvert<Dtype, Acctype>::to(saveMean[plane]);
Acctype var = ScalarConvert<Dtype, Acctype>::to(saveVar[plane]);
Acctype invStd = 1 / sqrt(var + epsilon);
// Save the mean, variance, and moving averages
if (threadIdx.x == 0) {
// Momentum based writeback
// Acctype unbiasedVar = varN / (N - 1);
Acctype unbiasedVar = var * N / (N - 1);
// saveMean[plane] = ScalarConvert<Acctype, Dtype>::to(mean);
// saveStd[plane] = ScalarConvert<Acctype, Dtype>::to(invStd);
runningMean[plane] = ScalarConvert<Acctype, Dtype>::to((1 - momentum) * runningMean[plane] + momentum * mean);
runningVar[plane] = ScalarConvert<Acctype, Dtype>::to((1 - momentum) * runningVar[plane] + momentum * unbiasedVar);
}
// Write normalized and update the output
Acctype gamma = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane]) : ScalarConvert<int, Acctype>::to(1);
Acctype beta = bias.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(bias[plane]) : ScalarConvert<int, Acctype>::to(0);
for (int batch = 0; batch < input.getSize(0); ++batch) {
for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
Dtype inp = input[batch][plane][x].ldg();
output[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gamma * (inp - mean) * invStd + beta);
}
}
}
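// One block per plane: reduces gradOutput to its per-channel mean and to the mean of gradOutput * (input - mean); both are consumed by the backward kernel.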
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3>
__global__ void BatchNormalizationMeanGrad_kernel(
const DeviceTensor3 input,
const DeviceTensor3 gradOutput,
const DeviceTensor1 runningMean,
const DeviceTensor1 saveMean,
DeviceTensor1 gradOutputMean_all,
DeviceTensor1 dotP_all,
bool train) {
int plane = blockIdx.x;
int N = gradOutput.getSize(0) * gradOutput.getSize(2);
Acctype mean;
if (train) {
mean = ScalarConvert<Dtype, Acctype>::to(saveMean[plane]);
} else {
mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane]);
}
Acctype norm = Acctype(1) / N;
GradOp<Dtype, Acctype, DeviceTensor3> g(mean, input, gradOutput);
Float2<Dtype, Acctype> res = reduce<Float2<Dtype, Acctype>, GradOp<Dtype, Acctype, DeviceTensor3>, DeviceTensor3>(g, gradOutput, plane);
Acctype gradOutputMean = res.v1 * norm;
Acctype dotP = res.v2 * norm;
if (threadIdx.x == 0) {
gradOutputMean_all[plane] = ScalarConvert<Acctype, Dtype>::to(gradOutputMean);
dotP_all[plane] = ScalarConvert<Acctype, Dtype>::to(dotP);
}
}
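// One block per plane: combines the precomputed gradient statistics with the saved (training) or running (inference) mean/variance to form gradInput, and accumulates gradWeight/gradBias when present.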
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3>
__global__ void BatchNormalizationBackward_kernel(
const DeviceTensor3 input,
const DeviceTensor3 gradOutput,
const DeviceTensor1 gradOutputMean,
const DeviceTensor1 dotP_all,
DeviceTensor3 gradInput,
DeviceTensor1 gradWeight,
DeviceTensor1 gradBias,
const DeviceTensor1 weight,
const DeviceTensor1 runningMean,
const DeviceTensor1 runningVar,
const DeviceTensor1 saveMean,
const DeviceTensor1 saveVar,
bool train,
Acctype scale,
double eps) {
int plane = blockIdx.x;
int N = gradOutput.getSize(0) * gradOutput.getSize(2);
Acctype mean, stdVal;
if (train) {
mean = ScalarConvert<Dtype, Acctype>::to(saveMean[plane]);
stdVal = 1 / sqrt(ScalarConvert<Dtype, Acctype>::to(saveVar[plane]) + eps);
} else {
mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane]);
stdVal = 1 / sqrt(runningVar[plane] + eps);
}
Acctype weightVal = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane]) : Acctype(1);
// Acctype norm = Acctype(1) / N;
// Compute two values across (batch, x/y/z) in one pass:
// 1. Sum(gradOutput)
// 2. DotProduct(input - mean, gradOutput)
// GradOp<Dtype, Acctype, DeviceTensor3> g(mean, input, gradOutput);
// Float2<Dtype, Acctype> res = reduce<Float2<Dtype, Acctype>, GradOp<Dtype, Acctype, DeviceTensor3>, DeviceTensor3>(g, gradOutput, plane);
// Acctype gradOutputSum = res.v1;
Acctype gradOutputSum = ScalarConvert<Dtype, Acctype>::to(gradOutputMean[plane]) * N;
// Acctype dotP = res.v2;
Acctype dotP = ScalarConvert<Dtype, Acctype>::to(dotP_all[plane]);
// Acctype gradMean = gradOutputSum * norm;
Acctype gradMean = ScalarConvert<Dtype, Acctype>::to(gradOutputMean[plane]);
// Acctype projScale = dotP * norm * stdVal * stdVal;
Acctype projScale = dotP * stdVal * stdVal;
Acctype gradScale = stdVal * weightVal;
if (gradInput.numElements() > 0) {
for (int batch = 0; batch < gradOutput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradOutput.getSize(2); x += blockDim.x) {
Dtype gradOut = gradOutput[batch][plane][x];
if (train) {
Dtype inp = input[batch][plane][x];
Acctype proj = (inp - mean) * projScale;
gradInput[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to((gradOut - proj - gradMean) * gradScale);
} else {
gradInput[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gradOut * gradScale);
}
}
}
}
if (gradWeight.numElements() > 0) {
if (threadIdx.x == 0) {
gradWeight[plane] += ScalarConvert<Acctype, Dtype>::to(scale * dotP * stdVal);
}
}
if (gradBias.numElements() > 0) {
if (threadIdx.x == 0) {
gradBias[plane] += ScalarConvert<Acctype, Dtype>::to(scale * gradOutputSum);
}
}
}
#include "generic/batchnormp_cuda.cu"
#include "THHGenerateFloatTypes.h" | c63d9eafb3d65eaf16c5c29fca8ff45f7f436282.cu | #include "THCUNN.h"
#include "common.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
const int WARP_SIZE = 32;
// The maximum number of threads in a block
const int MAX_BLOCK_SIZE = 512;
// Number of threads in a block given an input size up to MAX_BLOCK_SIZE
static int getNumThreads(int nElem) {
int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE };
for (int i = 0; i != 5; ++i) {
if (nElem <= threadSizes[i]) {
return threadSizes[i];
}
}
return MAX_BLOCK_SIZE;
}
// Returns the index of the most significant 1 bit in `val`.
__device__ __forceinline__ int getMSB(int val) {
return 31 - __clz(val);
}
template <typename Dtype, typename Acctype>
struct Float2 {
Acctype v1, v2;
__device__ Float2() {}
__device__ Float2(Dtype v1, Dtype v2) : v1(ScalarConvert<Dtype, Acctype>::to(v1)), v2(ScalarConvert<Dtype, Acctype>::to(v2)) {}
__device__ Float2(Dtype v) : v1(ScalarConvert<Dtype, Acctype>::to(v)), v2(ScalarConvert<Dtype, Acctype>::to(v)) {}
__device__ Float2(int v) : v1(ScalarConvert<int, Acctype>::to(v)), v2(ScalarConvert<int, Acctype>::to(v)) {}
__device__ Float2& operator+=(const Float2& a) {
v1 += a.v1;
v2 += a.v2;
return *this;
}
};
template <typename Dtype, typename Acctype, typename DeviceTensor3>
struct SumOp {
__device__ SumOp(const DeviceTensor3 t) : tensor(t) {}
__device__ __forceinline__ Acctype operator()(int batch, int plane, int n) {
return ScalarConvert<Dtype, Acctype>::to(tensor[batch][plane][n]);
}
const DeviceTensor3 tensor;
};
template <typename Dtype, typename Acctype, typename DeviceTensor3>
struct VarOp {
__device__ VarOp(Acctype m, const DeviceTensor3 t) : mean(m), tensor(t) {}
__device__ __forceinline__ Acctype operator()(int batch, int plane, int n) {
Dtype val = tensor[batch][plane][n];
return (val - mean) * (val - mean);
}
const Acctype mean;
const DeviceTensor3 tensor;
};
template <typename Dtype, typename Acctype, typename DeviceTensor3>
struct GradOp {
__device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g)
: mean(m), input(i), gradOutput(g) {}
__device__ __forceinline__ Float2<Dtype, Acctype> operator()(int batch, int plane, int n) {
Dtype g = gradOutput[batch][plane][n];
Dtype c = ScalarConvert<Acctype, Dtype>::to(input[batch][plane][n] - mean);
return Float2<Dtype, Acctype>(g, g * c);
}
const Acctype mean;
const DeviceTensor3 input;
const DeviceTensor3 gradOutput;
};
// Sum across all threads within a warp
template <typename T>
static __device__ __forceinline__ T warpSum(T val) {
#if __CUDA_ARCH__ >= 300
for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE);
}
#else
__shared__ T values[MAX_BLOCK_SIZE];
values[threadIdx.x] = val;
__threadfence_block();
const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE;
for (int i = 1; i < WARP_SIZE; i++) {
val += values[base + ((i + threadIdx.x) % WARP_SIZE)];
}
#endif
return val;
}
template <typename Dtype, typename Acctype>
static __device__ __forceinline__ Float2<Dtype, Acctype> warpSum(Float2<Dtype, Acctype> value) {
value.v1 = warpSum(value.v1);
value.v2 = warpSum(value.v2);
return value;
}
// Sum across (batch, x/y/z) applying Op() pointwise
template<typename T, typename Op, typename DeviceTensor3>
__device__ T reduce(Op op, DeviceTensor3 tensor, int plane) {
T sum = (T)0;
for (int batch = 0; batch < tensor.getSize(0); ++batch) {
for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) {
sum += op(batch, plane, x);
}
}
// sum over NumThreads within a warp
sum = warpSum(sum);
// 'transpose', and reduce within warp again
__shared__ T shared[32];
__syncthreads();
if (threadIdx.x % WARP_SIZE == 0) {
shared[threadIdx.x / WARP_SIZE] = sum;
}
if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
// zero out the other entries in shared
shared[threadIdx.x] = (T)0;
}
__syncthreads();
if (threadIdx.x / WARP_SIZE == 0) {
sum = warpSum(shared[threadIdx.x]);
if (threadIdx.x == 0) {
shared[0] = sum;
}
}
__syncthreads();
// Everyone picks it up, should be broadcast into the whole gradInput
return shared[0];
}
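// Inference path: one block per plane (channel) normalizes the input with the running mean/variance and applies the optional affine weight and bias.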
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3>
__global__ void BatchNormalizationUpdateOutputInference_kernel(
const DeviceTensor3 input,
DeviceTensor3 output,
DeviceTensor1 runningMean,
DeviceTensor1 runningVar,
const DeviceTensor1 weight,
const DeviceTensor1 bias,
Acctype epsilon) {
int plane = blockIdx.x;
Acctype invstd = Acctype(1) / sqrt(runningVar[plane].ldg() + epsilon);
Acctype mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane].ldg());
Acctype gamma = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane].ldg()) : Acctype(1);
Acctype beta = bias.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(bias[plane].ldg()) : Acctype(0);
// Write normalized and update the output
for (int batch = 0; batch < input.getSize(0); batch++) {
for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
Dtype inp = input[batch][plane][x].ldg();
output[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gamma * (inp - mean) * invstd + beta);
}
}
}
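// One block per plane: reduces the input over (batch, spatial) to the per-channel mean.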
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3>
__global__ void BatchNormalizationMean_kernel(
const DeviceTensor3 input,
DeviceTensor1 out_mean) {
int plane = blockIdx.x;
int N = input.getSize(0) * input.getSize(2);
Acctype norm = Acctype(1) / N;
Acctype mean = reduce<Acctype>(SumOp<Dtype, Acctype, DeviceTensor3>(input), input, plane) * norm;
if (threadIdx.x == 0) {
out_mean[plane] = ScalarConvert<Acctype, Dtype>::to(mean);
}
}
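// One block per plane: given the precomputed mean, reduces the squared deviations to the biased per-channel variance.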
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3>
__global__ void BatchNormalizationVar_kernel(
const DeviceTensor3 input,
const DeviceTensor1 in_mean,
DeviceTensor1 out_var) {
int plane = blockIdx.x;
int N = input.getSize(0) * input.getSize(2);
Acctype norm = Acctype(1) / N;
Acctype mean = ScalarConvert<Dtype, Acctype>::to(in_mean[plane]);
Acctype var = reduce<Acctype>(VarOp<Dtype, Acctype, DeviceTensor3>(mean, input), input, plane) * norm;
if (threadIdx.x == 0) {
out_var[plane] = ScalarConvert<Acctype, Dtype>::to(var);
}
}
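// Training path: normalizes with the externally provided saveMean/saveVar (the in-kernel reduction is left commented out), updates the running statistics with momentum, and applies the optional affine transform.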
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3>
__global__ void BatchNormalizationUpdateOutput_kernelhaha(
const DeviceTensor3 input,
DeviceTensor3 output,
const DeviceTensor1 weight,
const DeviceTensor1 bias,
const Acctype epsilon,
const Acctype momentum,
DeviceTensor1 runningMean,
DeviceTensor1 runningVar,
DeviceTensor1 saveMean,
DeviceTensor1 saveVar) {
int plane = blockIdx.x;
int N = input.getSize(0) * input.getSize(2);
// Compute the mean and variance across (batch, x/y/z)
/* Acctype norm = Acctype(1) / N;
Acctype mean = reduce<Acctype>(SumOp<Dtype, Acctype, DeviceTensor3>(input), input, plane) * norm;
__syncthreads();
Acctype varN = reduce<Acctype>(VarOp<Dtype, Acctype, DeviceTensor3>(mean, input), input, plane);
Acctype invStd = 0;
if (varN != Acctype(0) || epsilon != Acctype(0)) {
invStd = 1 / sqrt(varN * norm + epsilon);
} */
Acctype mean = ScalarConvert<Dtype, Acctype>::to(saveMean[plane]);
Acctype var = ScalarConvert<Dtype, Acctype>::to(saveVar[plane]);
Acctype invStd = 1 / sqrt(var + epsilon);
// Save the mean, variance, and moving averages
if (threadIdx.x == 0) {
// Momentum based writeback
// Acctype unbiasedVar = varN / (N - 1);
Acctype unbiasedVar = var * N / (N - 1);
// saveMean[plane] = ScalarConvert<Acctype, Dtype>::to(mean);
// saveStd[plane] = ScalarConvert<Acctype, Dtype>::to(invStd);
runningMean[plane] = ScalarConvert<Acctype, Dtype>::to((1 - momentum) * runningMean[plane] + momentum * mean);
runningVar[plane] = ScalarConvert<Acctype, Dtype>::to((1 - momentum) * runningVar[plane] + momentum * unbiasedVar);
}
// Write normalized and update the output
Acctype gamma = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane]) : ScalarConvert<int, Acctype>::to(1);
Acctype beta = bias.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(bias[plane]) : ScalarConvert<int, Acctype>::to(0);
for (int batch = 0; batch < input.getSize(0); ++batch) {
for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
Dtype inp = input[batch][plane][x].ldg();
output[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gamma * (inp - mean) * invStd + beta);
}
}
}
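// One block per plane: reduces gradOutput to its per-channel mean and to the mean of gradOutput * (input - mean); both are consumed by the backward kernel.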
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3>
__global__ void BatchNormalizationMeanGrad_kernel(
const DeviceTensor3 input,
const DeviceTensor3 gradOutput,
const DeviceTensor1 runningMean,
const DeviceTensor1 saveMean,
DeviceTensor1 gradOutputMean_all,
DeviceTensor1 dotP_all,
bool train) {
int plane = blockIdx.x;
int N = gradOutput.getSize(0) * gradOutput.getSize(2);
Acctype mean;
if (train) {
mean = ScalarConvert<Dtype, Acctype>::to(saveMean[plane]);
} else {
mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane]);
}
Acctype norm = Acctype(1) / N;
GradOp<Dtype, Acctype, DeviceTensor3> g(mean, input, gradOutput);
Float2<Dtype, Acctype> res = reduce<Float2<Dtype, Acctype>, GradOp<Dtype, Acctype, DeviceTensor3>, DeviceTensor3>(g, gradOutput, plane);
Acctype gradOutputMean = res.v1 * norm;
Acctype dotP = res.v2 * norm;
if (threadIdx.x == 0) {
gradOutputMean_all[plane] = ScalarConvert<Acctype, Dtype>::to(gradOutputMean);
dotP_all[plane] = ScalarConvert<Acctype, Dtype>::to(dotP);
}
}
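// One block per plane: combines the precomputed gradient statistics with the saved (training) or running (inference) mean/variance to form gradInput, and accumulates gradWeight/gradBias when present.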
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3>
__global__ void BatchNormalizationBackward_kernel(
const DeviceTensor3 input,
const DeviceTensor3 gradOutput,
const DeviceTensor1 gradOutputMean,
const DeviceTensor1 dotP_all,
DeviceTensor3 gradInput,
DeviceTensor1 gradWeight,
DeviceTensor1 gradBias,
const DeviceTensor1 weight,
const DeviceTensor1 runningMean,
const DeviceTensor1 runningVar,
const DeviceTensor1 saveMean,
const DeviceTensor1 saveVar,
bool train,
Acctype scale,
double eps) {
int plane = blockIdx.x;
int N = gradOutput.getSize(0) * gradOutput.getSize(2);
Acctype mean, stdVal;
if (train) {
mean = ScalarConvert<Dtype, Acctype>::to(saveMean[plane]);
stdVal = 1 / sqrt(ScalarConvert<Dtype, Acctype>::to(saveVar[plane]) + eps);
} else {
mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane]);
stdVal = 1 / sqrt(runningVar[plane] + eps);
}
Acctype weightVal = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane]) : Acctype(1);
// Acctype norm = Acctype(1) / N;
// Compute two values across (batch, x/y/z) in one pass:
// 1. Sum(gradOutput)
// 2. DotProduct(input - mean, gradOutput)
// GradOp<Dtype, Acctype, DeviceTensor3> g(mean, input, gradOutput);
// Float2<Dtype, Acctype> res = reduce<Float2<Dtype, Acctype>, GradOp<Dtype, Acctype, DeviceTensor3>, DeviceTensor3>(g, gradOutput, plane);
// Acctype gradOutputSum = res.v1;
Acctype gradOutputSum = ScalarConvert<Dtype, Acctype>::to(gradOutputMean[plane]) * N;
// Acctype dotP = res.v2;
Acctype dotP = ScalarConvert<Dtype, Acctype>::to(dotP_all[plane]);
// Acctype gradMean = gradOutputSum * norm;
Acctype gradMean = ScalarConvert<Dtype, Acctype>::to(gradOutputMean[plane]);
// Acctype projScale = dotP * norm * stdVal * stdVal;
Acctype projScale = dotP * stdVal * stdVal;
Acctype gradScale = stdVal * weightVal;
if (gradInput.numElements() > 0) {
for (int batch = 0; batch < gradOutput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradOutput.getSize(2); x += blockDim.x) {
Dtype gradOut = gradOutput[batch][plane][x];
if (train) {
Dtype inp = input[batch][plane][x];
Acctype proj = (inp - mean) * projScale;
gradInput[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to((gradOut - proj - gradMean) * gradScale);
} else {
gradInput[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gradOut * gradScale);
}
}
}
}
if (gradWeight.numElements() > 0) {
if (threadIdx.x == 0) {
gradWeight[plane] += ScalarConvert<Acctype, Dtype>::to(scale * dotP * stdVal);
}
}
if (gradBias.numElements() > 0) {
if (threadIdx.x == 0) {
gradBias[plane] += ScalarConvert<Acctype, Dtype>::to(scale * gradOutputSum);
}
}
}
#include "generic/batchnormp_cuda.cu"
#include "THCGenerateFloatTypes.h" |
4ba33ef97ed3a83a6039e376d0c61d2ef5690ba4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "TestpermuteWalkers.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int dim = 1;
const int nwl = 1;
const int *kr = NULL;
hipMalloc(&kr, XSIZE*YSIZE);
const float *xxC = NULL;
hipMalloc(&xxC, XSIZE*YSIZE);
float *xxCP = NULL;
hipMalloc(&xxCP, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
        hipFree(0);
        hipLaunchKernelGGL((TestpermuteWalkers), dim3(gridBlock), dim3(threadBlock), 0, 0, dim, nwl, kr, xxC, xxCP);
hipDeviceSynchronize();
        for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
            hipLaunchKernelGGL((TestpermuteWalkers), dim3(gridBlock), dim3(threadBlock), 0, 0, dim, nwl, kr, xxC, xxCP);
}
auto start = steady_clock::now();
        for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
            hipLaunchKernelGGL((TestpermuteWalkers), dim3(gridBlock), dim3(threadBlock), 0, 0, dim, nwl, kr, xxC, xxCP);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 4ba33ef97ed3a83a6039e376d0c61d2ef5690ba4.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "TestpermuteWalkers.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int dim = 1;
const int nwl = 1;
const int *kr = NULL;
cudaMalloc(&kr, XSIZE*YSIZE);
const float *xxC = NULL;
cudaMalloc(&xxC, XSIZE*YSIZE);
float *xxCP = NULL;
cudaMalloc(&xxCP, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
TestpermuteWalkers<<<gridBlock,threadBlock>>>(dim,nwl,kr,xxC,xxCP);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
TestpermuteWalkers<<<gridBlock,threadBlock>>>(dim,nwl,kr,xxC,xxCP);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
TestpermuteWalkers<<<gridBlock,threadBlock>>>(dim,nwl,kr,xxC,xxCP);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d00c9ffad1dfc439172bac9798829f9b8946bbfc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "bit_reduce.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const uint32_t *input_array = NULL;
hipMalloc(&input_array, XSIZE*YSIZE);
uint32_t *intBuf = NULL;
hipMalloc(&intBuf, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
        hipFree(0);
        hipLaunchKernelGGL((bit_reduce), dim3(gridBlock), dim3(threadBlock), 0, 0, input_array, intBuf);
hipDeviceSynchronize();
        for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
            hipLaunchKernelGGL((bit_reduce), dim3(gridBlock), dim3(threadBlock), 0, 0, input_array, intBuf);
}
auto start = steady_clock::now();
        for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
            hipLaunchKernelGGL((bit_reduce), dim3(gridBlock), dim3(threadBlock), 0, 0, input_array, intBuf);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d00c9ffad1dfc439172bac9798829f9b8946bbfc.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "bit_reduce.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const uint32_t *input_array = NULL;
cudaMalloc(&input_array, XSIZE*YSIZE);
uint32_t *intBuf = NULL;
cudaMalloc(&intBuf, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
bit_reduce<<<gridBlock,threadBlock>>>(input_array,intBuf);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
bit_reduce<<<gridBlock,threadBlock>>>(input_array,intBuf);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
bit_reduce<<<gridBlock,threadBlock>>>(input_array,intBuf);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bee923d885f94041b9e1d6a2de5bf16bbcce67be.hip | // !!! This is a file automatically generated by hipify!!!
//utility.cu
#include "utility.h"
void printCudaError(hipError_t error, string msg, string fileName, int line)
{
cout<<msg<<", error code: "<<error<<", file("<<fileName<<"), line("<<line<<")."<<endl;
exit(EXIT_FAILURE);
} | bee923d885f94041b9e1d6a2de5bf16bbcce67be.cu | //utility.cu
#include "utility.h"
void printCudaError(cudaError_t error, string msg, string fileName, int line)
{
cout<<msg<<"£¬´íÎóÂ룺"<<error<<"£¬Îļþ("<<fileName<<")£¬ÐÐÊý("<<line<<")."<<endl;
exit(EXIT_FAILURE);
} |
5ca351bfaabef31d65d08538fb62face771303b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "UpsampleLayer.h"
namespace nvinfer1
{
// nearest-neighbor interpolation
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) {
int x, y, z, w;
w = ii % d3; //ii/w
ii = ii/d3; // ii/w2
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
// ((x*c1+y)*h1)*w1+w
return (((x*d1+y)*d2)+z)*d3+w;
}
template <typename Dtype>
__global__ void upscale(const Dtype *input, Dtype *output,
int no_elements, int scale_factor, int d1, int d2, int d3) {
// get the global thread index
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii >= no_elements) return;
//c,h,w
int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
output[ii]=input[ipidx];
}
template <typename Dtype>
void UpsampleLayerPlugin::forwardGpu(const Dtype* input,Dtype * output,
int N,int C,int H ,int W) {
int numElem = N*C*H*W;
// gridSize, blockSize
hipLaunchKernelGGL(( upscale), dim3((numElem + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, 0, input,output, numElem, mScale, C, H, W);
}
size_t type2size(DataType dataType) {
size_t _size = 0;
switch (dataType)
{
case DataType::kFLOAT: _size = sizeof(float);break;
case DataType::kHALF: _size = sizeof(__half);break;
case DataType::kINT8: _size = sizeof(uint8_t);break;
default:std::cerr << "error data type" << std::endl;
}
return _size;
}
int UpsampleLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
assert(batchSize == 1);
const int channels = mCHW.d[0];
const int64_t in_height = mCHW.d[1];
const int64_t in_width = mCHW.d[2];
const int64_t out_height = mOutputHeight;
const int64_t out_width = mOutputWidth;
int totalElems = batchSize * in_height * in_width * channels;
// Handle no-op resizes efficiently.
if (out_height == in_height && out_width == in_width) {
CUDA_CHECK(hipMemcpyAsync(outputs[0], inputs[0], totalElems * type2size(mDataType), hipMemcpyDeviceToDevice, stream));
CUDA_CHECK(hipStreamSynchronize(stream));
return 0;
}
//CUDA_CHECK(hipStreamSynchronize(stream));
switch (mDataType)
{
case DataType::kFLOAT :
forwardGpu<float>((const float *)inputs[0],(float *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
case DataType::kHALF:
forwardGpu<__half>((const __half *)inputs[0],(__half *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
case DataType::kINT8:
forwardGpu<uint8_t>((const uint8_t *)inputs[0],(uint8_t *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
default:
std::cerr << "error data type" << std::endl;
}
return 0;
};
} | 5ca351bfaabef31d65d08538fb62face771303b9.cu | #include "UpsampleLayer.h"
namespace nvinfer1
{
// nearest-neighbor interpolation
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) {
int x, y, z, w;
w = ii % d3; //ii/w
ii = ii/d3; // ii/w2
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
// ((x*c1+y)*h1)*w1+w
return (((x*d1+y)*d2)+z)*d3+w;
}
template <typename Dtype>
__global__ void upscale(const Dtype *input, Dtype *output,
int no_elements, int scale_factor, int d1, int d2, int d3) {
// get the global thread index
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii >= no_elements) return;
//c,h,w
int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
output[ii]=input[ipidx];
}
template <typename Dtype>
void UpsampleLayerPlugin::forwardGpu(const Dtype* input,Dtype * output,
int N,int C,int H ,int W) {
int numElem = N*C*H*W;
// gridSize, blockSize
upscale<<<(numElem + mThreadCount - 1) / mThreadCount, mThreadCount>>>(input,output, numElem, mScale, C, H, W);
}
size_t type2size(DataType dataType) {
size_t _size = 0;
switch (dataType)
{
case DataType::kFLOAT: _size = sizeof(float);break;
case DataType::kHALF: _size = sizeof(__half);break;
case DataType::kINT8: _size = sizeof(uint8_t);break;
default:std::cerr << "error data type" << std::endl;
}
return _size;
}
int UpsampleLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
{
assert(batchSize == 1);
const int channels = mCHW.d[0];
const int64_t in_height = mCHW.d[1];
const int64_t in_width = mCHW.d[2];
const int64_t out_height = mOutputHeight;
const int64_t out_width = mOutputWidth;
int totalElems = batchSize * in_height * in_width * channels;
// Handle no-op resizes efficiently.
if (out_height == in_height && out_width == in_width) {
CUDA_CHECK(cudaMemcpyAsync(outputs[0], inputs[0], totalElems * type2size(mDataType), cudaMemcpyDeviceToDevice, stream));
CUDA_CHECK(cudaStreamSynchronize(stream));
return 0;
}
//CUDA_CHECK(cudaStreamSynchronize(stream));
switch (mDataType)
{
case DataType::kFLOAT :
forwardGpu<float>((const float *)inputs[0],(float *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
case DataType::kHALF:
forwardGpu<__half>((const __half *)inputs[0],(__half *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
case DataType::kINT8:
forwardGpu<uint8_t>((const uint8_t *)inputs[0],(uint8_t *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
default:
std::cerr << "error data type" << std::endl;
}
return 0;
};
} |
578a872a2922acb1bb976d4d0e99a2f462f959c7.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <math.h> /* log2(), pow() */
#include <cstdint> /* uint64_t */
#include <cstdlib> /* malloc() */
#include <iostream>
#include "../include/utils2.h"
#include "../include/utils.h"
/* bit_reverse(), modExp(), modulo() */
#include "../include/ntt.cuh" //INCLUDE HEADER FILE
#include "../include/utils_device.cuh"
#include "../include/cuda_device.cuh"
/** Adding Bit reversal Removement
* Perform an in-place iterative breadth-first decimation-in-time Cooley-Tukey NTT on an input vector and return the result
*
* @param vec The input vector to be transformed
* @param n The size of the input vector
* @param p The prime to be used as the modulus of the transformation
* @param r The primitive root of the prime
* @param rev Whether to perform bit reversal on the input vector
* @return The transformed vector
*/
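/* A minimal host-side sketch (not part of the original file) of the bit-reversal
 * permutation that ntt_cuda_kernel_stepA applies when rev is true; the helper name
 * host_bit_reverse is made up for illustration. For n = 8 (num_bits = 3), index
 * 3 (binary 011) maps to 6 (binary 110), matching the reverse_num loop in the kernel.
 */
static inline uint64_t host_bit_reverse(uint64_t idx, uint64_t num_bits)
{
    uint64_t reverse_num = 0;
    for (uint64_t j = 0; j < num_bits; j++) {
        reverse_num = reverse_num << 1;     // make room for the next bit
        if (idx & (uint64_t(1) << j)) {     // read bit j of the input index
            reverse_num = reverse_num | 1;  // append it as the new low bit
        }
    }
    return reverse_num;                     // bits of idx in reversed order
}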
using namespace std;
__global__ void ntt_cuda_kernel_stepA(uint64_t *g_idata, uint64_t num_bits, uint64_t n, uint64_t p, uint64_t r, bool rev, uint64_t *g_odata)
{
uint64_t m, k_, a, factor1, factor2;
//set thread ID
uint64_t tid = threadIdx.x;
unsigned idx = blockIdx.x*blockDim.x + threadIdx.x;
//boundary check
if (tid >= n || idx >= n)return;
if (rev)
{
uint64_t reverse_num= 0;
for(uint64_t j = 0; j < num_bits; j++){
reverse_num = reverse_num << 1;
if(idx & (1 << j)){
reverse_num = reverse_num | 1;
}
}
g_odata[reverse_num] = g_idata[idx];
}
else
{
g_odata[idx] = g_idata[idx];
}
__syncthreads();
if (idx == 0)
{
for (uint64_t i = 1; i <= log2_D(n); i++)
{
m = pow_D(uint64_t(2), i);
k_ = (p - 1) / m;
a = modExp_D(r, k_, p);
for (uint64_t j = 0; j < n; j += m)
{
for (uint64_t k = 0; k < m / 2; k++)
{
factor1 = g_odata[j + k];
factor2 = modulo_D(modExp_D(a, k, p) * g_odata[j + k + m / 2], p);
g_odata[j + k] = modulo_D(factor1 + factor2, p);
g_odata[j + k + m / 2] = modulo_D(factor1 - factor2, p);
}
}
}
}
}
extern "C"
uint64_t *inPlaceNTT_DIT_stepA(uint64_t *vec, uint64_t n, uint64_t p, uint64_t r, bool rev)
{
double computestart, computeElaps,copystart,copyElaps;
int blocksize = 1024;
dim3 block(blocksize, 1);
dim3 grid((n - 1) / block.x + 1, 1);
//var init
size_t bytes = n * sizeof(uint64_t);
uint64_t *vec_host = (uint64_t *)malloc(bytes);
uint64_t *outVec_host = (uint64_t *)malloc(bytes);
memcpy(vec_host, vec, bytes);
// device memory declare
uint64_t *vec_dev = NULL;
uint64_t *outVec_dev = NULL;
//device memory allocate
CHECK(hipMalloc((void **)&vec_dev, bytes));
CHECK(hipMalloc((void **)&outVec_dev, bytes));
copystart= cpuSecond();
//remove bitreversal
uint64_t num_bits = log2(n);
CHECK(hipMemset(vec_dev,0,bytes));
CHECK(hipMemset(outVec_dev,0,bytes));
CHECK(hipMemcpy(vec_dev, vec_host, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
computestart= cpuSecond();
hipLaunchKernelGGL(( ntt_cuda_kernel_stepA), dim3(grid), dim3(block), 0, 0, vec_dev,num_bits, n, p, r, rev, outVec_dev);
CHECK(hipDeviceSynchronize());
computeElaps = 1000 * (cpuSecond() - computestart);
CHECK(hipMemcpy(outVec_host, outVec_dev, bytes, hipMemcpyDeviceToHost));
copyElaps = 1000 * (cpuSecond() - copystart);
printf("gpu 1 pure compute time: %lf compute+copy time: %lf for ### bit reversal### \n first two number is %lld %lld \n", computeElaps, copyElaps,outVec_host[0],outVec_host[1]);
CHECK(hipFree(vec_dev));
CHECK(hipFree(outVec_dev));
return outVec_host;
}
| 578a872a2922acb1bb976d4d0e99a2f462f959c7.cu | #include <cuda_runtime.h>
#include <math.h> /* log2(), pow() */
#include <cstdint> /* uint64_t */
#include <cstdlib> /* malloc() */
#include <iostream>
#include "../include/utils2.h"
#include "../include/utils.h"
/* bit_reverse(), modExp(), modulo() */
#include "../include/ntt.cuh" //INCLUDE HEADER FILE
#include "../include/utils_device.cuh"
#include "../include/cuda_device.cuh"
/** Adding Bit reversal Removement
* Perform an in-place iterative breadth-first decimation-in-time Cooley-Tukey NTT on an input vector and return the result
*
* @param vec The input vector to be transformed
* @param n The size of the input vector
* @param p The prime to be used as the modulus of the transformation
* @param r The primitive root of the prime
* @param rev Whether to perform bit reversal on the input vector
* @return The transformed vector
*/
using namespace std;
__global__ void ntt_cuda_kernel_stepA(uint64_t *g_idata, uint64_t num_bits, uint64_t n, uint64_t p, uint64_t r, bool rev, uint64_t *g_odata)
{
uint64_t m, k_, a, factor1, factor2;
//set thread ID
uint64_t tid = threadIdx.x;
unsigned idx = blockIdx.x*blockDim.x + threadIdx.x;
//boundary check
if (tid >= n || idx >= n)return;
if (rev)
{
uint64_t reverse_num= 0;
for(uint64_t j = 0; j < num_bits; j++){
reverse_num = reverse_num << 1;
if(idx & (1 << j)){
reverse_num = reverse_num | 1;
}
}
g_odata[reverse_num] = g_idata[idx];
}
else
{
g_odata[idx] = g_idata[idx];
}
__syncthreads();
if (idx == 0)
{
for (uint64_t i = 1; i <= log2_D(n); i++)
{
m = pow_D(uint64_t(2), i);
k_ = (p - 1) / m;
a = modExp_D(r, k_, p);
for (uint64_t j = 0; j < n; j += m)
{
for (uint64_t k = 0; k < m / 2; k++)
{
factor1 = g_odata[j + k];
factor2 = modulo_D(modExp_D(a, k, p) * g_odata[j + k + m / 2], p);
g_odata[j + k] = modulo_D(factor1 + factor2, p);
g_odata[j + k + m / 2] = modulo_D(factor1 - factor2, p);
}
}
}
}
}
extern "C"
uint64_t *inPlaceNTT_DIT_stepA(uint64_t *vec, uint64_t n, uint64_t p, uint64_t r, bool rev)
{
double computestart, computeElaps,copystart,copyElaps;
int blocksize = 1024;
dim3 block(blocksize, 1);
dim3 grid((n - 1) / block.x + 1, 1);
//var init
size_t bytes = n * sizeof(uint64_t);
uint64_t *vec_host = (uint64_t *)malloc(bytes);
uint64_t *outVec_host = (uint64_t *)malloc(bytes);
memcpy(vec_host, vec, bytes);
// device memory declare
uint64_t *vec_dev = NULL;
uint64_t *outVec_dev = NULL;
//device memory allocate
CHECK(cudaMalloc((void **)&vec_dev, bytes));
CHECK(cudaMalloc((void **)&outVec_dev, bytes));
copystart= cpuSecond();
//remove bitreversal
uint64_t num_bits = log2(n);
CHECK(cudaMemset(vec_dev,0,bytes));
CHECK(cudaMemset(outVec_dev,0,bytes));
CHECK(cudaMemcpy(vec_dev, vec_host, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
computestart= cpuSecond();
ntt_cuda_kernel_stepA<<<grid, block>>>(vec_dev,num_bits, n, p, r, rev, outVec_dev);
CHECK(cudaDeviceSynchronize());
computeElaps = 1000 * (cpuSecond() - computestart);
CHECK(cudaMemcpy(outVec_host, outVec_dev, bytes, cudaMemcpyDeviceToHost));
copyElaps = 1000 * (cpuSecond() - copystart);
printf("gpu 1 pure compute time: %lf compute+copy time: %lf for ### bit reversal### \n first two number is %lld %lld \n", computeElaps, copyElaps,outVec_host[0],outVec_host[1]);
CHECK(cudaFree(vec_dev));
CHECK(cudaFree(outVec_dev));
return outVec_host;
}
|
d6368578ecedf263738ef718c938d55d1e6f0c35.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
{
}
__global__ void vsquare(const double *a, double *c)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = a[i] * a[i];
} | d6368578ecedf263738ef718c938d55d1e6f0c35.cu | #include "includes.h"
extern "C"
{
}
__global__ void vsquare(const double *a, double *c)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = a[i] * a[i];
} |
ddd25cf5f09c923032d7fd4da9e1a21b9ea59fe2.hip | // !!! This is a file automatically generated by hipify!!!
#include "utils.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
// #include <math.h>
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
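// A sequential, single-channel sketch of the weighted-average blur described above,
// with clamp-to-edge handling at the image borders. It is illustrative only (not the
// assignment's reference code) and the name reference_blur_channel is made up here.
static void reference_blur_channel(const unsigned char* in, unsigned char* out,
                                   int numRows, int numCols,
                                   const float* filter, int filterWidth)
{
  for (int r = 0; r < numRows; ++r) {
    for (int c = 0; c < numCols; ++c) {
      float result = 0.0f;
      for (int fr = -filterWidth / 2; fr <= filterWidth / 2; ++fr) {
        for (int fc = -filterWidth / 2; fc <= filterWidth / 2; ++fc) {
          // clamp neighbor coordinates so border pixels reuse the nearest valid pixel
          int ir = r + fr;
          int ic = c + fc;
          if (ir < 0) ir = 0;
          if (ir > numRows - 1) ir = numRows - 1;
          if (ic < 0) ic = 0;
          if (ic > numCols - 1) ic = numCols - 1;
          float weight = filter[(fr + filterWidth / 2) * filterWidth + (fc + filterWidth / 2)];
          result += weight * in[ir * numCols + ic];
        }
      }
      out[r * numCols + c] = static_cast<unsigned char>(result);
    }
  }
}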
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
// // Each thread should copy some value from the filter array to the shared memory
// extern __shared__ float shared_filter[];
// if(threadIdx.x < filterWidth && threadIdx.y < filterWidth){
// int linear_index = threadIdx.x * filterWidth + threadIdx.y;
// shared_filter[linear_index] = filter[linear_index];
// }
// Synchronise the threads here before proceeding further
__syncthreads();
int index = thread_2D_pos.y * numCols + thread_2D_pos.x;
float res = 0.0f;
for (int i = -filterWidth/2; i <= filterWidth/2; ++i) {
for (int j = -filterWidth/2; j <= filterWidth/2; ++j) {
int imageRow = min(max(0,thread_2D_pos.y + i), numRows - 1);
int imageCol = min(max(0,thread_2D_pos.x + j), numCols - 1);
int tempInd = imageRow * numCols + imageCol;
res += float(filter[(i + filterWidth/2)*filterWidth + (j + filterWidth/2)] * inputChannel[tempInd]);
}
}
outputChannel[thread_1D_pos] = res;
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
uchar4 rgb = inputImageRGBA[thread_1D_pos];
redChannel[thread_1D_pos] = rgb.x;
greenChannel[thread_1D_pos] = rgb.y;
blueChannel[thread_1D_pos] = rgb.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols) {
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
// hipMemset(d_red, 255, sizeof(unsigned char) * numRowsImage * numColsImage);
// std::cout << sizeof(unsigned char) * numRowsImage * numColsImage << std::endl;
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
int blockWidth = 16;
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(blockWidth, blockWidth, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
const dim3 gridSize(numCols/blockWidth + 1, numRows/blockWidth + 1, 1);
// const dim3 gridSize(1, 1, 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// d_redBlurred = d_red;
// d_greenBlurred = d_green;
// d_blueBlurred = d_blue;
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
}
| ddd25cf5f09c923032d7fd4da9e1a21b9ea59fe2.cu | #include "utils.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
// #include <math.h>
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
// // Each thread should copy some value from the filter array to the shared memory
// extern __shared__ float shared_filter[];
// if(threadIdx.x < filterWidth && threadIdx.y < filterWidth){
// int linear_index = threadIdx.x * filterWidth + threadIdx.y;
// shared_filter[linear_index] = filter[linear_index];
// }
// Synchronise the threads here before proceeding further
__syncthreads();
int index = thread_2D_pos.y * numCols + thread_2D_pos.x;
float res = 0.0f;
for (int i = -filterWidth/2; i <= filterWidth/2; ++i) {
for (int j = -filterWidth/2; j <= filterWidth/2; ++j) {
int imageRow = min(max(0,thread_2D_pos.y + i), numRows - 1);
int imageCol = min(max(0,thread_2D_pos.x + j), numCols - 1);
int tempInd = imageRow * numCols + imageCol;
res += float(filter[(i + filterWidth/2)*filterWidth + (j + filterWidth/2)] * inputChannel[tempInd]);
}
}
outputChannel[thread_1D_pos] = res;
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
uchar4 rgb = inputImageRGBA[thread_1D_pos];
redChannel[thread_1D_pos] = rgb.x;
greenChannel[thread_1D_pos] = rgb.y;
blueChannel[thread_1D_pos] = rgb.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols) {
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
// cudaMemset(d_red, 255, sizeof(unsigned char) * numRowsImage * numColsImage);
// std::cout << sizeof(unsigned char) * numRowsImage * numColsImage << std::endl;
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
int blockWidth = 16;
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(blockWidth, blockWidth, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
const dim3 gridSize(numCols/blockWidth + 1, numRows/blockWidth + 1, 1);
// const dim3 gridSize(1, 1, 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// d_redBlurred = d_red;
// d_greenBlurred = d_green;
// d_blueBlurred = d_blue;
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
}
|
4890150367b90976a5ba65525a2d5e8162e1fe50.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <diagnose.hpp>
#include <fstream>
#include <limits>
#include <utils.cuh>
namespace HugeCTR {
namespace diagnose {
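// CUDA/HIP provide atomicMin/atomicMax only for integer types, so the two overloads
// below emulate them for float with an atomicExch loop: a thread exchanges its
// candidate into the address and retries until the value it reads back is no longer
// smaller (for atomicMin) or larger (for atomicMax) than the value it wrote.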
__device__ float atomicMin(float* address, float val) {
float old = val;
do {
val = old;
old = atomicExch(address, val);
} while (old < val);
return old;
}
__device__ float atomicMax(float* address, float val) {
float old = val;
do {
val = old;
old = atomicExch(address, val);
} while (old > val);
return old;
}
template <typename T>
__global__ void histogram_kernel(const T* arr, size_t len, float* range) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < len; i += blockDim.x * gridDim.x) {
float val = TypeConvertFunc<float, T>::convert(arr[i]);
if (val <= 0) {
atomicMin(range + 0, val);
atomicMax(range + 1, val);
}
if (val >= 0) {
atomicMin(range + 2, val);
atomicMax(range + 3, val);
}
}
}
template <typename T>
__global__ void verify_kernel(const T* arr, size_t len, int* flag);
template <>
__global__ void verify_kernel<float>(const float* arr, size_t len, int* flag) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < len; i += blockDim.x * gridDim.x) {
if (isnan(arr[i])) atomicAdd(flag, 1);
}
}
template <>
__global__ void verify_kernel(const __half* arr, size_t len, int* flag) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < len; i += blockDim.x * gridDim.x) {
if (__hisnan(arr[i])) {
atomicAdd(flag, 1);
}
}
}
template <typename T>
__global__ void sample_kernel(const T* arr, int len, float* arr_sample, int stride,
int max_sample_len) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < len; i += blockDim.x * gridDim.x) {
if (i % stride == 0) {
int j = i / stride;
if (j < max_sample_len) {
arr_sample[j] = TypeConvertFunc<float, T>::convert(arr[i]);
}
}
}
}
template <typename T>
void verify_and_histogram(const char* category, const Tensor2<T>& tensor,
const hipStream_t& stream) {
float h_array[4]{0.0f, -std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(), 0.0f};
int h_flag;
float* d_array;
int* d_flag;
CK_CUDA_THROW_(hipMalloc(&d_array, sizeof(h_array)));
CK_CUDA_THROW_(hipMalloc(&d_flag, sizeof(int)));
CK_CUDA_THROW_(
hipMemcpyAsync(d_array, h_array, sizeof(h_array), hipMemcpyHostToDevice, stream));
CK_CUDA_THROW_(hipMemsetAsync(d_flag, 0, sizeof(int), stream));
hipLaunchKernelGGL(( histogram_kernel), dim3(160), dim3(1024), 0, stream, tensor.get_ptr(), tensor.get_num_elements(), d_array);
hipLaunchKernelGGL(( verify_kernel), dim3(160), dim3(1024), 0, stream, tensor.get_ptr(), tensor.get_num_elements(), d_flag);
CK_CUDA_THROW_(
hipMemcpyAsync(h_array, d_array, sizeof(h_array), hipMemcpyDeviceToHost, stream));
CK_CUDA_THROW_(hipMemcpyAsync(&h_flag, d_flag, sizeof(int), hipMemcpyDeviceToHost, stream));
CK_CUDA_THROW_(hipStreamSynchronize(stream));
std::stringstream ss;
ss << "Diagnose for (" << category << "), Histogram [" << h_array[0] << ", " << h_array[1] << "]"
<< ", [" << h_array[2] << ", " << h_array[3] << "]" << std::endl;
MESSAGE_(ss.str());
if (h_flag != 0) {
CK_THROW_(Error_t::DataCheckError, std::string("Nan assert for ") + category + " failed(" +
std::to_string(h_flag) + ").");
}
CK_CUDA_THROW_(hipFree(d_array));
CK_CUDA_THROW_(hipFree(d_flag));
}
template <typename T>
void sample_and_print(const char* category, const Tensor2<T>& tensor, size_t sample_count,
const hipStream_t& stream) {
if (sample_count == 0) return;
std::unique_ptr<float[]> h_array(new float[sample_count]);
float* d_array;
CK_CUDA_THROW_(hipMalloc(&d_array, sample_count * sizeof(float)));
CK_CUDA_THROW_(hipMemsetAsync(d_array, 0, sample_count * sizeof(float), stream));
hipLaunchKernelGGL(( sample_kernel), dim3(160), dim3(1024), 0, stream, tensor.get_ptr(), tensor.get_num_elements(), d_array,
tensor.get_num_elements() / sample_count, sample_count);
CK_CUDA_THROW_(hipMemcpyAsync(h_array.get(), d_array, sample_count * sizeof(float),
hipMemcpyDeviceToHost, stream));
CK_CUDA_THROW_(hipStreamSynchronize(stream));
std::stringstream ss;
ss << "Diagnose for (" << category << "), Sampling [";
for (size_t i = 0; i < min(sample_count, tensor.get_num_elements()); i++) {
if (i != 0) ss << ",";
ss << h_array[i];
}
ss << "]" << std::endl;
MESSAGE_(ss.str());
CK_CUDA_THROW_(hipFree(d_array));
}
template <typename T>
void sample_and_print(const char* category, const Tensor2<T>& tensor, int begin, int end,
const hipStream_t& stream) {
if (begin >= 0 && end <= static_cast<int>(tensor.get_num_elements()) && end > begin) {
} else if (end < 0 && begin >= -static_cast<int>(tensor.get_num_elements()) && end > begin) {
begin += tensor.get_num_elements();
end += tensor.get_num_elements();
} else {
return;
}
std::unique_ptr<T[]> h_array(new T[end - begin]);
CK_CUDA_THROW_(hipMemcpyAsync(h_array.get(), tensor.get_ptr() + begin,
(end - begin) * sizeof(T), hipMemcpyDeviceToHost, stream));
CK_CUDA_THROW_(hipStreamSynchronize(stream));
std::stringstream ss;
ss << "Diagnose for (" << category << "), Sampling [";
for (size_t i = 0; i < end - begin; i++) {
if (i != 0) ss << ",";
ss << h_array[i];
}
ss << "]" << std::endl;
MESSAGE_(ss.str());
}
template <typename T>
void dump(const char* filename, const Tensor2<T>& tensor, const hipStream_t& stream) {
std::unique_ptr<T[]> h_array(new T[tensor.get_num_elements()]);
CK_CUDA_THROW_(hipMemcpyAsync(h_array.get(), tensor.get_ptr(), tensor.get_size_in_bytes(),
hipMemcpyDeviceToHost, stream));
CK_CUDA_THROW_(hipStreamSynchronize(stream));
std::ofstream s(filename, std::ios::out | std::ios::binary);
s.write(reinterpret_cast<const char*>(h_array.get()), tensor.get_size_in_bytes());
s.close();
}
template void verify_and_histogram<float>(const char* category, const Tensor2<float>& tensor,
const hipStream_t& stream);
template void dump<unsigned int>(const char* filename, const Tensor2<unsigned int>& tensor,
const hipStream_t& stream);
template void dump<unsigned long>(const char* filename, const Tensor2<unsigned long>& tensor,
const hipStream_t& stream);
template void dump<long long>(const char* filename, const Tensor2<long long>& tensor,
const hipStream_t& stream);
template void dump<float>(const char* filename, const Tensor2<float>& tensor,
const hipStream_t& stream);
template void dump<__half>(const char* filename, const Tensor2<__half>& tensor,
const hipStream_t& stream);
} // namespace diagnose
} // namespace HugeCTR
| 4890150367b90976a5ba65525a2d5e8162e1fe50.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <diagnose.hpp>
#include <fstream>
#include <limits>
#include <utils.cuh>
namespace HugeCTR {
namespace diagnose {
__device__ float atomicMin(float* address, float val) {
float old = val;
do {
val = old;
old = atomicExch(address, val);
} while (old < val);
return old;
}
__device__ float atomicMax(float* address, float val) {
float old = val;
do {
val = old;
old = atomicExch(address, val);
} while (old > val);
return old;
}
template <typename T>
__global__ void histogram_kernel(const T* arr, size_t len, float* range) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < len; i += blockDim.x * gridDim.x) {
float val = TypeConvertFunc<float, T>::convert(arr[i]);
if (val <= 0) {
atomicMin(range + 0, val);
atomicMax(range + 1, val);
}
if (val >= 0) {
atomicMin(range + 2, val);
atomicMax(range + 3, val);
}
}
}
template <typename T>
__global__ void verify_kernel(const T* arr, size_t len, int* flag);
template <>
__global__ void verify_kernel<float>(const float* arr, size_t len, int* flag) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < len; i += blockDim.x * gridDim.x) {
if (isnan(arr[i])) atomicAdd(flag, 1);
}
}
template <>
__global__ void verify_kernel(const __half* arr, size_t len, int* flag) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < len; i += blockDim.x * gridDim.x) {
if (__hisnan(arr[i])) {
atomicAdd(flag, 1);
}
}
}
template <typename T>
__global__ void sample_kernel(const T* arr, int len, float* arr_sample, int stride,
int max_sample_len) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < len; i += blockDim.x * gridDim.x) {
if (i % stride == 0) {
int j = i / stride;
if (j < max_sample_len) {
arr_sample[j] = TypeConvertFunc<float, T>::convert(arr[i]);
}
}
}
}
template <typename T>
void verify_and_histogram(const char* category, const Tensor2<T>& tensor,
const cudaStream_t& stream) {
float h_array[4]{0.0f, -std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(), 0.0f};
int h_flag;
float* d_array;
int* d_flag;
CK_CUDA_THROW_(cudaMalloc(&d_array, sizeof(h_array)));
CK_CUDA_THROW_(cudaMalloc(&d_flag, sizeof(int)));
CK_CUDA_THROW_(
cudaMemcpyAsync(d_array, h_array, sizeof(h_array), cudaMemcpyHostToDevice, stream));
CK_CUDA_THROW_(cudaMemsetAsync(d_flag, 0, sizeof(int), stream));
histogram_kernel<<<160, 1024, 0, stream>>>(tensor.get_ptr(), tensor.get_num_elements(), d_array);
verify_kernel<<<160, 1024, 0, stream>>>(tensor.get_ptr(), tensor.get_num_elements(), d_flag);
CK_CUDA_THROW_(
cudaMemcpyAsync(h_array, d_array, sizeof(h_array), cudaMemcpyDeviceToHost, stream));
CK_CUDA_THROW_(cudaMemcpyAsync(&h_flag, d_flag, sizeof(int), cudaMemcpyDeviceToHost, stream));
CK_CUDA_THROW_(cudaStreamSynchronize(stream));
std::stringstream ss;
ss << "Diagnose for (" << category << "), Histogram [" << h_array[0] << ", " << h_array[1] << "]"
<< ", [" << h_array[2] << ", " << h_array[3] << "]" << std::endl;
MESSAGE_(ss.str());
if (h_flag != 0) {
CK_THROW_(Error_t::DataCheckError, std::string("Nan assert for ") + category + " failed(" +
std::to_string(h_flag) + ").");
}
CK_CUDA_THROW_(cudaFree(d_array));
CK_CUDA_THROW_(cudaFree(d_flag));
}
template <typename T>
void sample_and_print(const char* category, const Tensor2<T>& tensor, size_t sample_count,
const cudaStream_t& stream) {
if (sample_count == 0) return;
std::unique_ptr<float[]> h_array(new float[sample_count]);
float* d_array;
CK_CUDA_THROW_(cudaMalloc(&d_array, sample_count * sizeof(float)));
CK_CUDA_THROW_(cudaMemsetAsync(d_array, 0, sample_count * sizeof(float), stream));
sample_kernel<<<160, 1024, 0, stream>>>(tensor.get_ptr(), tensor.get_num_elements(), d_array,
tensor.get_num_elements() / sample_count, sample_count);
CK_CUDA_THROW_(cudaMemcpyAsync(h_array.get(), d_array, sample_count * sizeof(float),
cudaMemcpyDeviceToHost, stream));
CK_CUDA_THROW_(cudaStreamSynchronize(stream));
std::stringstream ss;
ss << "Diagnose for (" << category << "), Sampling [";
for (size_t i = 0; i < min(sample_count, tensor.get_num_elements()); i++) {
if (i != 0) ss << ",";
ss << h_array[i];
}
ss << "]" << std::endl;
MESSAGE_(ss.str());
CK_CUDA_THROW_(cudaFree(d_array));
}
template <typename T>
void sample_and_print(const char* category, const Tensor2<T>& tensor, int begin, int end,
const cudaStream_t& stream) {
if (begin >= 0 && end <= static_cast<int>(tensor.get_num_elements()) && end > begin) {
} else if (end < 0 && begin >= -static_cast<int>(tensor.get_num_elements()) && end > begin) {
begin += tensor.get_num_elements();
end += tensor.get_num_elements();
} else {
return;
}
std::unique_ptr<T[]> h_array(new T[end - begin]);
CK_CUDA_THROW_(cudaMemcpyAsync(h_array.get(), tensor.get_ptr() + begin,
(end - begin) * sizeof(T), cudaMemcpyDeviceToHost, stream));
CK_CUDA_THROW_(cudaStreamSynchronize(stream));
std::stringstream ss;
ss << "Diagnose for (" << category << "), Sampling [";
for (size_t i = 0; i < end - begin; i++) {
if (i != 0) ss << ",";
ss << h_array[i];
}
ss << "]" << std::endl;
MESSAGE_(ss.str());
}
template <typename T>
void dump(const char* filename, const Tensor2<T>& tensor, const cudaStream_t& stream) {
std::unique_ptr<T[]> h_array(new T[tensor.get_num_elements()]);
CK_CUDA_THROW_(cudaMemcpyAsync(h_array.get(), tensor.get_ptr(), tensor.get_size_in_bytes(),
cudaMemcpyDeviceToHost, stream));
CK_CUDA_THROW_(cudaStreamSynchronize(stream));
std::ofstream s(filename, std::ios::out | std::ios::binary);
s.write(reinterpret_cast<const char*>(h_array.get()), tensor.get_size_in_bytes());
s.close();
}
template void verify_and_histogram<float>(const char* category, const Tensor2<float>& tensor,
const cudaStream_t& stream);
template void dump<unsigned int>(const char* filename, const Tensor2<unsigned int>& tensor,
const cudaStream_t& stream);
template void dump<unsigned long>(const char* filename, const Tensor2<unsigned long>& tensor,
const cudaStream_t& stream);
template void dump<long long>(const char* filename, const Tensor2<long long>& tensor,
const cudaStream_t& stream);
template void dump<float>(const char* filename, const Tensor2<float>& tensor,
const cudaStream_t& stream);
template void dump<__half>(const char* filename, const Tensor2<__half>& tensor,
const cudaStream_t& stream);
} // namespace diagnose
} // namespace HugeCTR
|
1ba6eeff4abc07ff876b0a743c190450f6d459d1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include "unary_op_hip.cuh"
#include <gtest/gtest.h>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/eltwise.cuh>
#include <raft/linalg/map.cuh>
#include <raft/matrix/init.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
/*
* Padded_float is a 12 byte type that contains a single float. Two integers are
* used for padding. It is used to test types that are not power-of-two-sized.
*/
struct padded_float {
float value_;
int padding1;
int padding2;
padded_float() = default;
constexpr padded_float(const float& x) : value_(x), padding1(0), padding2(0) {}
constexpr padded_float(const padded_float&) = default;
constexpr padded_float& operator=(const padded_float&) = default;
constexpr float abs() const { return std::abs(value_); }
};
constexpr padded_float operator+(const padded_float& x, const padded_float& y)
{
return padded_float(x.value_ + y.value_);
}
constexpr padded_float operator-(const padded_float& x, const padded_float& y)
{
return padded_float(x.value_ - y.value_);
}
constexpr padded_float operator*(const padded_float& x, const padded_float& y)
{
return padded_float(x.value_ * y.value_);
}
constexpr padded_float operator*(const padded_float& x, const int& scalar)
{
return padded_float(scalar * x.value_);
}
constexpr bool operator==(const padded_float& x, const padded_float& y)
{
return x.value_ == y.value_;
}
constexpr bool operator<(const padded_float& x, const padded_float& y)
{
return x.value_ < y.value_;
}
constexpr bool operator>(const padded_float& x, const padded_float& y)
{
return x.value_ > y.value_;
}
inline auto operator<<(std::ostream& os, const padded_float& x) -> std::ostream&
{
os << x.value_;
return os;
}
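// Illustrative compile-time check, not part of the original test: it restates the
// claim in the comment above that one float plus two padding ints yields a 12-byte
// struct, i.e. a size that is not a power of two (true on typical 32/64-bit targets).
static_assert(sizeof(padded_float) == 12, "padded_float is expected to occupy 12 bytes");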
template <typename InType, typename IdxType, typename OutType>
void mapLaunch(OutType* out,
const InType* in1,
const InType* in2,
const InType* in3,
InType scalar,
IdxType len,
hipStream_t stream)
{
raft::resources handle;
resource::set_cuda_stream(handle, stream);
auto out_view = raft::make_device_vector_view(out, len);
auto in1_view = raft::make_device_vector_view(in1, len);
auto in2_view = raft::make_device_vector_view(in2, len);
auto in3_view = raft::make_device_vector_view(in3, len);
map(
handle,
out_view,
[=] __device__(InType a, InType b, InType c) { return a + b + c + scalar; },
in1_view,
in2_view,
in3_view);
}
template <typename InType, typename IdxType = int, typename OutType = InType>
struct MapInputs {
InType tolerance;
IdxType len;
unsigned long long int seed;
InType scalar;
};
template <typename InType, typename IdxType, typename OutType = InType>
void create_ref(OutType* out_ref,
const InType* in1,
const InType* in2,
const InType* in3,
InType scalar,
IdxType len,
hipStream_t stream)
{
rmm::device_uvector<InType> tmp(len, stream);
eltwiseAdd(tmp.data(), in1, in2, len, stream);
eltwiseAdd(out_ref, tmp.data(), in3, len, stream);
scalarAdd(out_ref, out_ref, (OutType)scalar, len, stream);
RAFT_CUDA_TRY(hipStreamSynchronize(stream));
}
template <typename InType, typename IdxType, typename OutType = InType>
class MapTest : public ::testing::TestWithParam<MapInputs<InType, IdxType, OutType>> {
public:
MapTest()
: params(::testing::TestWithParam<MapInputs<InType, IdxType, OutType>>::GetParam()),
stream(resource::get_cuda_stream(handle)),
in1(params.len, stream),
in2(params.len, stream),
in3(params.len, stream),
out_ref(params.len, stream),
out(params.len, stream)
{
}
void SetUp() override
{
raft::random::RngState r(params.seed);
IdxType len = params.len;
if constexpr (std::is_floating_point<InType>::value) {
uniform(handle, r, in1.data(), len, InType(-1.0), InType(1.0));
uniform(handle, r, in2.data(), len, InType(-1.0), InType(1.0));
uniform(handle, r, in3.data(), len, InType(-1.0), InType(1.0));
} else {
// First create random float arrays
rmm::device_uvector<float> fin1(params.len, stream);
rmm::device_uvector<float> fin2(params.len, stream);
rmm::device_uvector<float> fin3(params.len, stream);
uniform(handle, r, fin1.data(), len, float(-1.0), float(1.0));
uniform(handle, r, fin2.data(), len, float(-1.0), float(1.0));
uniform(handle, r, fin3.data(), len, float(-1.0), float(1.0));
// Then pad them
raft::device_resources handle{stream};
auto fin1_view = raft::make_device_vector_view(fin1.data(), fin1.size());
auto fin2_view = raft::make_device_vector_view(fin2.data(), fin2.size());
auto fin3_view = raft::make_device_vector_view(fin3.data(), fin3.size());
auto in1_view = raft::make_device_vector_view(in1.data(), in1.size());
auto in2_view = raft::make_device_vector_view(in2.data(), in2.size());
auto in3_view = raft::make_device_vector_view(in3.data(), in3.size());
auto add_padding = [] __device__(float a) { return padded_float(a); };
raft::linalg::map(handle, in1_view, add_padding, raft::make_const_mdspan(fin1_view));
raft::linalg::map(handle, in2_view, add_padding, raft::make_const_mdspan(fin2_view));
raft::linalg::map(handle, in3_view, add_padding, raft::make_const_mdspan(fin3_view));
}
create_ref(out_ref.data(), in1.data(), in2.data(), in3.data(), params.scalar, len, stream);
mapLaunch(out.data(), in1.data(), in2.data(), in3.data(), params.scalar, len, stream);
RAFT_CUDA_TRY(hipStreamSynchronize(stream));
}
protected:
raft::resources handle;
hipStream_t stream;
MapInputs<InType, IdxType, OutType> params;
rmm::device_uvector<InType> in1, in2, in3;
rmm::device_uvector<OutType> out_ref, out;
};
template <typename OutType, typename IdxType>
class MapOffsetTest : public ::testing::TestWithParam<MapInputs<OutType, IdxType, OutType>> {
public:
MapOffsetTest()
: params(::testing::TestWithParam<MapInputs<OutType, IdxType, OutType>>::GetParam()),
stream(resource::get_cuda_stream(handle)),
out_ref(params.len, stream),
out(params.len, stream)
{
}
protected:
void SetUp() override
{
IdxType len = params.len;
OutType scalar = params.scalar;
naiveScale(out_ref.data(), (OutType*)nullptr, scalar, len, stream);
auto out_view = raft::make_device_vector_view(out.data(), len);
map_offset(handle,
out_view,
raft::compose_op(raft::cast_op<OutType>(), raft::mul_const_op<OutType>(scalar)));
RAFT_CUDA_TRY(hipStreamSynchronize(stream));
}
protected:
raft::resources handle;
hipStream_t stream;
MapInputs<OutType, IdxType, OutType> params;
rmm::device_uvector<OutType> out_ref, out;
};
#define MAP_TEST(test_type, test_name, inputs) \
typedef RAFT_DEPAREN(test_type) test_name; \
TEST_P(test_name, Result) \
{ \
ASSERT_TRUE(devArrMatch(this->out_ref.data(), \
this->out.data(), \
this->params.len, \
CompareApprox(this->params.tolerance))); \
} \
INSTANTIATE_TEST_SUITE_P(MapTests, test_name, ::testing::ValuesIn(inputs))
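// For reference, an invocation such as MAP_TEST((MapTest<float, int>), MapTestF_i32, inputsf_i32)
// expands roughly to (RAFT_DEPAREN strips the protective parentheses around the template type):
//   typedef MapTest<float, int> MapTestF_i32;
//   TEST_P(MapTestF_i32, Result) { ASSERT_TRUE(devArrMatch(/* out_ref vs out with CompareApprox */)); }
//   INSTANTIATE_TEST_SUITE_P(MapTests, MapTestF_i32, ::testing::ValuesIn(inputsf_i32));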
const std::vector<MapInputs<float, int>> inputsf_i32 = {{0.000001f, 1024 * 1024, 1234ULL, 3.2}};
MAP_TEST((MapTest<float, int>), MapTestF_i32, inputsf_i32);
MAP_TEST((MapOffsetTest<float, int>), MapOffsetTestF_i32, inputsf_i32);
const std::vector<MapInputs<float, size_t>> inputsf_i64 = {{0.000001f, 1024 * 1024, 1234ULL, 9.4}};
MAP_TEST((MapTest<float, size_t>), MapTestF_i64, inputsf_i64);
MAP_TEST((MapOffsetTest<float, size_t>), MapOffsetTestF_i64, inputsf_i64);
const std::vector<MapInputs<float, int, double>> inputsf_i32_d = {
{0.000001f, 1024 * 1024, 1234ULL, 5.9}};
MAP_TEST((MapTest<float, int, double>), MapTestF_i32_D, inputsf_i32_d);
const std::vector<MapInputs<double, int>> inputsd_i32 = {{0.00000001, 1024 * 1024, 1234ULL, 7.5}};
MAP_TEST((MapTest<double, int>), MapTestD_i32, inputsd_i32);
MAP_TEST((MapOffsetTest<double, int>), MapOffsetTestD_i32, inputsd_i32);
const std::vector<MapInputs<double, size_t>> inputsd_i64 = {
{0.00000001, 1024 * 1024, 1234ULL, 5.2}};
MAP_TEST((MapTest<double, size_t>), MapTestD_i64, inputsd_i64);
MAP_TEST((MapOffsetTest<double, size_t>), MapOffsetTestD_i64, inputsd_i64);
// This comparison structure is necessary because it is not straightforward to
// add an overload of std::abs for padded_float.
struct ComparePadded {
float eps;
ComparePadded(float eps_) : eps(eps_) {}
ComparePadded(padded_float eps_) : eps(eps_.value_) {}
ComparePadded(double eps_) : eps(eps_) {}
bool operator()(const padded_float& a, const padded_float& b) const
{
float diff = (a - b).abs();
float m = ::max(a.abs(), b.abs());
float ratio = diff > eps ? diff / m : diff;
return (ratio <= eps);
}
};
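// Behavior sketch with illustrative numbers (eps = 1e-6f): for a = 1.0f and
// b = 1.0f + 5e-7f the difference 5e-7 is <= eps, so the raw difference is compared
// and the values match; for a = 1e6f and b = 1e6f + 1.0f the difference exceeds eps,
// so the relative error diff / max(|a|, |b|) ~ 1e-6 is compared instead. Small values
// are thus compared absolutely and large values relatively, mirroring CompareApprox.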
// Use ComparePadded instead of CompareApprox
#define MAP_TEST_PADDED(test_type, test_name, inputs) \
typedef RAFT_DEPAREN(test_type) test_name; \
TEST_P(test_name, Result) \
{ \
ASSERT_TRUE(devArrMatch(this->out_ref.data(), \
this->out.data(), \
this->params.len, \
ComparePadded(this->params.tolerance))); \
} \
INSTANTIATE_TEST_SUITE_P(MapTests, test_name, ::testing::ValuesIn(inputs))
const std::vector<MapInputs<padded_float, size_t>> inputsd_padded_float = {
{0.00000001, 1024 * 1024, 1234ULL, 5.2}};
MAP_TEST_PADDED((MapTest<padded_float, size_t>), MapTestD_padded_float, inputsd_padded_float);
MAP_TEST_PADDED((MapOffsetTest<padded_float, size_t>),
MapOffsetTestD_padded_float,
inputsd_padded_float);
} // namespace linalg
} // namespace raft
| 1ba6eeff4abc07ff876b0a743c190450f6d459d1.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include "unary_op.cuh"
#include <gtest/gtest.h>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/eltwise.cuh>
#include <raft/linalg/map.cuh>
#include <raft/matrix/init.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
/*
* Padded_float is a 12 byte type that contains a single float. Two integers are
* used for padding. It is used to test types that are not power-of-two-sized.
*/
struct padded_float {
float value_;
int padding1;
int padding2;
padded_float() = default;
constexpr padded_float(const float& x) : value_(x), padding1(0), padding2(0) {}
constexpr padded_float(const padded_float&) = default;
constexpr padded_float& operator=(const padded_float&) = default;
constexpr float abs() const { return std::abs(value_); }
};
constexpr padded_float operator+(const padded_float& x, const padded_float& y)
{
return padded_float(x.value_ + y.value_);
}
constexpr padded_float operator-(const padded_float& x, const padded_float& y)
{
return padded_float(x.value_ - y.value_);
}
constexpr padded_float operator*(const padded_float& x, const padded_float& y)
{
return padded_float(x.value_ * y.value_);
}
constexpr padded_float operator*(const padded_float& x, const int& scalar)
{
return padded_float(scalar * x.value_);
}
constexpr bool operator==(const padded_float& x, const padded_float& y)
{
return x.value_ == y.value_;
}
constexpr bool operator<(const padded_float& x, const padded_float& y)
{
return x.value_ < y.value_;
}
constexpr bool operator>(const padded_float& x, const padded_float& y)
{
return x.value_ > y.value_;
}
inline auto operator<<(std::ostream& os, const padded_float& x) -> std::ostream&
{
os << x.value_;
return os;
}
template <typename InType, typename IdxType, typename OutType>
void mapLaunch(OutType* out,
const InType* in1,
const InType* in2,
const InType* in3,
InType scalar,
IdxType len,
cudaStream_t stream)
{
raft::resources handle;
resource::set_cuda_stream(handle, stream);
auto out_view = raft::make_device_vector_view(out, len);
auto in1_view = raft::make_device_vector_view(in1, len);
auto in2_view = raft::make_device_vector_view(in2, len);
auto in3_view = raft::make_device_vector_view(in3, len);
map(
handle,
out_view,
[=] __device__(InType a, InType b, InType c) { return a + b + c + scalar; },
in1_view,
in2_view,
in3_view);
}
template <typename InType, typename IdxType = int, typename OutType = InType>
struct MapInputs {
InType tolerance;
IdxType len;
unsigned long long int seed;
InType scalar;
};
template <typename InType, typename IdxType, typename OutType = InType>
void create_ref(OutType* out_ref,
const InType* in1,
const InType* in2,
const InType* in3,
InType scalar,
IdxType len,
cudaStream_t stream)
{
rmm::device_uvector<InType> tmp(len, stream);
eltwiseAdd(tmp.data(), in1, in2, len, stream);
eltwiseAdd(out_ref, tmp.data(), in3, len, stream);
scalarAdd(out_ref, out_ref, (OutType)scalar, len, stream);
RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
}
template <typename InType, typename IdxType, typename OutType = InType>
class MapTest : public ::testing::TestWithParam<MapInputs<InType, IdxType, OutType>> {
public:
MapTest()
: params(::testing::TestWithParam<MapInputs<InType, IdxType, OutType>>::GetParam()),
stream(resource::get_cuda_stream(handle)),
in1(params.len, stream),
in2(params.len, stream),
in3(params.len, stream),
out_ref(params.len, stream),
out(params.len, stream)
{
}
void SetUp() override
{
raft::random::RngState r(params.seed);
IdxType len = params.len;
if constexpr (std::is_floating_point<InType>::value) {
uniform(handle, r, in1.data(), len, InType(-1.0), InType(1.0));
uniform(handle, r, in2.data(), len, InType(-1.0), InType(1.0));
uniform(handle, r, in3.data(), len, InType(-1.0), InType(1.0));
} else {
// First create random float arrays
rmm::device_uvector<float> fin1(params.len, stream);
rmm::device_uvector<float> fin2(params.len, stream);
rmm::device_uvector<float> fin3(params.len, stream);
uniform(handle, r, fin1.data(), len, float(-1.0), float(1.0));
uniform(handle, r, fin2.data(), len, float(-1.0), float(1.0));
uniform(handle, r, fin3.data(), len, float(-1.0), float(1.0));
// Then pad them
raft::device_resources handle{stream};
auto fin1_view = raft::make_device_vector_view(fin1.data(), fin1.size());
auto fin2_view = raft::make_device_vector_view(fin2.data(), fin2.size());
auto fin3_view = raft::make_device_vector_view(fin3.data(), fin3.size());
auto in1_view = raft::make_device_vector_view(in1.data(), in1.size());
auto in2_view = raft::make_device_vector_view(in2.data(), in2.size());
auto in3_view = raft::make_device_vector_view(in3.data(), in3.size());
auto add_padding = [] __device__(float a) { return padded_float(a); };
raft::linalg::map(handle, in1_view, add_padding, raft::make_const_mdspan(fin1_view));
raft::linalg::map(handle, in2_view, add_padding, raft::make_const_mdspan(fin2_view));
raft::linalg::map(handle, in3_view, add_padding, raft::make_const_mdspan(fin3_view));
}
create_ref(out_ref.data(), in1.data(), in2.data(), in3.data(), params.scalar, len, stream);
mapLaunch(out.data(), in1.data(), in2.data(), in3.data(), params.scalar, len, stream);
RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
}
protected:
raft::resources handle;
cudaStream_t stream;
MapInputs<InType, IdxType, OutType> params;
rmm::device_uvector<InType> in1, in2, in3;
rmm::device_uvector<OutType> out_ref, out;
};
template <typename OutType, typename IdxType>
class MapOffsetTest : public ::testing::TestWithParam<MapInputs<OutType, IdxType, OutType>> {
public:
MapOffsetTest()
: params(::testing::TestWithParam<MapInputs<OutType, IdxType, OutType>>::GetParam()),
stream(resource::get_cuda_stream(handle)),
out_ref(params.len, stream),
out(params.len, stream)
{
}
protected:
void SetUp() override
{
IdxType len = params.len;
OutType scalar = params.scalar;
naiveScale(out_ref.data(), (OutType*)nullptr, scalar, len, stream);
auto out_view = raft::make_device_vector_view(out.data(), len);
map_offset(handle,
out_view,
raft::compose_op(raft::cast_op<OutType>(), raft::mul_const_op<OutType>(scalar)));
RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
}
protected:
raft::resources handle;
cudaStream_t stream;
MapInputs<OutType, IdxType, OutType> params;
rmm::device_uvector<OutType> out_ref, out;
};
#define MAP_TEST(test_type, test_name, inputs) \
typedef RAFT_DEPAREN(test_type) test_name; \
TEST_P(test_name, Result) \
{ \
ASSERT_TRUE(devArrMatch(this->out_ref.data(), \
this->out.data(), \
this->params.len, \
CompareApprox(this->params.tolerance))); \
} \
INSTANTIATE_TEST_SUITE_P(MapTests, test_name, ::testing::ValuesIn(inputs))
const std::vector<MapInputs<float, int>> inputsf_i32 = {{0.000001f, 1024 * 1024, 1234ULL, 3.2}};
MAP_TEST((MapTest<float, int>), MapTestF_i32, inputsf_i32);
MAP_TEST((MapOffsetTest<float, int>), MapOffsetTestF_i32, inputsf_i32);
const std::vector<MapInputs<float, size_t>> inputsf_i64 = {{0.000001f, 1024 * 1024, 1234ULL, 9.4}};
MAP_TEST((MapTest<float, size_t>), MapTestF_i64, inputsf_i64);
MAP_TEST((MapOffsetTest<float, size_t>), MapOffsetTestF_i64, inputsf_i64);
const std::vector<MapInputs<float, int, double>> inputsf_i32_d = {
{0.000001f, 1024 * 1024, 1234ULL, 5.9}};
MAP_TEST((MapTest<float, int, double>), MapTestF_i32_D, inputsf_i32_d);
const std::vector<MapInputs<double, int>> inputsd_i32 = {{0.00000001, 1024 * 1024, 1234ULL, 7.5}};
MAP_TEST((MapTest<double, int>), MapTestD_i32, inputsd_i32);
MAP_TEST((MapOffsetTest<double, int>), MapOffsetTestD_i32, inputsd_i32);
const std::vector<MapInputs<double, size_t>> inputsd_i64 = {
{0.00000001, 1024 * 1024, 1234ULL, 5.2}};
MAP_TEST((MapTest<double, size_t>), MapTestD_i64, inputsd_i64);
MAP_TEST((MapOffsetTest<double, size_t>), MapOffsetTestD_i64, inputsd_i64);
// This comparison structure is necessary because it is not straightforward to
// add an overload of std::abs for padded_float.
struct ComparePadded {
float eps;
ComparePadded(float eps_) : eps(eps_) {}
ComparePadded(padded_float eps_) : eps(eps_.value_) {}
ComparePadded(double eps_) : eps(eps_) {}
bool operator()(const padded_float& a, const padded_float& b) const
{
float diff = (a - b).abs();
float m = std::max(a.abs(), b.abs());
float ratio = diff > eps ? diff / m : diff;
return (ratio <= eps);
}
};
// Use ComparePadded instead of CompareApprox
#define MAP_TEST_PADDED(test_type, test_name, inputs) \
typedef RAFT_DEPAREN(test_type) test_name; \
TEST_P(test_name, Result) \
{ \
ASSERT_TRUE(devArrMatch(this->out_ref.data(), \
this->out.data(), \
this->params.len, \
ComparePadded(this->params.tolerance))); \
} \
INSTANTIATE_TEST_SUITE_P(MapTests, test_name, ::testing::ValuesIn(inputs))
const std::vector<MapInputs<padded_float, size_t>> inputsd_padded_float = {
{0.00000001, 1024 * 1024, 1234ULL, 5.2}};
MAP_TEST_PADDED((MapTest<padded_float, size_t>), MapTestD_padded_float, inputsd_padded_float);
MAP_TEST_PADDED((MapOffsetTest<padded_float, size_t>),
MapOffsetTestD_padded_float,
inputsd_padded_float);
} // namespace linalg
} // namespace raft
|
be4f7f118287f82d2e12f5c9b989065413757c96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__
void comm_empty(
real_2_t * __restrict__ sigma_in,
real_2_t * __restrict__ sigma_out,
real_2_t * __restrict__ hamiltonian)
{
}
__global__
void comm_init (
const real_2_t * __restrict__ sigma_in,
real_2_t * __restrict__ sigma_out,
const real_2_t * __restrict__ hamiltonian,
const int dim)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int sigma_id = gid * dim * dim;
// compute commutator: -i * dt/hbar * (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
real_2_t tmp;
tmp.x = 0.0;
tmp.y = 0.0;
for (int k = 0; k < dim; ++k) {
// z=(x,y), w=(u,v) z*w = (xu-yv, xv+yu)
tmp.x += (hamiltonian[i * dim + k].x * sigma_in[sigma_id + k * dim + j].x -
sigma_in[sigma_id + i * dim + k].x * hamiltonian[k * dim + j].x);
tmp.x -= (hamiltonian[i * dim + k].y * sigma_in[sigma_id + k * dim + j].y -
sigma_in[sigma_id + i * dim + k].y * hamiltonian[k * dim + j].y);
tmp.y += (hamiltonian[i * dim + k].x * sigma_in[sigma_id + k * dim + j].y -
sigma_in[sigma_id + i * dim + k].x * hamiltonian[k * dim + j].y);
tmp.y += (hamiltonian[i * dim + k].y * sigma_in[sigma_id + k * dim + j].x -
sigma_in[sigma_id + i * dim + k].y * hamiltonian[k * dim + j].x);
}
// multiply with -i * dt / hbar
sigma_out[sigma_id + i * dim + j].x += hdt * tmp.y;
sigma_out[sigma_id + i * dim + j].y -= hdt * tmp.x;
}
}
}
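// The arithmetic above uses the usual complex identities, sketched here for reference:
// (x + iy) * (u + iv) = (xu - yv) + i(xv + yu), and multiplying an accumulated
// commutator element (X + iY) by -i * dt/hbar (the constant hdt, presumably defined
// elsewhere as dt/hbar) yields hdt*Y - i*hdt*X, which is why the real part gains
// hdt * tmp.y and the imaginary part loses hdt * tmp.x.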
__global__
void comm_refactor(
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
#define sigma_real(i, j) (sigma_id + 2 * ((i) * dim + (j)))
#define sigma_imag(i, j) (sigma_id + 2 * ((i) * dim + (j)) + 1)
#define ham_real(i, j) (2 * ((i) * dim + (j)))
#define ham_imag(i, j) (2 * ((i) * dim + (j)) + 1)
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int sigma_id = gid * dim * dim * 2;
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
for (int k = 0; k < dim; ++k) {
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_real -= hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real += sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_imag += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
// multiply with -i dt/hbar
sigma_out[sigma_real(i, j)] += hdt * tmp_imag;
sigma_out[sigma_imag(i, j)] -= hdt * tmp_real;
}
}
}
__global__
void comm_refactor_direct_store(
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
#define sigma_real(i, j) (sigma_id + 2 * ((i) * dim + (j)))
#define sigma_imag(i, j) (sigma_id + 2 * ((i) * dim + (j)) + 1)
#define ham_real(i, j) (2 * ((i) * dim + (j)))
#define ham_imag(i, j) (2 * ((i) * dim + (j)) + 1)
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int sigma_id = gid * dim * dim * 2;
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
for (int k = 0; k < dim; ++k) {
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
}
}
}
}
__global__
void comm_aosoa_naive(
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * dim * dim)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
for (int k = 0; k < dim; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
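// Index layout sketch for the AoSoA kernels (illustrative, taking VEC_LENGTH_AUTO == 4
// and dim == 2 for concreteness): matrices are grouped into packages of 4, and within a
// package the 4 real parts of element (i, j) are stored contiguously, immediately
// followed by the 4 imaginary parts. For gid == 5 (package 1, lane sigma_id == 1) and
// element (0, 1): package_id = 1 * 4 * 2 * 2 * 2 = 32, so the real part sits at
// index 32 + 2*4*(0*2 + 1) + 1 = 41 and the imaginary part at 41 + 4 = 45.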
__global__
void comm_aosoa_naive_constants (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * DIM * DIM)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
for (int k = 0; k < DIM; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
__global__
void comm_aosoa_naive_constants_perm (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * DIM * DIM)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_t ham_real_tmp = hamiltonian[ham_real(i, k)];
real_t ham_imag_tmp = hamiltonian[ham_imag(i, k)];
real_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
#ifdef USE_INITZERO
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
#else
real_t tmp_real = sigma_out[sigma_real(i, j)];
real_t tmp_imag = sigma_out[sigma_imag(i, j)];
#endif
tmp_imag -= ham_real_tmp * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_real_tmp * hamiltonian[ham_real(k, j)];
tmp_imag += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_real_tmp * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_imag_tmp * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
#ifdef USE_INITZERO
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
#else
sigma_out[sigma_real(i, j)] = tmp_real;
sigma_out[sigma_imag(i, j)] = tmp_imag;
#endif
}
}
}
}
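// Note on the USE_INITZERO switch above: with it defined, the per-k contribution is
// accumulated in registers starting from zero and then added onto sigma_out; without it,
// the current sigma_out element is loaded into the accumulator first and stored back at
// the end. Both paths are intended to produce the same sums and differ only in when
// global memory is read and written.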
__global__
void comm_aosoa_naive_direct (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * dim * dim)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
for (int k = 0; k < dim; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_aosoa_naive_constants_direct (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * DIM * DIM)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
for (int k = 0; k < DIM; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_aosoa_naive_constants_direct_perm (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * DIM * DIM)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_t ham_real_tmp = hamiltonian[ham_real(i, k)];
real_t ham_imag_tmp = hamiltonian[ham_imag(i, k)];
real_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
sigma_out[sigma_imag(i, j)] -= ham_real_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_real_tmp * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_real_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_imag_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_aosoa (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * dim * dim))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
for (int k = 0; k < dim; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
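// Launch-shape assumption for comm_aosoa and the following comm_aosoa_* kernels that use
// the blockIdx.y/threadIdx.y package indexing (the host code is not part of this file):
// threadIdx.x selects the lane within a vector package (so blockDim.x == VEC_LENGTH_AUTO),
// threadIdx.y selects one of PACKAGES_PER_WG packages handled by the block, and blockIdx.y
// enumerates groups of PACKAGES_PER_WG packages, i.e. roughly
// dim3(VEC_LENGTH_AUTO, PACKAGES_PER_WG) threads per block.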
__global__
void comm_aosoa_constants (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * DIM * DIM))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
for (int k = 0; k < DIM; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
__global__
void comm_aosoa_constants_perm (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * DIM * DIM))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_t ham_real_tmp = hamiltonian[ham_real(i, k)];
real_t ham_imag_tmp = hamiltonian[ham_imag(i, k)];
real_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
#ifdef USE_INITZERO
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
#else
real_t tmp_real = sigma_out[sigma_real(i, j)];
real_t tmp_imag = sigma_out[sigma_imag(i, j)];
#endif
tmp_imag -= ham_real_tmp * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_real_tmp * hamiltonian[ham_real(k, j)];
tmp_imag += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_real_tmp * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_imag_tmp * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
#ifdef USE_INITZERO
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
#else
sigma_out[sigma_real(i, j)] = tmp_real;
sigma_out[sigma_imag(i, j)] = tmp_imag;
#endif
}
}
}
}
__global__
void comm_aosoa_direct (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * dim * dim))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
for (int k = 0; k < dim; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_aosoa_constants_direct (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * DIM * DIM))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
for (int k = 0; k < DIM; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_aosoa_constants_direct_perm (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * DIM * DIM))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_t ham_real_tmp = hamiltonian[ham_real(i, k)];
real_t ham_imag_tmp = hamiltonian[ham_imag(i, k)];
real_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
sigma_out[sigma_imag(i, j)] -= ham_real_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_real_tmp * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_real_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_imag_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
}
}
}
}
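// The comm_manual_* kernels below operate on real_vec_t, a SIMD-style vector of real_t
// that packs the same matrix element from several sigma matrices, and on v(x), which
// broadcasts a scalar into such a vector. Both are assumed to be defined in a header
// that is not part of this file, with the element-wise and vector-times-scalar
// operators used here.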
__global__
void comm_manual_aosoa (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
real_vec_t *__restrict__ sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict__ sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
// index of the package to process == global thread index (gid)
#define package_id (gid * dim * dim * 2)
#define sigma_real(i, j) (package_id + 2 * (dim * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (dim * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
real_vec_t tmp_real = v(0.0);
real_vec_t tmp_imag = v(0.0);
for (int k = 0; k < dim; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
__global__
void comm_manual_aosoa_constants (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_vec_t *__restrict__ sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict__ sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
real_vec_t tmp_real = v(0.0);
real_vec_t tmp_imag = v(0.0);
for (int k = 0; k < DIM; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
__global__
void comm_manual_aosoa_constants_perm (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_vec_t *__restrict__ sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict__ sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_vec_t ham_real_tmp = v(hamiltonian[ham_real(i, k)]);
real_vec_t ham_imag_tmp = v(hamiltonian[ham_imag(i, k)]);
real_vec_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_vec_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
#ifdef USE_INITZERO
real_vec_t tmp_real = v(0.0);
real_vec_t tmp_imag = v(0.0);
#else
real_vec_t tmp_real = sigma_out[sigma_real(i, j)];
real_vec_t tmp_imag = sigma_out[sigma_imag(i, j)];
#endif
tmp_imag -= ham_real_tmp * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_real_tmp * hamiltonian[ham_real(k, j)];
tmp_imag += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_real_tmp * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_imag_tmp * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
#ifdef USE_INITZERO
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
#else
sigma_out[sigma_real(i, j)] = tmp_real;
sigma_out[sigma_imag(i, j)] = tmp_imag;
#endif
}
}
}
}
__global__
void comm_manual_aosoa_constants_perm_prefetch (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_vec_t *__restrict__ sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict__ sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
int j = 0;
//(sigma_out.get_pointer() + sigma_real(i, j)).prefetch(2 * DIM);
for (j = 0; j < DIM; ++j) {
real_vec_t tmp_real = v(0.0);
real_vec_t tmp_imag = v(0.0);
for (int k = 0; k < DIM; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
__global__
void comm_manual_aosoa_direct (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
real_vec_t *__restrict__ sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict__ sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * dim * dim * 2)
#define sigma_real(i, j) (package_id + 2 * (dim * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (dim * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
for (int k = 0; k < dim; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_manual_aosoa_constants_direct (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_vec_t *__restrict__ sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict__ sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
for (int k = 0; k < DIM; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_manual_aosoa_constants_direct_prefetch (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_vec_t *__restrict__ sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict__ sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
// prefetch result memory for the next inner loops
int j = 0;
//prefetch(&sigma_out[sigma_real(i, j)], 2 * DIM);
//(sigma_out.get_pointer() + sigma_real(i, j)).prefetch(2 * DIM);
for (j = 0; j < DIM; ++j) {
for (int k = 0; k < DIM; ++k)
{
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_manual_aosoa_constants_direct_perm (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_vec_t *__restrict__ sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict__ sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_vec_t ham_real_tmp = v(hamiltonian[ham_real(i, k)]);
real_vec_t ham_imag_tmp = v(hamiltonian[ham_imag(i, k)]);
real_vec_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_vec_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
sigma_out[sigma_imag(i, j)] -= ham_real_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_real_tmp * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_real_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_imag_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void final_gpu_kernel (
const real_2_t * __restrict__ sigma_in,
real_2_t * __restrict__ sigma_out,
const real_2_t * __restrict__ hamiltonian,
const int num)
{
#define id_2d_to_1d(i,j) ((i) * DIM + (j))
#define sigma_id(i,j,m) ((m) * DIM * DIM + ((i) * DIM + (j)))
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
// Local memory (__shared__): shared between all threads in the same thread block.
// 2-way shared memory bank conflicts will occur for real_t = double.
// Real parts and imaginary parts are stored separately to avoid 4-way bank conflicts in case of real_2_t = double2.
__shared__ real_t ham_local_real[DIM*DIM];
__shared__ real_t ham_local_imag[DIM*DIM];
// Input sigma matrices: real part (2 matrices are processed at once)
__shared__ real_t sigma_local_real[2][NUM_SUB_GROUPS][DIM*DIM];
// Input sigma matrices: imag part (2 matrices are processed at once)
__shared__ real_t sigma_local_imag[2][NUM_SUB_GROUPS][DIM*DIM];
// Determine matrix index (i,j) this work item is responsible for
int ij = threadIdx.x;
int i = ij / DIM; // Matrix index 'i' to be processed by this work item in any of 'start -> stop' matrices
int j = ij % DIM; // Matrix index 'j' to be processed by this work item in any of 'start -> stop' matrices
// Determine working set : Each work item participates in processing CHUNK_SIZE matrices : 'start -> stop'
int sub_group_id = threadIdx.y; // Local matrix ID within work group
int start = blockIdx.x * NUM_SUB_GROUPS * CHUNK_SIZE + sub_group_id * CHUNK_SIZE; // Global matrix ID : start
int stop = MIN(num, start + CHUNK_SIZE); // Global matrix ID : stop
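// Worked example with illustrative parameters (NUM_SUB_GROUPS == 2, CHUNK_SIZE == 8):
// the thread with blockIdx.x == 3, threadIdx.y == 1 and threadIdx.x == ij handles
// element (ij / DIM, ij % DIM) of the global matrices 56 .. min(num, 64) - 1,
// since start = 3 * 2 * 8 + 1 * 8 = 56.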
// Local variables
real_2_t snew1_ij, snew2_ij;
real_2_t s1, s2;
// Load Hamiltonian into local memory: only the first sub-group participates
if (ij < (DIM * DIM) && sub_group_id == 0)
{
const real_2_t h = hamiltonian[ij];
ham_local_real[ij] = h.x;
ham_local_imag[ij] = h.y;
}
// Process all CHUNK_SIZE matrices: two matrices are processed at once (therefore increment 2)
for (int m = start; m < stop; m += 2)
{
__syncthreads();
if (ij < (DIM * DIM))
{ // Load input sigma matrix into local memory: only threads with valid IDs participate
s1 = sigma_in[sigma_id(i, j, m)]; // Real and imaginary part of matrix 'm', element (i,j)
sigma_local_real[0][sub_group_id][ij] = s1.x;
sigma_local_imag[0][sub_group_id][ij] = s1.y;
s2 = sigma_in[sigma_id(i, j, m + 1)]; // Real and imaginary part of matrix 'm+1', element (i,j)
sigma_local_real[1][sub_group_id][ij] = s2.x;
sigma_local_imag[1][sub_group_id][ij] = s2.y;
s1 = sigma_out[sigma_id(i, j, m)]; // Prefetch real and imaginary part of output sigma matrix 'm', element (i,j)
snew1_ij.x = s1.x;
snew2_ij.x = s1.y;
s2 = sigma_out[sigma_id(i, j, m + 1)]; // Prefetch real and imaginary part of output sigma matrix 'm+1', element (i,j)
snew1_ij.y = s2.x;
snew2_ij.y = s2.y;
}
__syncthreads();
if (ij < (DIM * DIM))
{
// Compute commutator: [H,sigma] = H * sigma - sigma * H <=> [H,sigma]_ij = \sum_k ( H_ik * sigma_kj - sigma_ik * H_kj )
for (int k = 0; k < DIM; ++k)
{
const int ik = id_2d_to_1d(i, k);
const int kj = id_2d_to_1d(k, j);
// Reassemble real_2_t elements from local memory: 'vector processing' gives better performance here
s1 = {sigma_local_real[0][sub_group_id][kj], sigma_local_real[1][sub_group_id][kj]};
s2 = {sigma_local_imag[0][sub_group_id][kj], sigma_local_imag[1][sub_group_id][kj]};
snew1_ij += ham_local_real[ik] * s2;
snew1_ij += ham_local_imag[ik] * s1;
snew2_ij -= ham_local_real[ik] * s1;
snew2_ij += ham_local_imag[ik] * s2;
// Reassemble real_2_t elements from local memory: 'vector processing' gives better performance here
s1 = {sigma_local_real[0][sub_group_id][ik], sigma_local_real[1][sub_group_id][ik]};
s2 = {sigma_local_imag[0][sub_group_id][ik], sigma_local_imag[1][sub_group_id][ik]};
snew1_ij -= ham_local_real[kj] * s2;
snew1_ij += ham_local_imag[kj] * s1;
snew2_ij += ham_local_real[kj] * s1;
snew2_ij -= ham_local_imag[kj] * s2;
}
// Write output sigma matrices 'm' and 'm+1', element (i,j)
sigma_out[sigma_id(i, j, m)] = {snew1_ij.x, snew2_ij.x};
sigma_out[sigma_id(i, j, m + 1)] = {snew1_ij.y, snew2_ij.y};
}
}
}
| be4f7f118287f82d2e12f5c9b989065413757c96.cu | __global__
void comm_empty(
real_2_t * __restrict__ sigma_in,
real_2_t * __restrict__ sigma_out,
real_2_t * __restrict__ hamiltonian)
{
}
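// Baseline kernel: each thread handles one complete dim x dim density matrix.
// Complex values are stored as real_2_t, with .x holding the real part and .y the imaginary part.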
__global__
void comm_init (
const real_2_t * __restrict__ sigma_in,
real_2_t * __restrict__ sigma_out,
const real_2_t * __restrict__ hamiltonian,
const int dim)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int sigma_id = gid * dim * dim;
// compute commutator: -i * dt/hbar * (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
real_2_t tmp;
tmp.x = 0.0;
tmp.y = 0.0;
for (int k = 0; k < dim; ++k) {
// z=(x,y), w=(u,v) z*w = (xu-yv, xv+yu)
tmp.x += (hamiltonian[i * dim + k].x * sigma_in[sigma_id + k * dim + j].x -
sigma_in[sigma_id + i * dim + k].x * hamiltonian[k * dim + j].x);
tmp.x -= (hamiltonian[i * dim + k].y * sigma_in[sigma_id + k * dim + j].y -
sigma_in[sigma_id + i * dim + k].y * hamiltonian[k * dim + j].y);
tmp.y += (hamiltonian[i * dim + k].x * sigma_in[sigma_id + k * dim + j].y -
sigma_in[sigma_id + i * dim + k].x * hamiltonian[k * dim + j].y);
tmp.y += (hamiltonian[i * dim + k].y * sigma_in[sigma_id + k * dim + j].x -
sigma_in[sigma_id + i * dim + k].y * hamiltonian[k * dim + j].x);
}
// multiply with -i * dt / hbar
sigma_out[sigma_id + i * dim + j].x += hdt * tmp.y;
sigma_out[sigma_id + i * dim + j].y -= hdt * tmp.x;
}
}
}
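// Refactored variant: the complex buffers are reinterpreted as flat real_t arrays and addressed
// through the sigma_real/sigma_imag/ham_real/ham_imag macros; the commutator contribution for
// element (i,j) is accumulated in registers and written back once per element.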
__global__
void comm_refactor(
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
#define sigma_real(i, j) (sigma_id + 2 * ((i) * dim + (j)))
#define sigma_imag(i, j) (sigma_id + 2 * ((i) * dim + (j)) + 1)
#define ham_real(i, j) (2 * ((i) * dim + (j)))
#define ham_imag(i, j) (2 * ((i) * dim + (j)) + 1)
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int sigma_id = gid * dim * dim * 2;
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
for (int k = 0; k < dim; ++k) {
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_real -= hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real += sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_imag += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
// multiply with -i dt/hbar
sigma_out[sigma_real(i, j)] += hdt * tmp_imag;
sigma_out[sigma_imag(i, j)] -= hdt * tmp_real;
}
}
}
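// "direct_store" / "direct" variants accumulate the commutator terms straight into sigma_out in
// global memory inside the innermost k loop instead of using register temporaries.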
__global__
void comm_refactor_direct_store(
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
#define sigma_real(i, j) (sigma_id + 2 * ((i) * dim + (j)))
#define sigma_imag(i, j) (sigma_id + 2 * ((i) * dim + (j)) + 1)
#define ham_real(i, j) (2 * ((i) * dim + (j)))
#define ham_imag(i, j) (2 * ((i) * dim + (j)) + 1)
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int sigma_id = gid * dim * dim * 2;
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
for (int k = 0; k < dim; ++k) {
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
}
}
}
}
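// AoSoA (array-of-structures-of-arrays) variants: density matrices are grouped into packages of
// VEC_LENGTH_AUTO matrices; for each element (i,j) a package stores VEC_LENGTH_AUTO real parts
// followed by VEC_LENGTH_AUTO imaginary parts, so neighbouring threads within a package access
// neighbouring memory locations.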
__global__
void comm_aosoa_naive(
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * dim * dim)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
for (int k = 0; k < dim; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
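// "constants" variants replace the runtime matrix dimension by the compile-time constant DIM,
// so all loop bounds are known to the compiler.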
__global__
void comm_aosoa_naive_constants (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * DIM * DIM)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
for (int k = 0; k < DIM; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
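// "perm" variants permute the j and k loops and hoist the (i,k) operands out of the inner loop;
// with USE_INITZERO defined the partial results are added to sigma_out, otherwise sigma_out itself
// is loaded into the accumulator and stored back.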
__global__
void comm_aosoa_naive_constants_perm (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * DIM * DIM)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_t ham_real_tmp = hamiltonian[ham_real(i, k)];
real_t ham_imag_tmp = hamiltonian[ham_imag(i, k)];
real_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
#ifdef USE_INITZERO
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
#else
real_t tmp_real = sigma_out[sigma_real(i, j)];
real_t tmp_imag = sigma_out[sigma_imag(i, j)];
#endif
tmp_imag -= ham_real_tmp * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_real_tmp * hamiltonian[ham_real(k, j)];
tmp_imag += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_real_tmp * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_imag_tmp * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
#ifdef USE_INITZERO
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
#else
sigma_out[sigma_real(i, j)] = tmp_real;
sigma_out[sigma_imag(i, j)] = tmp_imag;
#endif
}
}
}
}
__global__
void comm_aosoa_naive_direct (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * dim * dim)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
for (int k = 0; k < dim; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_aosoa_naive_constants_direct (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * DIM * DIM)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
for (int k = 0; k < DIM; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_aosoa_naive_constants_direct_perm (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * DIM * DIM)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_t ham_real_tmp = hamiltonian[ham_real(i, k)];
real_t ham_imag_tmp = hamiltonian[ham_imag(i, k)];
real_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
sigma_out[sigma_imag(i, j)] -= ham_real_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_real_tmp * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_real_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_imag_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
}
}
}
}
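// The comm_aosoa* kernels below use a 2D thread block: threadIdx.x is the lane (matrix) inside a
// package and threadIdx.y selects one of the PACKAGES_PER_WG packages handled per block, with
// blockIdx.y choosing the group of packages.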
__global__
void comm_aosoa (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * dim * dim))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
for (int k = 0; k < dim; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
__global__
void comm_aosoa_constants (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * DIM * DIM))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
for (int k = 0; k < DIM; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
__global__
void comm_aosoa_constants_perm (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * DIM * DIM))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_t ham_real_tmp = hamiltonian[ham_real(i, k)];
real_t ham_imag_tmp = hamiltonian[ham_imag(i, k)];
real_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
#ifdef USE_INITZERO
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
#else
real_t tmp_real = sigma_out[sigma_real(i, j)];
real_t tmp_imag = sigma_out[sigma_imag(i, j)];
#endif
tmp_imag -= ham_real_tmp * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_real_tmp * hamiltonian[ham_real(k, j)];
tmp_imag += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_real_tmp * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_imag_tmp * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
#ifdef USE_INITZERO
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
#else
sigma_out[sigma_real(i, j)] = tmp_real;
sigma_out[sigma_imag(i, j)] = tmp_imag;
#endif
}
}
}
}
__global__
void comm_aosoa_direct (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * dim * dim))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
for (int k = 0; k < dim; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_aosoa_constants_direct (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * DIM * DIM))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
for (int k = 0; k < DIM; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_aosoa_constants_direct_perm (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_t *__restrict__ sigma_in = (real_t*) sigma2_in;
real_t *__restrict__ sigma_out = (real_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * DIM * DIM))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_t ham_real_tmp = hamiltonian[ham_real(i, k)];
real_t ham_imag_tmp = hamiltonian[ham_imag(i, k)];
real_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
sigma_out[sigma_imag(i, j)] -= ham_real_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_real_tmp * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_real_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_imag_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
}
}
}
}
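// comm_manual_aosoa* kernels vectorize explicitly: sigma matrices are accessed through the vector
// type real_vec_t, so each thread updates a whole vector of packed matrices per element, and
// v(x) broadcasts a scalar into a real_vec_t.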
__global__
void comm_manual_aosoa (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
real_vec_t *__restrict__ sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict__ sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
// index of the package to process == global thread id (gid)
#define package_id (gid * dim * dim * 2)
#define sigma_real(i, j) (package_id + 2 * (dim * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (dim * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
real_vec_t tmp_real = v(0.0);
real_vec_t tmp_imag = v(0.0);
for (int k = 0; k < dim; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
__global__
void comm_manual_aosoa_constants (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_vec_t *__restrict__ sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict__ sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
real_vec_t tmp_real = v(0.0);
real_vec_t tmp_imag = v(0.0);
for (int k = 0; k < DIM; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
__global__
void comm_manual_aosoa_constants_perm (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_vec_t *__restrict__ sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict__ sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_vec_t ham_real_tmp = v(hamiltonian[ham_real(i, k)]);
real_vec_t ham_imag_tmp = v(hamiltonian[ham_imag(i, k)]);
real_vec_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_vec_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
#ifdef USE_INITZERO
real_vec_t tmp_real = v(0.0);
real_vec_t tmp_imag = v(0.0);
#else
real_vec_t tmp_real = sigma_out[sigma_real(i, j)];
real_vec_t tmp_imag = sigma_out[sigma_imag(i, j)];
#endif
tmp_imag -= ham_real_tmp * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_real_tmp * hamiltonian[ham_real(k, j)];
tmp_imag += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_real_tmp * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_imag_tmp * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
#ifdef USE_INITZERO
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
#else
sigma_out[sigma_real(i, j)] = tmp_real;
sigma_out[sigma_imag(i, j)] = tmp_imag;
#endif
}
}
}
}
__global__
void comm_manual_aosoa_constants_perm_prefetch (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_vec_t *__restrict__ sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict__ sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
int j = 0;
//(sigma_out.get_pointer() + sigma_real(i, j)).prefetch(2 * DIM);
for (j = 0; j < DIM; ++j) {
real_vec_t tmp_real = v(0.0);
real_vec_t tmp_imag = v(0.0);
for (int k = 0; k < DIM; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
__global__
void comm_manual_aosoa_direct (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2,
const int dim)
{
real_vec_t *__restrict__ sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict__ sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * dim * dim * 2)
#define sigma_real(i, j) (package_id + 2 * (dim * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (dim * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
for (int k = 0; k < dim; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_manual_aosoa_constants_direct (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_vec_t *__restrict__ sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict__ sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
for (int k = 0; k < DIM; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_manual_aosoa_constants_direct_prefetch (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_vec_t *__restrict__ sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict__ sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
// prefetch result memory for the next inner loops
int j = 0;
//prefetch(&sigma_out[sigma_real(i, j)], 2 * DIM);
//(sigma_out.get_pointer() + sigma_real(i, j)).prefetch(2 * DIM);
for (j = 0; j < DIM; ++j) {
for (int k = 0; k < DIM; ++k)
{
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_manual_aosoa_constants_direct_perm (
const real_2_t * __restrict__ sigma2_in,
real_2_t * __restrict__ sigma2_out,
const real_2_t * __restrict__ hamiltonian2)
{
real_vec_t *__restrict__ sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict__ sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict__ hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_vec_t ham_real_tmp = v(hamiltonian[ham_real(i, k)]);
real_vec_t ham_imag_tmp = v(hamiltonian[ham_imag(i, k)]);
real_vec_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_vec_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
sigma_out[sigma_imag(i, j)] -= ham_real_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_real_tmp * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_real_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_imag_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
}
}
}
}
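// Final kernel: the Hamiltonian and the current sigma matrices are staged in shared memory, and
// each thread updates element (i,j) of two consecutive sigma matrices at once by packing them
// into the .x and .y components of its real_2_t accumulators.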
__global__
void final_gpu_kernel (
const real_2_t * __restrict__ sigma_in,
real_2_t * __restrict__ sigma_out,
const real_2_t * __restrict__ hamiltonian,
const int num)
{
#define id_2d_to_1d(i,j) ((i) * DIM + (j))
#define sigma_id(i,j,m) ((m) * DIM * DIM + ((i) * DIM + (j)))
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
// Local memory: shared between all work items in the same work group
// 2-way shared memory bank conflicts will occur for real_t = double
// real parts and imaginary parts are stored separately to avoid 4-way bank conflicts in case of real_2_t = double2
// Input sigma matrix: real part (2 matrices are processed at once)
// Input sigma matrix: imag part (2 matrices are processed at once)
__shared__ real_t ham_local_real[DIM*DIM];
__shared__ real_t ham_local_imag[DIM*DIM];
__shared__ real_t sigma_local_real[2][NUM_SUB_GROUPS][DIM*DIM];
__shared__ real_t sigma_local_imag[2][NUM_SUB_GROUPS][DIM*DIM];
// Determine matrix index (i,j) this work item is responsible for
int ij = threadIdx.x;
int i = ij / DIM; // Matrix index 'i' to be processed by this work item in any of 'start -> stop' matrices
int j = ij % DIM; // Matrix index 'j' to be processed by this work item in any of 'start -> stop' matrices
// Determine working set : Each work item participates in processing CHUNK_SIZE matrices : 'start -> stop'
int sub_group_id = threadIdx.y; // Local matrix ID within work group
int start = blockIdx.x * NUM_SUB_GROUPS * CHUNK_SIZE + sub_group_id * CHUNK_SIZE; // Global matrix ID : start
int stop = MIN(num, start + CHUNK_SIZE); // Global matrix ID : stop
// Local variables
real_2_t snew1_ij, snew2_ij;
real_2_t s1, s2;
// Load Hamiltonian into local memory: only the first sub-group participates
if (ij < (DIM * DIM) && sub_group_id == 0)
{
const real_2_t h = hamiltonian[ij];
ham_local_real[ij] = h.x;
ham_local_imag[ij] = h.y;
}
// Process all CHUNK_SIZE matrices: two matrices are processed at once (therefore increment 2)
for (int m = start; m < stop; m += 2)
{
__syncthreads();
if (ij < (DIM * DIM))
{ // Load input sigma matrix into local memory: only threads with valid IDs participate
s1 = sigma_in[sigma_id(i, j, m)]; // Real and imaginary part of matrix 'm', element (i,j)
sigma_local_real[0][sub_group_id][ij] = s1.x;
sigma_local_imag[0][sub_group_id][ij] = s1.y;
s2 = sigma_in[sigma_id(i, j, m + 1)]; // Real and imaginary part of matrix 'm+1', element (i,j)
sigma_local_real[1][sub_group_id][ij] = s2.x;
sigma_local_imag[1][sub_group_id][ij] = s2.y;
s1 = sigma_out[sigma_id(i, j, m)]; // Prefetch real and imaginary part of output sigma matrix 'm', element (i,j)
snew1_ij.x = s1.x;
snew2_ij.x = s1.y;
s2 = sigma_out[sigma_id(i, j, m + 1)]; // Prefetch real and imaginary part of output sigma matrix 'm+1', element (i,j)
snew1_ij.y = s2.x;
snew2_ij.y = s2.y;
}
__syncthreads();
if (ij < (DIM * DIM))
{
// Compute commutator: [H,sigma] = H * sigma - sigma * H <=> [H,sigma]_ij = \sum_k ( H_ik * sigma_kj - sigma_ik * H_kj )
for (int k = 0; k < DIM; ++k)
{
const int ik = id_2d_to_1d(i, k);
const int kj = id_2d_to_1d(k, j);
// Reassemble real_2_t elements from local memory: 'vector processing' gives better performance here
s1 = {sigma_local_real[0][sub_group_id][kj], sigma_local_real[1][sub_group_id][kj]};
s2 = {sigma_local_imag[0][sub_group_id][kj], sigma_local_imag[1][sub_group_id][kj]};
snew1_ij += ham_local_real[ik] * s2;
snew1_ij += ham_local_imag[ik] * s1;
snew2_ij -= ham_local_real[ik] * s1;
snew2_ij += ham_local_imag[ik] * s2;
// Reassemble real_2_t elements from local memory: 'vector processing' gives better performance here
s1 = {sigma_local_real[0][sub_group_id][ik], sigma_local_real[1][sub_group_id][ik]};
s2 = {sigma_local_imag[0][sub_group_id][ik], sigma_local_imag[1][sub_group_id][ik]};
snew1_ij -= ham_local_real[kj] * s2;
snew1_ij += ham_local_imag[kj] * s1;
snew2_ij += ham_local_real[kj] * s1;
snew2_ij -= ham_local_imag[kj] * s2;
}
// Write output sigma matrices 'm' and 'm+1', element (i,j)
sigma_out[sigma_id(i, j, m)] = {snew1_ij.x, snew2_ij.x};
sigma_out[sigma_id(i, j, m + 1)] = {snew1_ij.y, snew2_ij.y};
}
}
}
|
a8cf1b2c1c4ef8d5372deb3476f869e3c12c8d0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "internal_shared.hpp"
#include "opencv2/gpu/device/utility.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
#define UINT_BITS 32U
//Warps == subhistograms per threadblock
#define WARP_COUNT 6
//Threadblock size
#define HISTOGRAM256_THREADBLOCK_SIZE (WARP_COUNT * OPENCV_GPU_WARP_SIZE)
#define HISTOGRAM256_BIN_COUNT 256
//Shared memory per threadblock
#define HISTOGRAM256_THREADBLOCK_MEMORY (WARP_COUNT * HISTOGRAM256_BIN_COUNT)
#define PARTIAL_HISTOGRAM256_COUNT 240
#define MERGE_THREADBLOCK_SIZE 256
#define USE_SMEM_ATOMICS (__CUDA_ARCH__ >= 120)
namespace cv { namespace gpu { namespace histograms
{
#if (!USE_SMEM_ATOMICS)
#define TAG_MASK ( (1U << (UINT_BITS - OPENCV_GPU_LOG_WARP_SIZE)) - 1U )
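// Software emulation of a shared-memory atomic increment for devices without shared-memory
// atomics: each thread tags the upper bits of the bin counter with its lane id and retries
// until its own tagged value survives the conflicting writes.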
__forceinline__ __device__ void addByte(volatile uint* s_WarpHist, uint data, uint threadTag)
{
uint count;
do
{
count = s_WarpHist[data] & TAG_MASK;
count = threadTag | (count + 1);
s_WarpHist[data] = count;
} while (s_WarpHist[data] != count);
}
#else
#define TAG_MASK 0xFFFFFFFFU
__forceinline__ __device__ void addByte(uint* s_WarpHist, uint data, uint threadTag)
{
atomicAdd(s_WarpHist + data, 1);
}
#endif
__forceinline__ __device__ void addWord(uint* s_WarpHist, uint data, uint tag, uint pos_x, uint cols)
{
uint x = pos_x << 2;
if (x + 0 < cols) addByte(s_WarpHist, (data >> 0) & 0xFFU, tag);
if (x + 1 < cols) addByte(s_WarpHist, (data >> 8) & 0xFFU, tag);
if (x + 2 < cols) addByte(s_WarpHist, (data >> 16) & 0xFFU, tag);
if (x + 3 < cols) addByte(s_WarpHist, (data >> 24) & 0xFFU, tag);
}
__global__ void histogram256(const PtrStep_<uint> d_Data, uint* d_PartialHistograms, uint dataCount, uint cols)
{
//Per-warp subhistogram storage
__shared__ uint s_Hist[HISTOGRAM256_THREADBLOCK_MEMORY];
uint* s_WarpHist= s_Hist + (threadIdx.x >> OPENCV_GPU_LOG_WARP_SIZE) * HISTOGRAM256_BIN_COUNT;
//Clear shared memory storage for current threadblock before processing
#pragma unroll
for (uint i = 0; i < (HISTOGRAM256_THREADBLOCK_MEMORY / HISTOGRAM256_THREADBLOCK_SIZE); i++)
s_Hist[threadIdx.x + i * HISTOGRAM256_THREADBLOCK_SIZE] = 0;
//Cycle through the entire data set, update subhistograms for each warp
const uint tag = threadIdx.x << (UINT_BITS - OPENCV_GPU_LOG_WARP_SIZE);
__syncthreads();
const uint colsui = d_Data.step / sizeof(uint);
for(uint pos = blockIdx.x * blockDim.x + threadIdx.x; pos < dataCount; pos += blockDim.x * gridDim.x)
{
uint pos_y = pos / colsui;
uint pos_x = pos % colsui;
uint data = d_Data.ptr(pos_y)[pos_x];
addWord(s_WarpHist, data, tag, pos_x, cols);
}
//Merge per-warp histograms into per-block and write to global memory
__syncthreads();
for(uint bin = threadIdx.x; bin < HISTOGRAM256_BIN_COUNT; bin += HISTOGRAM256_THREADBLOCK_SIZE)
{
uint sum = 0;
for (uint i = 0; i < WARP_COUNT; i++)
sum += s_Hist[bin + i * HISTOGRAM256_BIN_COUNT] & TAG_MASK;
d_PartialHistograms[blockIdx.x * HISTOGRAM256_BIN_COUNT + bin] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Merge histogram256() output
// Run one threadblock per bin; each threadblock adds up the same bin counter
// from every partial histogram. Reads are uncoalesced, but mergeHistogram256
// takes only a fraction of total processing time
////////////////////////////////////////////////////////////////////////////////
__global__ void mergeHistogram256(const uint* d_PartialHistograms, int* d_Histogram)
{
uint sum = 0;
#pragma unroll
for (uint i = threadIdx.x; i < PARTIAL_HISTOGRAM256_COUNT; i += MERGE_THREADBLOCK_SIZE)
sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM256_BIN_COUNT];
__shared__ uint data[MERGE_THREADBLOCK_SIZE];
data[threadIdx.x] = sum;
for (uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1)
{
__syncthreads();
if(threadIdx.x < stride)
data[threadIdx.x] += data[threadIdx.x + stride];
}
if(threadIdx.x == 0)
d_Histogram[blockIdx.x] = saturate_cast<int>(data[0]);
}
void histogram256_gpu(DevMem2D src, int* hist, uint* buf, hipStream_t stream)
{
hipLaunchKernelGGL(( histogram256), dim3(PARTIAL_HISTOGRAM256_COUNT), dim3(HISTOGRAM256_THREADBLOCK_SIZE), 0, stream,
DevMem2D_<uint>(src),
buf,
static_cast<uint>(src.rows * src.step / sizeof(uint)),
src.cols);
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( mergeHistogram256), dim3(HISTOGRAM256_BIN_COUNT), dim3(MERGE_THREADBLOCK_SIZE), 0, stream, buf, hist);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
__constant__ int c_lut[256];
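// equalizeHist maps each source pixel through the lookup table c_lut held in constant memory
// and scales the result by 255 / (src.cols * src.rows) to obtain the equalized value.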
__global__ void equalizeHist(const DevMem2D src, PtrStep dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < src.cols && y < src.rows)
{
const uchar val = src.ptr(y)[x];
const int lut = c_lut[val];
dst.ptr(y)[x] = __float2int_rn(255.0f / (src.cols * src.rows) * lut);
}
}
void equalizeHist_gpu(DevMem2D src, DevMem2D dst, const int* lut, hipStream_t stream)
{
dim3 block(16, 16);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
cudaSafeCall( hipMemcpyToSymbol(cv::gpu::histograms::c_lut, lut, 256 * sizeof(int), 0, hipMemcpyDeviceToDevice) );
hipLaunchKernelGGL(( equalizeHist), dim3(grid), dim3(block), 0, stream, src, dst);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
}}}
| a8cf1b2c1c4ef8d5372deb3476f869e3c12c8d0d.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "internal_shared.hpp"
#include "opencv2/gpu/device/utility.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
#define UINT_BITS 32U
//Warps == subhistograms per threadblock
#define WARP_COUNT 6
//Threadblock size
#define HISTOGRAM256_THREADBLOCK_SIZE (WARP_COUNT * OPENCV_GPU_WARP_SIZE)
#define HISTOGRAM256_BIN_COUNT 256
//Shared memory per threadblock
#define HISTOGRAM256_THREADBLOCK_MEMORY (WARP_COUNT * HISTOGRAM256_BIN_COUNT)
#define PARTIAL_HISTOGRAM256_COUNT 240
#define MERGE_THREADBLOCK_SIZE 256
#define USE_SMEM_ATOMICS (__CUDA_ARCH__ >= 120)
namespace cv { namespace gpu { namespace histograms
{
#if (!USE_SMEM_ATOMICS)
#define TAG_MASK ( (1U << (UINT_BITS - OPENCV_GPU_LOG_WARP_SIZE)) - 1U )
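// Software emulation of a shared-memory atomic increment for devices without shared-memory
// atomics: each thread tags the upper bits of the bin counter with its lane id and retries
// until its own tagged value survives the conflicting writes.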
__forceinline__ __device__ void addByte(volatile uint* s_WarpHist, uint data, uint threadTag)
{
uint count;
do
{
count = s_WarpHist[data] & TAG_MASK;
count = threadTag | (count + 1);
s_WarpHist[data] = count;
} while (s_WarpHist[data] != count);
}
#else
#define TAG_MASK 0xFFFFFFFFU
__forceinline__ __device__ void addByte(uint* s_WarpHist, uint data, uint threadTag)
{
atomicAdd(s_WarpHist + data, 1);
}
#endif
__forceinline__ __device__ void addWord(uint* s_WarpHist, uint data, uint tag, uint pos_x, uint cols)
{
uint x = pos_x << 2;
if (x + 0 < cols) addByte(s_WarpHist, (data >> 0) & 0xFFU, tag);
if (x + 1 < cols) addByte(s_WarpHist, (data >> 8) & 0xFFU, tag);
if (x + 2 < cols) addByte(s_WarpHist, (data >> 16) & 0xFFU, tag);
if (x + 3 < cols) addByte(s_WarpHist, (data >> 24) & 0xFFU, tag);
}
__global__ void histogram256(const PtrStep_<uint> d_Data, uint* d_PartialHistograms, uint dataCount, uint cols)
{
//Per-warp subhistogram storage
__shared__ uint s_Hist[HISTOGRAM256_THREADBLOCK_MEMORY];
uint* s_WarpHist= s_Hist + (threadIdx.x >> OPENCV_GPU_LOG_WARP_SIZE) * HISTOGRAM256_BIN_COUNT;
//Clear shared memory storage for current threadblock before processing
#pragma unroll
for (uint i = 0; i < (HISTOGRAM256_THREADBLOCK_MEMORY / HISTOGRAM256_THREADBLOCK_SIZE); i++)
s_Hist[threadIdx.x + i * HISTOGRAM256_THREADBLOCK_SIZE] = 0;
//Cycle through the entire data set, update subhistograms for each warp
const uint tag = threadIdx.x << (UINT_BITS - OPENCV_GPU_LOG_WARP_SIZE);
__syncthreads();
const uint colsui = d_Data.step / sizeof(uint);
for(uint pos = blockIdx.x * blockDim.x + threadIdx.x; pos < dataCount; pos += blockDim.x * gridDim.x)
{
uint pos_y = pos / colsui;
uint pos_x = pos % colsui;
uint data = d_Data.ptr(pos_y)[pos_x];
addWord(s_WarpHist, data, tag, pos_x, cols);
}
//Merge per-warp histograms into per-block and write to global memory
__syncthreads();
for(uint bin = threadIdx.x; bin < HISTOGRAM256_BIN_COUNT; bin += HISTOGRAM256_THREADBLOCK_SIZE)
{
uint sum = 0;
for (uint i = 0; i < WARP_COUNT; i++)
sum += s_Hist[bin + i * HISTOGRAM256_BIN_COUNT] & TAG_MASK;
d_PartialHistograms[blockIdx.x * HISTOGRAM256_BIN_COUNT + bin] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Merge histogram256() output
// Run one threadblock per bin; each threadblock adds up the same bin counter
// from every partial histogram. Reads are uncoalesced, but mergeHistogram256
// takes only a fraction of total processing time
////////////////////////////////////////////////////////////////////////////////
__global__ void mergeHistogram256(const uint* d_PartialHistograms, int* d_Histogram)
{
uint sum = 0;
#pragma unroll
for (uint i = threadIdx.x; i < PARTIAL_HISTOGRAM256_COUNT; i += MERGE_THREADBLOCK_SIZE)
sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM256_BIN_COUNT];
__shared__ uint data[MERGE_THREADBLOCK_SIZE];
data[threadIdx.x] = sum;
for (uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1)
{
__syncthreads();
if(threadIdx.x < stride)
data[threadIdx.x] += data[threadIdx.x + stride];
}
if(threadIdx.x == 0)
d_Histogram[blockIdx.x] = saturate_cast<int>(data[0]);
}
void histogram256_gpu(DevMem2D src, int* hist, uint* buf, cudaStream_t stream)
{
histogram256<<<PARTIAL_HISTOGRAM256_COUNT, HISTOGRAM256_THREADBLOCK_SIZE, 0, stream>>>(
DevMem2D_<uint>(src),
buf,
static_cast<uint>(src.rows * src.step / sizeof(uint)),
src.cols);
cudaSafeCall( cudaGetLastError() );
mergeHistogram256<<<HISTOGRAM256_BIN_COUNT, MERGE_THREADBLOCK_SIZE, 0, stream>>>(buf, hist);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
__constant__ int c_lut[256];
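// equalizeHist maps each source pixel through the lookup table c_lut held in constant memory
// and scales the result by 255 / (src.cols * src.rows) to obtain the equalized value.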
__global__ void equalizeHist(const DevMem2D src, PtrStep dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < src.cols && y < src.rows)
{
const uchar val = src.ptr(y)[x];
const int lut = c_lut[val];
dst.ptr(y)[x] = __float2int_rn(255.0f / (src.cols * src.rows) * lut);
}
}
void equalizeHist_gpu(DevMem2D src, DevMem2D dst, const int* lut, cudaStream_t stream)
{
dim3 block(16, 16);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
cudaSafeCall( cudaMemcpyToSymbol(cv::gpu::histograms::c_lut, lut, 256 * sizeof(int), 0, cudaMemcpyDeviceToDevice) );
equalizeHist<<<grid, block, 0, stream>>>(src, dst);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
}}}
|
0d782b513d6e91552128b86d391d2fa9b89b13da.hip | // !!! This is a file automatically generated by hipify!!!
// this is the main function file
//this program is a 2D fluid simulation using the finite volume method on a multi-block structured mesh
// the code is accelerated on a GPU device
// all rights reserved by the author cofludy
// contact: [email protected]
#include<fstream>
#include <string>
#include <iomanip>
#include<iostream>
#include "const_var.h"
#include "Global_var.h"
#include"sub_init.h"
#include "sub_boundary.h"
#include "sub_Finite_Difference.h"
#include "sub_turbulence_SST.h"
#include "common.h"
#include "sub_NS_singlegid.h"
#include "postAnalyze.h"
hipError_t CheckCudaDevice();
void read_parameter();
void set_control_para();
void output_Res(int nMesh);
void output(int nMesh);
void myFree();
int FileOpenFlag = 0;
extern bool USEGPU = true;
int main()
{
printf("----------------- OpenCFD-EC2D in CUDA ver 2.0.0 --------------------------\n");
printf(" Copyright by Lee Hiloong, [email protected]\n");
printf(" Programming by Lee HiLoong 2017-7-12 \n \n");
// Add vectors in parallel.
hipError_t cudaStatus = CheckCudaDevice();
read_parameter(); // Read flow parameters and control information
check_mesh_multigrid(); // Check the maximum number of multigrid levels allowed by the mesh configuration and set the multigrid level count
Init(); // Initialize, create data structures; read geometry and physical information
set_control_para(); // Set control information on each grid level (numerical scheme, flux technique, turbulence model, time-advancing method)
Update_coordinate_buffer(); // Use the connectivity information to obtain the coordinates of ghost cells
Init_FiniteDifference(); // Set the finite-difference region and compute the Jacobian transformation coefficients
if (Iflag_turbulence_model == Turbulence_SST || Iflag_turbulence_model == Turbulence_SA) {
comput_dw(); // Compute the distance from each cell (center) to the wall (needed by the SA or SST model)
}
Init_flow();
//Mesh_TYPE & MP = Mesh[1];
//Block_TYPE & B = MP.Block[1]; // block mBlock of the nMesh-th grid level
//printf("%f, %f, %f, %f", B.U[1][1][1], B.U[1][1][2], B.U[1][1][3], B.U[1][1][4]);
//PAUSE;
printf("start ... ...\n");
// Measure the running time
clock_t start, finish;
start = clock();
//------------------------------------------------------------------------
// Time advancement on one, two or three multigrid levels, using 1st-order Euler, 3rd-order RK or LU-SGS
for (; Mesh[1].tt < t_end && Mesh[1].Kstep<2000; ) {
// Single-grid time advancement (1st-order Euler, 3rd-order RK, LU-SGS)
NS_Time_advance(1);
if (Mesh[1].Kstep % Kstep_show == 0) {
output_Res(1); // Print residuals (finest grid)
}
if (Mesh[1].Kstep % Kstep_save == 0) {
output(1); // Output the flow field (finest grid)
/*outputPressureOnWall(1);*/
outPutVelcoity();
}
}
output(1);
outPutVelcoity();
finish = clock();
double duration = (double)(finish - start) / CLOCKS_PER_SEC;
std::cout << "running time = " << duration << " s = " << duration / 60.0 << " min\n\n Press Any Button to Exit" << std::endl;
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
PAUSE;
myFree();
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t CheckCudaDevice()
{
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
HANDLE_ERROR(hipSetDevice(0));
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
}
return cudaStatus;
}
// Read flow parameters and control information
void read_parameter()
{
std::ifstream fcin;
fcin.open("control.in");
std::string tempStr;
getline(fcin, tempStr);
fcin >> Ma >> Re >> gamma >> AoA >> Pr >> t_end >> Kstep_save >> If_viscous >> Iflag_turbulence_model >> Iflag_init;
getline(fcin, tempStr); getline(fcin, tempStr);
fcin >> Iflag_local_dt >> dt_global >> CFL >> dtmax >> dtmin >> Time_Method >> p_outlet >> T_inf >> Twall >> vt_inf >> Kt_inf >> Wt_inf;
getline(fcin, tempStr); getline(fcin, tempStr);
fcin >> Iflag_Scheme >> Iflag_Flux >> IFlag_Reconstruction >> Kstep_show;
getline(fcin, tempStr); getline(fcin, tempStr);
fcin >> Num_Mesh >> Num_Threads >> Nstep_Inner_Limit >> Res_Inner_Limit;
getline(fcin, tempStr); getline(fcin, tempStr);
for (int i = 1; i <= Num_Mesh; ++i) {
fcin >> Pre_Step_Mesh[i];
}
fcin.close();
//printf("try= %d", Kstep_save);
if ((Time_Method == Time_LU_SGS || Time_Method == Time_Dual_LU_SGS) && 1 != Num_Mesh) {
printf("In this version (ver 1.5.1 ), LU_SGS method Do Not support Multigrid !!!\n");
printf("Please modify 'control.in' to choose single-grid or other time method\n");
}
if (Iflag_turbulence_model == Turbulence_SST) {
Nvar = 6; // 6 variables (4 flow variables + turbulent kinetic energy k + specific dissipation rate w)
}
else if (Iflag_turbulence_model == Turbulence_SA) {
Nvar = 5;
}
else {
Nvar = 4;
}
AoA = AoA*PI / 180.e0;
Cv = 1.e0 / (gamma*(gamma - 1.e0)*Ma*Ma);
Cp = Cv*gamma;
Twall = Twall / T_inf;
}
// Set control information on each grid level
void set_control_para()
{
Mesh_TYPE & MP = Mesh[1]; // The finest grid
// Control parameters on the finest grid are the same as the main control parameters
MP.Iflag_turbulence_model = Iflag_turbulence_model;
MP.Iflag_Scheme = Iflag_Scheme;
MP.IFlag_flux = Iflag_Flux;
MP.IFlag_Reconstruction = IFlag_Reconstruction;
MP.Nvar = Nvar; // Number of variables (equations); the finest grid uses the turbulence model, so it equals Nvar
// Set control parameters on the coarse grids
for (int nMesh = 2; nMesh <= Num_Mesh; ++nMesh) {
Mesh_TYPE & MP = Mesh[nMesh];
MP.Iflag_turbulence_model = Turbulence_NONE;
MP.Iflag_Scheme = Scheme_UD1;
MP.IFlag_flux = Iflag_Flux;
MP.IFlag_Reconstruction = IFlag_Reconstruction;
MP.Nvar = 4; // Number of variables (equations); coarse grids do not use a turbulence model, so the number is 4
}
}
//-------------------------------------- -
// Print residuals (maximum and RMS residuals)
void output_Res(int nMesh)
{
printf("\n\n Kstep= %d , t= %f \n", Mesh[nMesh].Kstep, Mesh[nMesh].tt);
printf("----------The Max Residuals are-------- ---Mesh--- %d\n", nMesh);
for (int i = 1; i <= Nvar; ++i) {
printf("%13.9f ", Mesh[nMesh].Res_max[i]);
}
printf("\n");
printf(" The R.M.S Residuals are \n");
for (int i = 1; i <= Nvar; ++i) {
printf("%13.9f ", Mesh[nMesh].Res_rms[i]);
}
printf("\n");
std::ofstream fcout;
if (!FileOpenFlag) {
FileOpenFlag = 1;
fcout.open("Residual.dat");
fcout.close();
}
fcout.open("Residual.dat", std::ios::app);
fcout << std::setprecision(15);
if (fcout.is_open()) {
fcout << Mesh[nMesh].Kstep << " ";
for (int i = 1; i <= Nvar; ++i) {
//printf("%13.9f ", Mesh[nMesh].Res_max[i]);
fcout << Mesh[nMesh].Res_max[i] << " ";
}
//fcout << std::endl;
for (int i = 1; i <= Nvar; ++i) {
//printf("%13.9f ", Mesh[nMesh].Res_rms[i]);
fcout << Mesh[nMesh].Res_rms[i] << " ";
}
fcout << std::endl;
fcout.close();
}
}
//----------------------------------------------------------------------
// Output geometry and physical quantities (tecplot format): finest grid flow2d.dat; coarser grid flow2d-2.dat; coarsest grid flow2d-3.dat
void output(int nMesh)
{
std::string filename;
if (nMesh == 1) {
filename = "flow2d.dat";
}
else {
filename = "flow2d-";
std::string tempStr = std::to_string(nMesh);
filename = filename + tempStr + ".plt"; // flow2d-2.dat ; flow2d-3.dat
}
if (nMesh == 1) {
std::string filename1 = "flow2d-";
std::string tempStr1 = std::to_string(Mesh[1].Kstep);
filename1 = filename1 + tempStr1 + ".plt"; // flow2d-2.dat ; flow2d-3.dat
Mesh_TYPE & MP = Mesh[1];
std::ofstream fcout;
fcout.open(filename1);
fcout << std::setprecision(12);
fcout << " variables=x,y,d,u,v,T,p " << std::endl;
for (int m = 1; m <= MP.Num_Block; ++m) {
Block_TYPE &B = Mesh[nMesh].Block[m];
fcout << "zone i= " << B.nx + 1 << " j= " << B.ny + 1 << std::endl;
for (int j = LAP; j <= B.ny + LAP; ++j) {
for (int i = LAP; i <= B.nx + LAP; ++i) {
double d1 = B.U[i][j][1];
double u1 = B.U[i][j][2] / d1;
double v1 = B.U[i][j][3] / d1;
double T1 = (B.U[i][j][4] - 0.50*d1*(u1*u1 + v1*v1)) / (Cv*d1);
double p1 = d1*T1 / (gamma*Ma*Ma);
fcout << B.x1[i][j] << " " << B.y1[i][j] << " " << d1 << " " << u1 << " " << v1 << " " << T1 << " " << p1 << std::endl;
}
}
}
fcout.close();
}
printf("write data file ...\n");
Mesh_TYPE & MP = Mesh[nMesh];
std::ofstream fcout;
fcout.open(filename);
fcout << std::setprecision(12);
fcout << " variables=x,y,d,u,v,T,p,Amut " << std::endl;
for (int m = 1; m <= MP.Num_Block; ++m) {
Block_TYPE &B = Mesh[nMesh].Block[m];
fcout << "zone i= " << B.nx + 1 << " j= " << B.ny + 1 << std::endl;
for (int j = LAP; j <= B.ny + LAP; ++j) {
for (int i = LAP; i <= B.nx + LAP; ++i) {
double d1 = B.U[i][j][1];
double u1 = B.U[i][j][2] / d1;
double v1 = B.U[i][j][3] / d1;
double T1 = (B.U[i][j][4] - 0.50*d1*(u1*u1 + v1*v1)) / (Cv*d1);
fcout << B.x1[i][j] << " " << B.y1[i][j] << " " << d1 << " " << u1 << " " << v1 << " " << T1 << " ";
fcout << d1*T1 / (gamma*Ma*Ma) << " " << B.Amu_t[i][j] * Re << std::endl;
}
}
}
fcout.close();
if (MP.Nvar == 5) {
fcout.open("SA2d.dat");
fcout << std::setprecision(12);
fcout << " variables=x,y,vt " << std::endl;
for (int m = 1; m <= MP.Num_Block; ++m) {
Block_TYPE &B = Mesh[nMesh].Block[m];
fcout << "zone i= " << B.nx + 1 << " j= " << B.ny + 1 << std::endl;
for (int j = LAP; j <= B.ny + LAP; ++j) {
for (int i = LAP; i <= B.nx + LAP; ++i) {
fcout << B.x1[i][j] << " " << B.y1[i][j] << " " << B.U[i][j][5] << std::endl;
}
}
}
fcout.close();
}
if (MP.Nvar == 6) {
fcout.open("SST2D.dat");
fcout << std::setprecision(12);
fcout << " variables=x,y,Kt, Wt " << std::endl;
for (int m = 1; m <= MP.Num_Block; ++m) {
Block_TYPE &B = Mesh[nMesh].Block[m];
fcout << "zone i= " << B.nx + 1 << " j= " << B.ny + 1 << std::endl;
for (int j = LAP; j <= B.ny + LAP; ++j) {
for (int i = LAP; i <= B.nx + LAP; ++i) {
fcout << B.x1[i][j] << " " << B.y1[i][j] << " " << B.U[i][j][5] << " " << B.U[i][j][6] << std::endl;
}
}
}
fcout.close();
}
}
void outputDebug()
{
#if debug
Mesh_TYPE & MP = Mesh[1];
std::ofstream fcout;
fcout.open("outputDebug.plt");
fcout << std::setprecision(12);
fcout << " variables=x,y,d,u,v,T,p " << std::endl;
for (int m = 1; m <= MP.Num_Block; ++m) {
Block_TYPE &B = Mesh[1].Block[m];
fcout << "zone i= " << B.nx + 1 << " j= " << B.ny + 1 << std::endl;
for (int j = 1; j <= B.ny + 2 * LAP - 1; ++j) {
for (int i = 1; i <= B.nx + 2 * LAP - 1; ++i) {
double d1 = B.U[i][j][1];
double u1 = B.U[i][j][2] / d1;
double v1 = B.U[i][j][3] / d1;
double T1 = (B.U[i][j][4] - 0.50*d1*(u1*u1 + v1*v1)) / (Cv*d1);
double p1 = d1*T1 / (gamma*Ma*Ma);
if (abs(d1) > 100 || abs(T1) > 100 || abs(u1) > 100 || abs(v1) > 100) {
printf("%d, %d, \n %f, %f, %f, %f,", i, j, d1, u1, v1, T1);
printf("this error is at %s file at %d line", __FILE__, __LINE__);
PAUSE;
}
fcout << B.x1[i][j] << " " << B.y1[i][j] << " " << d1 << " " << u1 << " " << v1 << " " << T1 << " " << p1 << std::endl;
}
}
}
fcout.close();
#endif
}
void myFree()
{
for (int iMesh = 1; iMesh <= Num_Mesh; ++iMesh) {
Mesh_TYPE & MP = Mesh[iMesh];
for (int iBlock = 1; iBlock <= MP.Num_Block; ++iBlock) {
Block_TYPE & B = MP.Block[iBlock];
int nx = B.nx; int ny = B.ny;
int mm = nx + 2 * LAP; int nn = ny + 2 * LAP;
deleteMatrix(B.x, mm); deleteMatrix(B.y, mm);
int mm1 = nx + 2 * LAP - 1; int nn1 = ny + 2 * LAP - 1;
deleteMatrix(B.x1, mm1); deleteMatrix(B.y1, mm1);
deleteMatrix(B.U, mm1, nn1); deleteMatrix(B.deltU, mm1, nn1);
deleteMatrix(B.Amu, mm1); deleteMatrix(B.Amu_t, mm1);
deleteMatrix(B.vol, nx);
deleteMatrix(B.si, nx); deleteMatrix(B.sj, nx);
deleteMatrix(B.ni1, nx); deleteMatrix(B.ni2, nx);
deleteMatrix(B.nj1, nx); deleteMatrix(B.nj2, nx);
// Spectral radii
deleteMatrix(B.Lci, nx); deleteMatrix(B.Lcj, nx);
deleteMatrix(B.Lvi, nx); deleteMatrix(B.Lvj, nx);
deleteMatrix(B.Un, nx, ny); // Values at the previous time step
if (Time_Method == Time_Dual_LU_SGS) {
deleteMatrix(B.Un1, nx); // Values at time step n-1, used in the dual-time-step LU_SGS method
}
}
}
free(Mesh);
Mesh = NULL;
}
| 0d782b513d6e91552128b86d391d2fa9b89b13da.cu | // this is the main function file
//this program is a 2D fluid simulation using the finite volume method on a multi-block structured mesh
// the code is accelerated on a GPU device
// all rights reserved by the author cofludy
// contact: [email protected]
#include<fstream>
#include <string>
#include <iomanip>
#include<iostream>
#include "const_var.h"
#include "Global_var.h"
#include"sub_init.h"
#include "sub_boundary.h"
#include "sub_Finite_Difference.h"
#include "sub_turbulence_SST.h"
#include "common.h"
#include "sub_NS_singlegid.h"
#include "postAnalyze.h"
cudaError_t CheckCudaDevice();
void read_parameter();
void set_control_para();
void output_Res(int nMesh);
void output(int nMesh);
void myFree();
int FileOpenFlag = 0;
extern bool USEGPU = true;
int main()
{
printf("----------------- OpenCFD-EC2D in CUDA ver 2.0.0 --------------------------\n");
printf(" Copyright by Lee Hiloong, [email protected]\n");
printf(" Programming by Lee HiLoong 2017-7-12 \n \n");
// Add vectors in parallel.
cudaError_t cudaStatus = CheckCudaDevice();
read_parameter(); // Read flow parameters and control information
check_mesh_multigrid(); // Check the maximum number of multigrid levels allowed by the mesh configuration and set the multigrid level count
Init(); // Initialize, create data structures; read geometry and physical information
set_control_para(); // Set control information on each grid level (numerical scheme, flux technique, turbulence model, time-advancing method)
Update_coordinate_buffer(); // Use the connectivity information to obtain the coordinates of ghost cells
Init_FiniteDifference(); // Set the finite-difference region and compute the Jacobian transformation coefficients
if (Iflag_turbulence_model == Turbulence_SST || Iflag_turbulence_model == Turbulence_SA) {
comput_dw(); // Compute the distance from each cell (center) to the wall (needed by the SA or SST model)
}
Init_flow();
//Mesh_TYPE & MP = Mesh[1];
//Block_TYPE & B = MP.Block[1]; // block mBlock of the nMesh-th grid level
//printf("%f, %f, %f, %f", B.U[1][1][1], B.U[1][1][2], B.U[1][1][3], B.U[1][1][4]);
//PAUSE;
printf("start ... ...\n");
// Measure the running time
clock_t start, finish;
start = clock();
//------------------------------------------------------------------------
// Time advancement on one, two or three multigrid levels, using 1st-order Euler, 3rd-order RK or LU-SGS
for (; Mesh[1].tt < t_end && Mesh[1].Kstep<2000; ) {
// Single-grid time advancement (1st-order Euler, 3rd-order RK, LU-SGS)
NS_Time_advance(1);
if (Mesh[1].Kstep % Kstep_show == 0) {
output_Res(1); // Print residuals (finest grid)
}
if (Mesh[1].Kstep % Kstep_save == 0) {
output(1); // Output the flow field (finest grid)
/*outputPressureOnWall(1);*/
outPutVelcoity();
}
}
output(1);
outPutVelcoity();
finish = clock();
double duration = (double)(finish - start) / CLOCKS_PER_SEC;
std::cout << "running time = " << duration << " s = " << duration / 60.0 << " min\n\n Press Any Button to Exit" << std::endl;
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
PAUSE;
myFree();
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t CheckCudaDevice()
{
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
HANDLE_ERROR(cudaSetDevice(0));
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
}
return cudaStatus;
}
// Read flow parameters and control information
void read_parameter()
{
std::ifstream fcin;
fcin.open("control.in");
std::string tempStr;
getline(fcin, tempStr);
fcin >> Ma >> Re >> gamma >> AoA >> Pr >> t_end >> Kstep_save >> If_viscous >> Iflag_turbulence_model >> Iflag_init;
getline(fcin, tempStr); getline(fcin, tempStr);
fcin >> Iflag_local_dt >> dt_global >> CFL >> dtmax >> dtmin >> Time_Method >> p_outlet >> T_inf >> Twall >> vt_inf >> Kt_inf >> Wt_inf;
getline(fcin, tempStr); getline(fcin, tempStr);
fcin >> Iflag_Scheme >> Iflag_Flux >> IFlag_Reconstruction >> Kstep_show;
getline(fcin, tempStr); getline(fcin, tempStr);
fcin >> Num_Mesh >> Num_Threads >> Nstep_Inner_Limit >> Res_Inner_Limit;
getline(fcin, tempStr); getline(fcin, tempStr);
for (int i = 1; i <= Num_Mesh; ++i) {
fcin >> Pre_Step_Mesh[i];
}
fcin.close();
//printf("try= %d", Kstep_save);
if ((Time_Method == Time_LU_SGS || Time_Method == Time_Dual_LU_SGS) && 1 != Num_Mesh) {
printf("In this version (ver 1.5.1 ), LU_SGS method Do Not support Multigrid !!!\n");
printf("Please modify 'control.in' to choose single-grid or other time method\n");
}
if (Iflag_turbulence_model == Turbulence_SST) {
Nvar = 6; // 6 variables (4 flow variables + turbulent kinetic energy k + specific dissipation rate w)
}
else if (Iflag_turbulence_model == Turbulence_SA) {
Nvar = 5;
}
else {
Nvar = 4;
}
AoA = AoA*PI / 180.e0;
Cv = 1.e0 / (gamma*(gamma - 1.e0)*Ma*Ma);
Cp = Cv*gamma;
Twall = Twall / T_inf;
}
// Set control information on each grid level
void set_control_para()
{
Mesh_TYPE & MP = Mesh[1]; // The finest grid
// Control parameters on the finest grid are the same as the main control parameters
MP.Iflag_turbulence_model = Iflag_turbulence_model;
MP.Iflag_Scheme = Iflag_Scheme;
MP.IFlag_flux = Iflag_Flux;
MP.IFlag_Reconstruction = IFlag_Reconstruction;
MP.Nvar = Nvar; // Number of variables (equations); the finest grid uses the turbulence model, so it equals Nvar
// Set control parameters on the coarse grids
for (int nMesh = 2; nMesh <= Num_Mesh; ++nMesh) {
Mesh_TYPE & MP = Mesh[nMesh];
MP.Iflag_turbulence_model = Turbulence_NONE;
MP.Iflag_Scheme = Scheme_UD1;
MP.IFlag_flux = Iflag_Flux;
MP.IFlag_Reconstruction = IFlag_Reconstruction;
MP.Nvar = 4; // Number of variables (equations); coarse grids do not use a turbulence model, so the number is 4
}
}
//-------------------------------------- -
// Print residuals (maximum and RMS residuals)
void output_Res(int nMesh)
{
printf("\n\n Kstep= %d , t= %f \n", Mesh[nMesh].Kstep, Mesh[nMesh].tt);
printf("----------The Max Residuals are-------- ---Mesh--- %d\n", nMesh);
for (int i = 1; i <= Nvar; ++i) {
printf("%13.9f ", Mesh[nMesh].Res_max[i]);
}
printf("\n");
printf(" The R.M.S Residuals are \n");
for (int i = 1; i <= Nvar; ++i) {
printf("%13.9f ", Mesh[nMesh].Res_rms[i]);
}
printf("\n");
std::ofstream fcout;
if (!FileOpenFlag) {
FileOpenFlag = 1;
fcout.open("Residual.dat");
fcout.close();
}
fcout.open("Residual.dat", std::ios::app);
fcout << std::setprecision(15);
if (fcout.is_open()) {
fcout << Mesh[nMesh].Kstep << " ";
for (int i = 1; i <= Nvar; ++i) {
//printf("%13.9f ", Mesh[nMesh].Res_max[i]);
fcout << Mesh[nMesh].Res_max[i] << " ";
}
//fcout << std::endl;
for (int i = 1; i <= Nvar; ++i) {
//printf("%13.9f ", Mesh[nMesh].Res_rms[i]);
fcout << Mesh[nMesh].Res_rms[i] << " ";
}
fcout << std::endl;
fcout.close();
}
}
//----------------------------------------------------------------------
// Output geometry and physical quantities (tecplot format): finest grid flow2d.dat; coarser grid flow2d-2.dat; coarsest grid flow2d-3.dat
void output(int nMesh)
{
std::string filename;
if (nMesh == 1) {
filename = "flow2d.dat";
}
else {
filename = "flow2d-";
std::string tempStr = std::to_string(nMesh);
filename = filename + tempStr + ".plt"; // flow2d-2.dat ; flow2d-3.dat
}
if (nMesh == 1) {
std::string filename1 = "flow2d-";
std::string tempStr1 = std::to_string(Mesh[1].Kstep);
filename1 = filename1 + tempStr1 + ".plt"; // flow2d-2.dat ; flow2d-3.dat
Mesh_TYPE & MP = Mesh[1];
std::ofstream fcout;
fcout.open(filename1);
fcout << std::setprecision(12);
fcout << " variables=x,y,d,u,v,T,p " << std::endl;
for (int m = 1; m <= MP.Num_Block; ++m) {
Block_TYPE &B = Mesh[nMesh].Block[m];
fcout << "zone i= " << B.nx + 1 << " j= " << B.ny + 1 << std::endl;
for (int j = LAP; j <= B.ny + LAP; ++j) {
for (int i = LAP; i <= B.nx + LAP; ++i) {
double d1 = B.U[i][j][1];
double u1 = B.U[i][j][2] / d1;
double v1 = B.U[i][j][3] / d1;
double T1 = (B.U[i][j][4] - 0.50*d1*(u1*u1 + v1*v1)) / (Cv*d1);
double p1 = d1*T1 / (gamma*Ma*Ma);
fcout << B.x1[i][j] << " " << B.y1[i][j] << " " << d1 << " " << u1 << " " << v1 << " " << T1 << " " << p1 << std::endl;
}
}
}
fcout.close();
}
printf("write data file ...\n");
Mesh_TYPE & MP = Mesh[nMesh];
std::ofstream fcout;
fcout.open(filename);
fcout << std::setprecision(12);
fcout << " variables=x,y,d,u,v,T,p,Amut " << std::endl;
for (int m = 1; m <= MP.Num_Block; ++m) {
Block_TYPE &B = Mesh[nMesh].Block[m];
fcout << "zone i= " << B.nx + 1 << " j= " << B.ny + 1 << std::endl;
for (int j = LAP; j <= B.ny + LAP; ++j) {
for (int i = LAP; i <= B.nx + LAP; ++i) {
double d1 = B.U[i][j][1];
double u1 = B.U[i][j][2] / d1;
double v1 = B.U[i][j][3] / d1;
double T1 = (B.U[i][j][4] - 0.50*d1*(u1*u1 + v1*v1)) / (Cv*d1);
fcout << B.x1[i][j] << " " << B.y1[i][j] << " " << d1 << " " << u1 << " " << v1 << " " << T1 << " ";
fcout << d1*T1 / (gamma*Ma*Ma) << " " << B.Amu_t[i][j] * Re << std::endl;
}
}
}
fcout.close();
if (MP.Nvar == 5) {
fcout.open("SA2d.dat");
fcout << std::setprecision(12);
fcout << " variables=x,y,vt " << std::endl;
for (int m = 1; m <= MP.Num_Block; ++m) {
Block_TYPE &B = Mesh[nMesh].Block[m];
fcout << "zone i= " << B.nx + 1 << " j= " << B.ny + 1 << std::endl;
for (int j = LAP; j <= B.ny + LAP; ++j) {
for (int i = LAP; i <= B.nx + LAP; ++i) {
fcout << B.x1[i][j] << " " << B.y1[i][j] << " " << B.U[i][j][5] << std::endl;
}
}
}
fcout.close();
}
if (MP.Nvar == 6) {
fcout.open("SST2D.dat");
fcout << std::setprecision(12);
fcout << " variables=x,y,Kt, Wt " << std::endl;
for (int m = 1; m <= MP.Num_Block; ++m) {
Block_TYPE &B = Mesh[nMesh].Block[m];
fcout << "zone i= " << B.nx + 1 << " j= " << B.ny + 1 << std::endl;
for (int j = LAP; j <= B.ny + LAP; ++j) {
for (int i = LAP; i <= B.nx + LAP; ++i) {
fcout << B.x1[i][j] << " " << B.y1[i][j] << " " << B.U[i][j][5] << " " << B.U[i][j][6] << std::endl;
}
}
}
fcout.close();
}
}
void outputDebug()
{
#if debug
Mesh_TYPE & MP = Mesh[1];
std::ofstream fcout;
fcout.open("outputDebug.plt");
fcout << std::setprecision(12);
fcout << " variables=x,y,d,u,v,T,p " << std::endl;
for (int m = 1; m <= MP.Num_Block; ++m) {
Block_TYPE &B = Mesh[1].Block[m];
fcout << "zone i= " << B.nx + 1 << " j= " << B.ny + 1 << std::endl;
for (int j = 1; j <= B.ny + 2 * LAP - 1; ++j) {
for (int i = 1; i <= B.nx + 2 * LAP - 1; ++i) {
double d1 = B.U[i][j][1];
double u1 = B.U[i][j][2] / d1;
double v1 = B.U[i][j][3] / d1;
double T1 = (B.U[i][j][4] - 0.50*d1*(u1*u1 + v1*v1)) / (Cv*d1);
double p1 = d1*T1 / (gamma*Ma*Ma);
if (abs(d1) > 100 || abs(T1) > 100 || abs(u1) > 100 || abs(v1) > 100) {
printf("%d, %d, \n %f, %f, %f, %f,", i, j, d1, u1, v1, T1);
printf("this error is at %s file at %d line", __FILE__, __LINE__);
PAUSE;
}
fcout << B.x1[i][j] << " " << B.y1[i][j] << " " << d1 << " " << u1 << " " << v1 << " " << T1 << " " << p1 << std::endl;
}
}
}
fcout.close();
#endif
}
void myFree()
{
for (int iMesh = 1; iMesh <= Num_Mesh; ++iMesh) {
Mesh_TYPE & MP = Mesh[iMesh];
for (int iBlock = 1; iBlock <= MP.Num_Block; ++iBlock) {
Block_TYPE & B = MP.Block[iBlock];
int nx = B.nx; int ny = B.ny;
int mm = nx + 2 * LAP; int nn = ny + 2 * LAP;
deleteMatrix(B.x, mm); deleteMatrix(B.y, mm);
int mm1 = nx + 2 * LAP - 1; int nn1 = ny + 2 * LAP - 1;
deleteMatrix(B.x1, mm1); deleteMatrix(B.y1, mm1);
deleteMatrix(B.U, mm1, nn1); deleteMatrix(B.deltU, mm1, nn1);
deleteMatrix(B.Amu, mm1); deleteMatrix(B.Amu_t, mm1);
deleteMatrix(B.vol, nx);
deleteMatrix(B.si, nx); deleteMatrix(B.sj, nx);
deleteMatrix(B.ni1, nx); deleteMatrix(B.ni2, nx);
deleteMatrix(B.nj1, nx); deleteMatrix(B.nj2, nx);
// Spectral radii
deleteMatrix(B.Lci, nx); deleteMatrix(B.Lcj, nx);
deleteMatrix(B.Lvi, nx); deleteMatrix(B.Lvj, nx);
deleteMatrix(B.Un, nx, ny); // Values at the previous time step
if (Time_Method == Time_Dual_LU_SGS) {
deleteMatrix(B.Un1, nx); // Values at time step n-1, used in the dual-time-step LU_SGS method
}
}
}
free(Mesh);
Mesh = NULL;
}
|
430da7217cf66b5359cffe27dd345337de476b6d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <vector>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
const int block_num = 512;
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
const int threadsPerBlock = sizeof(unsigned long long) * 8;
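// cumsumKernel: per-row inclusive prefix sum; each block scans one batch row of length n in tiles of BlockSize*4 using a shared-memory scan, carrying a compensated running sum across tiles.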
__global__ void cumsumKernel(int b,int n,const float * __restrict__ inp,float * __restrict__ out){
const int BlockSize=2048;
const int paddingLevel=5;
__shared__ float buffer4[BlockSize*4];
__shared__ float buffer[BlockSize+(BlockSize>>paddingLevel)];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
float runningsum=0,runningsum2=0;
for (int j=0;j<n;j+=BlockSize*4){
int n24_i=min(n-j,BlockSize*4);
int n24=(n24_i+3)&~3;
int n2=n24>>2;
for (int k=threadIdx.x*4;k<n24_i;k+=blockDim.x*4){
if (k+3<n24_i){
float v1=inp[i*n+j+k];
float v2=inp[i*n+j+k+1];
v2+=v1;
float v3=inp[i*n+j+k+2];
float v4=inp[i*n+j+k+3];
v4+=v3;
v3+=v2;
v4+=v2;
buffer4[k]=v1;
buffer4[k+1]=v2;
buffer4[k+2]=v3;
buffer4[k+3]=v4;
buffer[(k>>2)+(k>>(2+paddingLevel))]=v4;
}else{
float v=0;
for (int k2=k;k2<n24_i;k2++){
v+=inp[i*n+j+k2];
buffer4[k2]=v;
}
for (int k2=n24_i;k2<n24;k2++){
buffer4[k2]=v;
}
buffer[(k>>2)+(k>>(2+paddingLevel))]=v;
}
}
int u=0;
for (;(2<<u)<=n2;u++){
__syncthreads();
for (int k=threadIdx.x;k<int(n2>>(u+1));k+=blockDim.x){
int i1=(((k<<1)+2)<<u)-1;
int i2=(((k<<1)+1)<<u)-1;
i1+=i1>>paddingLevel;
i2+=i2>>paddingLevel;
buffer[i1]+=buffer[i2];
}
}
u--;
for (;u>=0;u--){
__syncthreads();
for (int k=threadIdx.x;k<int((n2-(1<<u))>>(u+1));k+=blockDim.x){
int i1=(((k<<1)+3)<<u)-1;
int i2=(((k<<1)+2)<<u)-1;
i1+=i1>>paddingLevel;
i2+=i2>>paddingLevel;
buffer[i1]+=buffer[i2];
}
}
__syncthreads();
for (int k=threadIdx.x*4;k<n24;k+=blockDim.x*4){
if (k!=0){
int k2=((k>>2)-1)+(((k>>2)-1)>>paddingLevel);
buffer4[k]+=buffer[k2];
buffer4[k+1]+=buffer[k2];
buffer4[k+2]+=buffer[k2];
buffer4[k+3]+=buffer[k2];
}
}
__syncthreads();
for (int k=threadIdx.x;k<n24_i;k+=blockDim.x){
out[i*n+j+k]=buffer4[k]+runningsum;
}
float t=buffer[(n2-1)+((n2-1)>>paddingLevel)]+runningsum2;
float r2=runningsum+t;
runningsum2=t-(r2-runningsum);
runningsum=r2;
__syncthreads();
}
}
}
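// binarysearchKernel: inverse-CDF lookup; each query in [0,1) is scaled by the row total and binary-searched against the cumulative sums to find (roughly) the first index whose cumulative value reaches it.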
__global__ void binarysearchKernel(int b,int n,int m,const float * __restrict__ dataset,const float * __restrict__ query, int * __restrict__ result){
int base=1;
while (base<n)
base<<=1;
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
float q=query[i*m+j]*dataset[i*n+n-1];
int r=n-1;
for (int k=base;k>=1;k>>=1)
if (r>=k && dataset[i*n+r-k]>=q)
r-=k;
result[i*m+j]=r;
}
}
}
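// farthestpointsamplingKernel: iterative farthest point sampling; starting from index 0, each of the remaining m-1 picks keeps per-point distances to the selected set in temp and selects the point of maximum distance via a shared-memory max reduction.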
template <unsigned int BlockSize>
__global__ void farthestpointsamplingKernel(int b,int n,int c,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
if (m<=0)
return;
// const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int old=0;
if (threadIdx.x==0)
idxs[i*m+0]=old;
//initialize temp
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
__syncthreads();
for (int j=1;j<m;j++){
int besti=0;
float best=-1;
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[blockIdx.x*n+k];
float d = 0;
float p1, p2;
for (int l=0;l<c;l++){
p1 = dataset[i*n*c+old*c+l];
p2 = dataset[i*n*c+k*c+l];
d += (p2-p1) * (p2-p1);
}
float d2=min(d,td);
if (d2!=td)
temp[blockIdx.x*n+k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
if (threadIdx.x==0)
idxs[i*m+j]=old;
}
}
}
template <unsigned int BlockSize>
__global__ void farthestpointsamplingwithdistKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
if (m<=0)
return;
// const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int old=0;
if (threadIdx.x==0)
idxs[i*m+0]=old;
//initialize temp
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
__syncthreads();
for (int j=1;j<m;j++){
int besti=0;
float best=-1;
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[blockIdx.x*n+k];
float d = 0;
d = dataset[i * n * n + old * n + k];
float d2=min(d,td);
if (d2!=td)
temp[blockIdx.x*n+k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
if (threadIdx.x==0)
idxs[i*m+j]=old;
}
}
}
template <unsigned int BlockSize>
__global__ void farthestpointsamplingwithpreidxKernel(int b,int n,int c,int m,int m1,const float * __restrict__ dataset,const int * __restrict__ preidx,float * __restrict__ temp,int * __restrict__ idxs){
// b: batch_size, n: ndataset, c: channel_num, m: points_num after fps, m1: preidx number
// dataset: [b, n, c] preidx: [b, m1], temp: [b, n], idxs: [b, m]
if (m<=0)
return;
// const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
int pre_idx;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
// update temp metrics
float pre_best = 1e38;
float pre_p1, pre_p2;
for (int k=0; k<m1; k++){
pre_idx = preidx[i * m1 + k];
float pre_d = 0;
for (int l=0; l < c; l++){
pre_p1 = dataset[i * n * c + pre_idx * c + l];
pre_p2 = dataset[i * n * c + j * c + l];
pre_d += (pre_p2 - pre_p1) * (pre_p2 - pre_p1);
}
pre_best = min(pre_best, pre_d);
}
temp[blockIdx.x*n+j] = pre_best;
}
// then find current smallest distance as current old
__syncthreads();
int old=0;
float pre_best = -1;
for (int j=0; j<n; j++){
if (pre_best < temp[blockIdx.x*n+j]){
pre_best = temp[blockIdx.x*n+j];
old = j;
}
}
if (threadIdx.x==0)
idxs[i*m+0]=old;
//initialize temp
__syncthreads();
for (int j=1;j<m;j++){
int besti=0;
float best=-1;
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[blockIdx.x*n+k];
float d = 0;
float p1, p2;
for (int l=0;l<c;l++){
p1 = dataset[i*n*c+old*c+l];
p2 = dataset[i*n*c+k*c+l];
d += (p2-p1) * (p2-p1);
}
float d2=min(d,td);
if (d2!=td)
temp[blockIdx.x*n+k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
if (threadIdx.x==0)
idxs[i*m+j]=old;
}
}
}
// inp: [b, n, c] idx: [b, m]
// out: [b, m, c]
__global__ void gatherpointKernel(int b,int n,int m,int c,const float * __restrict__ inp,const int * __restrict__ idx,float * __restrict__ out){
int loop_time = b * m * c;
CUDA_1D_KERNEL_LOOP(index, loop_time){
int cur_batch_size = index / (m * c);
int cur_point_idx = index / c;
int cur_channel = index % c;
int a=idx[cur_point_idx];
int current_idx = cur_batch_size * (n * c) + a * c + cur_channel;
out[index] = inp[current_idx];
}
}
// out_g: [b, m, c] idx: [b, m]
// inp_g: [b, n, c]
__global__ void scatteraddpointKernel(int b,int n,int m,int c,const float * __restrict__ out_g,const int * __restrict__ idx,float * __restrict__ inp_g){
int loop_time = b * m * c;
CUDA_1D_KERNEL_LOOP(index, loop_time){
int cur_batch_size = index / (m * c);
int cur_point_idx = index / c;
int cur_channel = index % c;
int a = idx[cur_point_idx];
int current_idx = cur_batch_size * n * c + a * c + cur_channel;
atomicAdd(&inp_g[current_idx],out_g[index]);
}
}
// inp: [b, n, c] mask: [b, n]
// out: [b, proposal_num, c]
__global__ void GatherByMaskKernel(int b,int n,int c,int proposal_num,const float *inp,const float *mask,float *out){
for (int cur_batch=blockIdx.x; cur_batch<b; cur_batch+=gridDim.x){
const float *cur_inp = inp + cur_batch * n * c;
const float *cur_mask = mask + cur_batch * n;
float* cur_out = out + cur_batch * proposal_num * c;
int proposal_cnt = 0;
int loop_time, tmp_channel_idx;
for (int cur_pts=0; cur_pts<n; cur_pts++){
if(int(cur_mask[cur_pts]) == 0) continue;
if(proposal_cnt == proposal_num) break;
// a valid proposal
if (proposal_cnt == 0){
loop_time = proposal_num * c;
for (int i=threadIdx.x; i<loop_time; i+=blockDim.x){
tmp_channel_idx = i % c;
cur_out[i] = cur_inp[cur_pts * c + tmp_channel_idx];
}
__syncthreads();
}
else {
loop_time = c;
for (int i=threadIdx.x; i<loop_time; i+=blockDim.x){
cur_out[proposal_cnt * c + i] = cur_inp[cur_pts * c + i];
}
__syncthreads();
}
proposal_cnt += 1;
}
}
}
void cumsumLauncher(int b,int n,const float * inp,float * out){
hipLaunchKernelGGL(( cumsumKernel), dim3(32),dim3(512), 0, 0, b,n,inp,out);
}
//require b*n working space
void probsampleLauncher(int b,int n,int m,const float * inp_p,const float * inp_r,float * temp,int * out){
hipLaunchKernelGGL(( cumsumKernel), dim3(32),dim3(512), 0, 0, b,n,inp_p,temp);
hipLaunchKernelGGL(( binarysearchKernel), dim3(dim3(32,8,1)),dim3(512), 0, 0, b,n,m,temp,inp_r,out);
}
//require 32*n working space
void farthestpointsamplingLauncher(int b,int n,int c,int m,const float * inp,float * temp,int * out){
hipLaunchKernelGGL(( farthestpointsamplingKernel<1024>), dim3(b),dim3(1024), 0, 0, b,n,c,m,inp,temp,out);
}
//require 32*n working space
void farthestpointsamplingwithdistLauncher(int b,int n,int m,const float * inp,float * temp,int * out){
hipLaunchKernelGGL(( farthestpointsamplingwithdistKernel<1024>), dim3(b),dim3(1024), 0, 0, b,n,m,inp,temp,out);
}
//require 32*n working space
void farthestpointsamplingwithpreidxLauncher(int b,int n,int c,int m,int m1,const float * inp, const int* preidx,float * temp,int * out){
hipLaunchKernelGGL(( farthestpointsamplingwithpreidxKernel<1024>), dim3(b),dim3(1024), 0, 0, b,n,c,m,m1,inp,preidx,temp,out);
}
void gatherpointLauncher(int b,int n,int m,int c,const float * inp,const int * idx,float * out){
hipLaunchKernelGGL(( gatherpointKernel), dim3(block_num),dim3(threadsPerBlock), 0, 0, b,n,m,c,inp,idx,out);
//int thread_num = 512 / b;
// gatherpointKernel<<<dim3(256,8,1),512>>>(b,n,m,inp,idx,out);
}
void scatteraddpointLauncher(int b,int n,int m,int c,const float * out_g,const int * idx,float * inp_g){
hipLaunchKernelGGL(( scatteraddpointKernel), dim3(block_num),dim3(threadsPerBlock), 0, 0, b,n,m,c,out_g,idx,inp_g);
}
void GatherByMaskLauncher(int b,int n,int c,int proposal_num,const float *inp,const float *mask,float *out){
hipLaunchKernelGGL(( GatherByMaskKernel), dim3(block_num),dim3(threadsPerBlock), 0, 0, b,n,c,proposal_num,inp,mask,out);
}
| 430da7217cf66b5359cffe27dd345337de476b6d.cu | #include <stdio.h>
#include <iostream>
#include <vector>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
const int block_num = 512;
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
const int threadsPerBlock = sizeof(unsigned long long) * 8;
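// cumsumKernel: per-row inclusive prefix sum; each block scans one batch row of length n in tiles of BlockSize*4 using a shared-memory scan, carrying a compensated running sum across tiles.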
__global__ void cumsumKernel(int b,int n,const float * __restrict__ inp,float * __restrict__ out){
const int BlockSize=2048;
const int paddingLevel=5;
__shared__ float buffer4[BlockSize*4];
__shared__ float buffer[BlockSize+(BlockSize>>paddingLevel)];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
float runningsum=0,runningsum2=0;
for (int j=0;j<n;j+=BlockSize*4){
int n24_i=min(n-j,BlockSize*4);
int n24=(n24_i+3)&~3;
int n2=n24>>2;
for (int k=threadIdx.x*4;k<n24_i;k+=blockDim.x*4){
if (k+3<n24_i){
float v1=inp[i*n+j+k];
float v2=inp[i*n+j+k+1];
v2+=v1;
float v3=inp[i*n+j+k+2];
float v4=inp[i*n+j+k+3];
v4+=v3;
v3+=v2;
v4+=v2;
buffer4[k]=v1;
buffer4[k+1]=v2;
buffer4[k+2]=v3;
buffer4[k+3]=v4;
buffer[(k>>2)+(k>>(2+paddingLevel))]=v4;
}else{
float v=0;
for (int k2=k;k2<n24_i;k2++){
v+=inp[i*n+j+k2];
buffer4[k2]=v;
}
for (int k2=n24_i;k2<n24;k2++){
buffer4[k2]=v;
}
buffer[(k>>2)+(k>>(2+paddingLevel))]=v;
}
}
int u=0;
for (;(2<<u)<=n2;u++){
__syncthreads();
for (int k=threadIdx.x;k<int(n2>>(u+1));k+=blockDim.x){
int i1=(((k<<1)+2)<<u)-1;
int i2=(((k<<1)+1)<<u)-1;
i1+=i1>>paddingLevel;
i2+=i2>>paddingLevel;
buffer[i1]+=buffer[i2];
}
}
u--;
for (;u>=0;u--){
__syncthreads();
for (int k=threadIdx.x;k<int((n2-(1<<u))>>(u+1));k+=blockDim.x){
int i1=(((k<<1)+3)<<u)-1;
int i2=(((k<<1)+2)<<u)-1;
i1+=i1>>paddingLevel;
i2+=i2>>paddingLevel;
buffer[i1]+=buffer[i2];
}
}
__syncthreads();
for (int k=threadIdx.x*4;k<n24;k+=blockDim.x*4){
if (k!=0){
int k2=((k>>2)-1)+(((k>>2)-1)>>paddingLevel);
buffer4[k]+=buffer[k2];
buffer4[k+1]+=buffer[k2];
buffer4[k+2]+=buffer[k2];
buffer4[k+3]+=buffer[k2];
}
}
__syncthreads();
for (int k=threadIdx.x;k<n24_i;k+=blockDim.x){
out[i*n+j+k]=buffer4[k]+runningsum;
}
float t=buffer[(n2-1)+((n2-1)>>paddingLevel)]+runningsum2;
float r2=runningsum+t;
runningsum2=t-(r2-runningsum);
runningsum=r2;
__syncthreads();
}
}
}
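// binarysearchKernel: inverse-CDF lookup; each query in [0,1) is scaled by the row total and binary-searched against the cumulative sums to find (roughly) the first index whose cumulative value reaches it.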
__global__ void binarysearchKernel(int b,int n,int m,const float * __restrict__ dataset,const float * __restrict__ query, int * __restrict__ result){
int base=1;
while (base<n)
base<<=1;
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
float q=query[i*m+j]*dataset[i*n+n-1];
int r=n-1;
for (int k=base;k>=1;k>>=1)
if (r>=k && dataset[i*n+r-k]>=q)
r-=k;
result[i*m+j]=r;
}
}
}
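// farthestpointsamplingKernel: iterative farthest point sampling; starting from index 0, each of the remaining m-1 picks keeps per-point distances to the selected set in temp and selects the point of maximum distance via a shared-memory max reduction.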
template <unsigned int BlockSize>
__global__ void farthestpointsamplingKernel(int b,int n,int c,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
if (m<=0)
return;
// const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int old=0;
if (threadIdx.x==0)
idxs[i*m+0]=old;
//initialize temp
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
__syncthreads();
for (int j=1;j<m;j++){
int besti=0;
float best=-1;
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[blockIdx.x*n+k];
float d = 0;
float p1, p2;
for (int l=0;l<c;l++){
p1 = dataset[i*n*c+old*c+l];
p2 = dataset[i*n*c+k*c+l];
d += (p2-p1) * (p2-p1);
}
float d2=min(d,td);
if (d2!=td)
temp[blockIdx.x*n+k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
if (threadIdx.x==0)
idxs[i*m+j]=old;
}
}
}
template <unsigned int BlockSize>
__global__ void farthestpointsamplingwithdistKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
if (m<=0)
return;
// const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int old=0;
if (threadIdx.x==0)
idxs[i*m+0]=old;
//initialize temp
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
__syncthreads();
for (int j=1;j<m;j++){
int besti=0;
float best=-1;
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[blockIdx.x*n+k];
float d = 0;
d = dataset[i * n * n + old * n + k];
float d2=min(d,td);
if (d2!=td)
temp[blockIdx.x*n+k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
if (threadIdx.x==0)
idxs[i*m+j]=old;
}
}
}
template <unsigned int BlockSize>
__global__ void farthestpointsamplingwithpreidxKernel(int b,int n,int c,int m,int m1,const float * __restrict__ dataset,const int * __restrict__ preidx,float * __restrict__ temp,int * __restrict__ idxs){
// b: batch_size, n: ndataset, c: channel_num, m: points_num after fps, m1: preidx number
// dataset: [b, n, c] preidx: [b, m1], temp: [b, n], idxs: [b, m]
if (m<=0)
return;
// const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
int pre_idx;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
// update temp metrics
float pre_best = 1e38;
float pre_p1, pre_p2;
for (int k=0; k<m1; k++){
pre_idx = preidx[i * m1 + k];
float pre_d = 0;
for (int l=0; l < c; l++){
pre_p1 = dataset[i * n * c + pre_idx * c + l];
pre_p2 = dataset[i * n * c + j * c + l];
pre_d += (pre_p2 - pre_p1) * (pre_p2 - pre_p1);
}
pre_best = min(pre_best, pre_d);
}
temp[blockIdx.x*n+j] = pre_best;
}
// then find current smallest distance as current old
__syncthreads();
int old=0;
float pre_best = -1;
for (int j=0; j<n; j++){
if (pre_best < temp[blockIdx.x*n+j]){
pre_best = temp[blockIdx.x*n+j];
old = j;
}
}
if (threadIdx.x==0)
idxs[i*m+0]=old;
//initialize temp
__syncthreads();
for (int j=1;j<m;j++){
int besti=0;
float best=-1;
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[blockIdx.x*n+k];
float d = 0;
float p1, p2;
for (int l=0;l<c;l++){
p1 = dataset[i*n*c+old*c+l];
p2 = dataset[i*n*c+k*c+l];
d += (p2-p1) * (p2-p1);
}
float d2=min(d,td);
if (d2!=td)
temp[blockIdx.x*n+k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
if (threadIdx.x==0)
idxs[i*m+j]=old;
}
}
}
// inp: [b, n, c] idx: [b, m]
// out: [b, m, c]
__global__ void gatherpointKernel(int b,int n,int m,int c,const float * __restrict__ inp,const int * __restrict__ idx,float * __restrict__ out){
int loop_time = b * m * c;
CUDA_1D_KERNEL_LOOP(index, loop_time){
int cur_batch_size = index / (m * c);
int cur_point_idx = index / c;
int cur_channel = index % c;
int a=idx[cur_point_idx];
int current_idx = cur_batch_size * (n * c) + a * c + cur_channel;
out[index] = inp[current_idx];
}
}
// out_g: [b, m, c] idx: [b, m]
// inp_g: [b, n, c]
__global__ void scatteraddpointKernel(int b,int n,int m,int c,const float * __restrict__ out_g,const int * __restrict__ idx,float * __restrict__ inp_g){
int loop_time = b * m * c;
CUDA_1D_KERNEL_LOOP(index, loop_time){
int cur_batch_size = index / (m * c);
int cur_point_idx = index / c;
int cur_channel = index % c;
int a = idx[cur_point_idx];
int current_idx = cur_batch_size * n * c + a * c + cur_channel;
atomicAdd(&inp_g[current_idx],out_g[index]);
}
}
// inp: [b, n, c] mask: [b, n]
// out: [b, proposal_num, c]
__global__ void GatherByMaskKernel(int b,int n,int c,int proposal_num,const float *inp,const float *mask,float *out){
for (int cur_batch=blockIdx.x; cur_batch<b; cur_batch+=gridDim.x){
const float *cur_inp = inp + cur_batch * n * c;
const float *cur_mask = mask + cur_batch * n;
float* cur_out = out + cur_batch * proposal_num * c;
int proposal_cnt = 0;
int loop_time, tmp_channel_idx;
for (int cur_pts=0; cur_pts<n; cur_pts++){
if(int(cur_mask[cur_pts]) == 0) continue;
if(proposal_cnt == proposal_num) break;
// a valid proposal
if (proposal_cnt == 0){
loop_time = proposal_num * c;
for (int i=threadIdx.x; i<loop_time; i+=blockDim.x){
tmp_channel_idx = i % c;
cur_out[i] = cur_inp[cur_pts * c + tmp_channel_idx];
}
__syncthreads();
}
else {
loop_time = c;
for (int i=threadIdx.x; i<loop_time; i+=blockDim.x){
cur_out[proposal_cnt * c + i] = cur_inp[cur_pts * c + i];
}
__syncthreads();
}
proposal_cnt += 1;
}
}
}
void cumsumLauncher(int b,int n,const float * inp,float * out){
cumsumKernel<<<32,512>>>(b,n,inp,out);
}
//require b*n working space
void probsampleLauncher(int b,int n,int m,const float * inp_p,const float * inp_r,float * temp,int * out){
cumsumKernel<<<32,512>>>(b,n,inp_p,temp);
binarysearchKernel<<<dim3(32,8,1),512>>>(b,n,m,temp,inp_r,out);
}
//require 32*n working space
void farthestpointsamplingLauncher(int b,int n,int c,int m,const float * inp,float * temp,int * out){
farthestpointsamplingKernel<1024><<<b,1024>>>(b,n,c,m,inp,temp,out);
}
//require 32*n working space
void farthestpointsamplingwithdistLauncher(int b,int n,int m,const float * inp,float * temp,int * out){
farthestpointsamplingwithdistKernel<1024><<<b,1024>>>(b,n,m,inp,temp,out);
}
//require 32*n working space
void farthestpointsamplingwithpreidxLauncher(int b,int n,int c,int m,int m1,const float * inp, const int* preidx,float * temp,int * out){
farthestpointsamplingwithpreidxKernel<1024><<<b,1024>>>(b,n,c,m,m1,inp,preidx,temp,out);
}
void gatherpointLauncher(int b,int n,int m,int c,const float * inp,const int * idx,float * out){
gatherpointKernel<<<block_num,threadsPerBlock>>>(b,n,m,c,inp,idx,out);
//int thread_num = 512 / b;
// gatherpointKernel<<<dim3(256,8,1),512>>>(b,n,m,inp,idx,out);
}
void scatteraddpointLauncher(int b,int n,int m,int c,const float * out_g,const int * idx,float * inp_g){
scatteraddpointKernel<<<block_num,threadsPerBlock>>>(b,n,m,c,out_g,idx,inp_g);
}
void GatherByMaskLauncher(int b,int n,int c,int proposal_num,const float *inp,const float *mask,float *out){
GatherByMaskKernel<<<block_num,threadsPerBlock>>>(b,n,c,proposal_num,inp,mask,out);
}
|
3a4663fea68927479aa1540fa683ef54052fcbba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*======================================================================
This file is part of the elastix software.
Copyright (c) University Medical Center Utrecht. All rights reserved.
See src/CopyrightElastix.txt or http://elastix.isi.uu.nl/legal.php for
details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
======================================================================*/
#include "CI/cubicTex3D.cu"
cuda::cudaTextures::texture_3D_t m_tex_coeffsX;
cuda::cudaTextures::texture_3D_t m_tex_coeffsY;
cuda::cudaTextures::texture_3D_t m_tex_coeffsZ;
cuda::cudaTextures::texture_3D_t m_tex_inputImage;
__device__ bool operator<(float3 a, float3 b)
{
return a.x < b.x && a.y < b.y && a.z < b.z;
}
__device__ bool operator>(float3 a, float b)
{
return a.x > b && a.y > b && a.z > b;
}
__device__ bool operator<(float3 a, float b)
{
return a.x < b && a.y < b && a.z < b;
}
__device__ bool operator>=(float3 a, float b)
{
return a.x >= b && a.y >= b && a.z >= b;
}
__device__ bool operator>=(float3 a, float3 b)
{
return a.x >= b.x && a.y >= b.y && a.z >= b.z;
}
__device__ int3 operator-(int3 a, int b)
{
return make_int3(a.x - b, a.y - b, a.z - b);
}
__device__ void operator+=(float3& a, float b)
{
a.x += b; a.y += b; a.z += b;
}
/* Convert an index that is an offset to a 3D matrix into its xyz coordinates */
__device__ __host__ int3 index2coord(int index, const int3 DIM)
{
/** WARNING: Direction is not yet taken into account! */
int tmp = DIM.x * DIM.y;
int3 res;
res.z = index / tmp;
tmp = index - (res.z * tmp);
res.y = tmp / DIM.x;
res.x = tmp - (res.y * DIM.x);
return res;
}
/* Apply a 3D B-spline transformation on a coordinate. */
__device__ float3 deform_at_coord(float3 coord)
{
float3 res;
/** Coordinate shift, since apparently the space Ruijters lives in is
* a little shifted from our space.
*/
coord += 0.5f;
/** A B-spline transformation is separable among its dimensions! */
res.x = cubicTex3D( m_tex_coeffsX, coord );
res.y = cubicTex3D( m_tex_coeffsY, coord );
res.z = cubicTex3D( m_tex_coeffsZ, coord );
return res;
}
__device__ float3 deform_at_coord_simple(float3 coord)
{
float3 res;
/** Coordinate shift, since apparently the space Ruijters lives in is
* a little shifted from our space.
*/
coord += 0.5f;
/** A B-spline transformation is separable among its dimensions! */
res.x = cubicTex3DSimple( m_tex_coeffsX, coord );
res.y = cubicTex3DSimple( m_tex_coeffsY, coord );
res.z = cubicTex3DSimple( m_tex_coeffsZ, coord );
return res;
}
/* Apply deformation to all voxels based on transform parameters and retrieve result. */
template <typename TImageType>
__global__ void resample_image( TImageType* dst,
int3 inputImageSize, int3 outputImageSize, size_t offset )
{
size_t id = threadIdx.x + ( blockIdx.x * blockDim.x );
/* Convert single index to coordinates. */
int3 coord = index2coord( id + offset, outputImageSize );
float3 out_coord = make_float3( coord.x, coord.y, coord.z );
/* Translate normal coordinates into world coordinates.
* WARNING: Direction is not yet taken into account!
*/
float3 out_coord_world = out_coord * CUOutputImageSpacing + CUOutputImageOrigin;
/* Translate world coordinates in terms of B-spline grid. */
float3 out_coord_world_bspline = ( out_coord_world - CUGridOrigin ) / CUGridSpacing;
/* Check if the sample is within the B-spline grid. */
bool isValidSample = ( out_coord_world_bspline >= 0.0f
&& out_coord_world_bspline < make_float3( CUGridSize - 2 ) );
float res = CUDefaultPixelValue;
if ( isValidSample )
{
/* B-Spline deform of a coordinate uses world coordinates. */
float3 deform = deform_at_coord( out_coord_world_bspline );
float3 inp_coord_world = out_coord_world + deform;
/* Translate world coordinates to normal coordinates.
* WARNING: Direction is not yet taken into account!
*/
float3 inp_coord = ( (inp_coord_world - CUInputImageOrigin) / CUInputImageSpacing ) + 0.5f;
/** Check if sample is inside input image. */
isValidSample = ( inp_coord > 0.0f ) && inp_coord < make_float3( inputImageSize );
/* Interpolate the moving/input image using 3-rd order B-spline. */
if ( isValidSample )
{
res = cubicTex3D( m_tex_inputImage, inp_coord );
}
}
dst[ id ] = static_cast<TImageType>( res );
}
/* Cast from one type to another type on the GPU. */
template <class TInputImageType, class TOutputImageType>
__global__ void cast_to_type( TOutputImageType* dst
, const TInputImageType* src, size_t nrOfVoxels )
{
int id = threadIdx.x + (blockIdx.x * blockDim.x);
if ( id >= nrOfVoxels ) return;
dst[ id ] = (TOutputImageType)src[ id ];
//dst[ id ] = static_cast<TOutputImageType>( src[ id ] );
}
| 3a4663fea68927479aa1540fa683ef54052fcbba.cu | /*======================================================================
This file is part of the elastix software.
Copyright (c) University Medical Center Utrecht. All rights reserved.
See src/CopyrightElastix.txt or http://elastix.isi.uu.nl/legal.php for
details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
======================================================================*/
#include "CI/cubicTex3D.cu"
cuda::cudaTextures::texture_3D_t m_tex_coeffsX;
cuda::cudaTextures::texture_3D_t m_tex_coeffsY;
cuda::cudaTextures::texture_3D_t m_tex_coeffsZ;
cuda::cudaTextures::texture_3D_t m_tex_inputImage;
__device__ bool operator<(float3 a, float3 b)
{
return a.x < b.x && a.y < b.y && a.z < b.z;
}
__device__ bool operator>(float3 a, float b)
{
return a.x > b && a.y > b && a.z > b;
}
__device__ bool operator<(float3 a, float b)
{
return a.x < b && a.y < b && a.z < b;
}
__device__ bool operator>=(float3 a, float b)
{
return a.x >= b && a.y >= b && a.z >= b;
}
__device__ bool operator>=(float3 a, float3 b)
{
return a.x >= b.x && a.y >= b.y && a.z >= b.z;
}
__device__ int3 operator-(int3 a, int b)
{
return make_int3(a.x - b, a.y - b, a.z - b);
}
__device__ void operator+=(float3& a, float b)
{
a.x += b; a.y += b; a.z += b;
}
/* Convert an index that is an offset to a 3D matrix into its xyz coordinates */
__device__ __host__ int3 index2coord(int index, const int3 DIM)
{
/** WARNING: Direction is not yet taken into account! */
int tmp = DIM.x * DIM.y;
int3 res;
res.z = index / tmp;
tmp = index - (res.z * tmp);
res.y = tmp / DIM.x;
res.x = tmp - (res.y * DIM.x);
return res;
}
/* Apply a 3D B-spline transformation on a coordinate. */
__device__ float3 deform_at_coord(float3 coord)
{
float3 res;
/** Coordinate shift, since apparently the space Ruijters lives in is
* a little shifted from our space.
*/
coord += 0.5f;
/** A B-spline transformation is separable among its dimensions! */
res.x = cubicTex3D( m_tex_coeffsX, coord );
res.y = cubicTex3D( m_tex_coeffsY, coord );
res.z = cubicTex3D( m_tex_coeffsZ, coord );
return res;
}
__device__ float3 deform_at_coord_simple(float3 coord)
{
float3 res;
/** Coordinate shift, since apparently the space Ruijters lives in is
* a little shifted from our space.
*/
coord += 0.5f;
/** A B-spline transformation is separable among its dimensions! */
res.x = cubicTex3DSimple( m_tex_coeffsX, coord );
res.y = cubicTex3DSimple( m_tex_coeffsY, coord );
res.z = cubicTex3DSimple( m_tex_coeffsZ, coord );
return res;
}
/* Apply deformation to all voxels based on transform parameters and retrieve result. */
template <typename TImageType>
__global__ void resample_image( TImageType* dst,
int3 inputImageSize, int3 outputImageSize, size_t offset )
{
size_t id = threadIdx.x + ( blockIdx.x * blockDim.x );
/* Convert single index to coordinates. */
int3 coord = index2coord( id + offset, outputImageSize );
float3 out_coord = make_float3( coord.x, coord.y, coord.z );
/* Translate normal coordinates into world coordinates.
* WARNING: Direction is not yet taken into account!
*/
float3 out_coord_world = out_coord * CUOutputImageSpacing + CUOutputImageOrigin;
/* Translate world coordinates in terms of B-spline grid. */
float3 out_coord_world_bspline = ( out_coord_world - CUGridOrigin ) / CUGridSpacing;
/* Check if the sample is within the B-spline grid. */
bool isValidSample = ( out_coord_world_bspline >= 0.0f
&& out_coord_world_bspline < make_float3( CUGridSize - 2 ) );
float res = CUDefaultPixelValue;
if ( isValidSample )
{
/* B-Spline deform of a coordinate uses world coordinates. */
float3 deform = deform_at_coord( out_coord_world_bspline );
float3 inp_coord_world = out_coord_world + deform;
/* Translate world coordinates to normal coordinates.
* WARNING: Direction is not yet taken into account!
*/
float3 inp_coord = ( (inp_coord_world - CUInputImageOrigin) / CUInputImageSpacing ) + 0.5f;
/** Check if sample is inside input image. */
isValidSample = ( inp_coord > 0.0f ) && inp_coord < make_float3( inputImageSize );
/* Interpolate the moving/input image using 3-rd order B-spline. */
if ( isValidSample )
{
res = cubicTex3D( m_tex_inputImage, inp_coord );
}
}
dst[ id ] = static_cast<TImageType>( res );
}
/* Cast from one type to another type on the GPU. */
template <class TInputImageType, class TOutputImageType>
__global__ void cast_to_type( TOutputImageType* dst
, const TInputImageType* src, size_t nrOfVoxels )
{
int id = threadIdx.x + (blockIdx.x * blockDim.x);
if ( id >= nrOfVoxels ) return;
dst[ id ] = (TOutputImageType)src[ id ];
//dst[ id ] = static_cast<TOutputImageType>( src[ id ] );
}
|
1ebeefd1c337e3e9efe9b5d297a5e9a0980a2aab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
/*
* Description:
*/
__device__ int translate_idx(int ii, int d1, int d2, int scale_factor)
{
int x, y, z;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
z = z/scale_factor;
d2 /= scale_factor;
return ((x*d1+y)*d2)+z;
}
__device__ int translate_idx_inv(int ii, int d1, int d2, int scale_factor, int off_x)
{
int x, y, z;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
z = z*scale_factor+off_x;
d2 *= scale_factor;
return ((x*d1+y)*d2)+z;
}
template <typename Dtype>
__global__ void upscale(Dtype *input, Dtype *output, int64_t no_elements,
int scale_factor, int d1, int d2)
{
// output offset:
int64_t ii = threadIdx.x + blockDim.x * blockIdx.x;
ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
if (ii >= no_elements) return;
int ipidx = translate_idx(ii, d1, d2, scale_factor);
output[ii]=input[ipidx];
}
/*
* Description:
*/
template <typename Dtype, typename Acctype>
__global__ void downscale(Dtype *gradInput_data, Dtype *gradOutput_data, int64_t no_elements,
int scale_factor, int d1, int d2)
{
// output offset:
int64_t ii = threadIdx.x + blockDim.x * blockIdx.x;
ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
if (ii >= no_elements) return;
Acctype sum = Acctype(0);
for (int i=0; i < scale_factor; i++){
int ipidx = translate_idx_inv(ii, d1, d2, scale_factor, i);
sum += gradOutput_data[ipidx];
}
gradInput_data[ii] += ScalarConvert<Acctype, Dtype>::to(sum);
}
#include "generic/TemporalUpSamplingNearest.cu"
#include "THHGenerateFloatTypes.h"
| 1ebeefd1c337e3e9efe9b5d297a5e9a0980a2aab.cu | #include "THCUNN.h"
#include "common.h"
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
/*
* Description:
*/
__device__ int translate_idx(int ii, int d1, int d2, int scale_factor)
{
int x, y, z;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
z = z/scale_factor;
d2 /= scale_factor;
return ((x*d1+y)*d2)+z;
}
__device__ int translate_idx_inv(int ii, int d1, int d2, int scale_factor, int off_x)
{
int x, y, z;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
z = z*scale_factor+off_x;
d2 *= scale_factor;
return ((x*d1+y)*d2)+z;
}
template <typename Dtype>
__global__ void upscale(Dtype *input, Dtype *output, int64_t no_elements,
int scale_factor, int d1, int d2)
{
// output offset:
int64_t ii = threadIdx.x + blockDim.x * blockIdx.x;
ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
if (ii >= no_elements) return;
int ipidx = translate_idx(ii, d1, d2, scale_factor);
output[ii]=input[ipidx];
}
/*
* Description:
*/
template <typename Dtype, typename Acctype>
__global__ void downscale(Dtype *gradInput_data, Dtype *gradOutput_data, int64_t no_elements,
int scale_factor, int d1, int d2)
{
// output offset:
int64_t ii = threadIdx.x + blockDim.x * blockIdx.x;
ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
if (ii >= no_elements) return;
Acctype sum = Acctype(0);
for (int i=0; i < scale_factor; i++){
int ipidx = translate_idx_inv(ii, d1, d2, scale_factor, i);
sum += gradOutput_data[ipidx];
}
gradInput_data[ii] += ScalarConvert<Acctype, Dtype>::to(sum);
}
#include "generic/TemporalUpSamplingNearest.cu"
#include "THCGenerateFloatTypes.h"
|
8e530265352b71eaafeb47c252858108673994e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "fractal.h"
#define _DEBUG_
CudaFractalGenerator::CudaFractalGenerator(uint32_t w, uint32_t h){
m_w = w;
m_h = h;
selected_fractal = MANDELBROT;
m_scale = 1.0f;
m_curr_scale = 1.0f;
m_new_scale = 1.0f;
m_curr_world_x = 0.0f;
m_curr_world_y = 0.0f;
m_new_world_x = 0.0f;
  m_new_world_y = 0.0f;
create_opengl_buffers();
hipSetDevice(0);
timer = new Timer();
timer->start();
}
CudaFractalGenerator::~CudaFractalGenerator(){
auto e = hipGraphicsUnregisterResource(textures[TEXTURE_FRONT].cuda);
e = hipGraphicsUnregisterResource(textures[TEXTURE_BACK].cuda);
glDisableVertexAttribArray(vertex_array);
glDeleteBuffers(2,vbo);
glDeleteVertexArrays(1,&vertex_array);
delete timer;
}
GLuint CudaFractalGenerator::get_texture_sampler(){
return texture_sampler;
}
void CudaFractalGenerator::render(){
glBindVertexArray(vertex_array);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,ibo);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textures[TEXTURE_FRONT].gl);
glDrawElements(GL_TRIANGLES, 2 * 3, GL_UNSIGNED_SHORT, (void*)0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,0);
glBindVertexArray(0);
}
void CudaFractalGenerator::update(){
cuda_pass();
}
glm::mat4 CudaFractalGenerator::get_model(){
//TODO::::
glm::mat4 trans_mat = glm::translate(glm::mat4(1.0f),
glm::vec3( m_world_y - m_curr_world_y,
m_world_x - m_curr_world_x,
1.0f));
glm::mat4 scale_mat = glm::scale(glm::mat4(1.0f), glm::vec3(m_curr_scale / m_scale));
return scale_mat ;
}
void CudaFractalGenerator::cuda_pass(){
if(working_on_texture &&
hipEventQuery(cuda_event) == hipSuccess){
auto e = hipEventDestroy(cuda_event);
e = hipDestroySurfaceObject(surface);
e = hipGraphicsUnmapResources(1, &textures[TEXTURE_BACK].cuda, 0);
struct Texture_Container temp = textures[TEXTURE_FRONT];
textures[TEXTURE_FRONT] = textures[TEXTURE_BACK];
textures[TEXTURE_BACK] = temp;
working_on_texture = false;
m_curr_scale = m_new_scale;
m_curr_world_x = m_new_world_x;
m_curr_world_y = m_new_world_y;
last_frame_time = timer->tick();
    printf("iter: %d, mslf: %.4f\n", m_iterations, last_frame_time);
}
if(!working_on_texture && changed){
m_new_scale = m_scale;
m_new_world_x = m_world_x;
m_new_world_y = m_world_y;
timer->tick();
auto e = hipGraphicsMapResources(1, &textures[TEXTURE_BACK].cuda, 0);
hipArray_t texture_array;
e = hipGraphicsSubResourceGetMappedArray(&texture_array, textures[TEXTURE_BACK].cuda, 0, 0);
struct hipResourceDesc desc;
memset(&desc, 0, sizeof(struct hipResourceDesc));
desc.resType = hipResourceTypeArray;
desc.res.array.array = texture_array;
e = hipCreateSurfaceObject(&surface, &desc);
e = hipEventCreateWithFlags(&cuda_event, hipEventDisableTiming);
working_on_texture = true;
changed = false;
generate_fractal(surface);
}
}
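/* Reading of cuda_pass() above: it implements a simple double-buffering scheme. When no work
 * is in flight and the view has changed, the back texture is mapped, a surface object is
 * created for it and the fractal kernel is launched asynchronously; completion is detected by
 * polling the recorded event, after which the front and back textures are swapped so that
 * rendering always samples a fully written texture. */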
void CudaFractalGenerator::set_iterations(uint32_t iterations){
if(m_iterations != iterations){
changed = true;
m_iterations = iterations;
}
}
void CudaFractalGenerator::set_world_pos(double world_x, double world_y){
if(m_world_x != world_x ||
m_world_y != world_y){
changed = true;
m_world_x = world_x;
m_world_y = world_y;
}
}
void CudaFractalGenerator::move_julia_constant(double delta_x, double delta_y){
m_mandelbrot_x += delta_x;
m_mandelbrot_y += delta_y;
changed = true;
}
void CudaFractalGenerator::set_scale(double scale){
if(m_scale != scale){
m_scale = scale;
changed = true;
}
}
void CudaFractalGenerator::set_fractal(uint32_t fractal){
if (fractal < NUM_FRACTALS)
selected_fractal = fractal;
changed = true;
}
void CudaFractalGenerator::create_opengl_buffers(){
glGenVertexArrays(1, &vertex_array);
glBindVertexArray(vertex_array);
glGenBuffers(2, vbo);
//Positions
glBindBuffer(GL_ARRAY_BUFFER, vbo[POSITION_ATTR]);
glBufferData(GL_ARRAY_BUFFER,
4 * 2 * sizeof(GLfloat),
positions, GL_STATIC_DRAW);
glVertexAttribPointer(POSITION_ATTR, 2, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(POSITION_ATTR);
//Texture coords
glBindBuffer(GL_ARRAY_BUFFER, vbo[TEXTURE_ATTR]);
glBufferData(GL_ARRAY_BUFFER,
4 * 2 * sizeof(GLfloat),
texture_coords, GL_STATIC_DRAW);
glVertexAttribPointer(TEXTURE_ATTR, 2, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(TEXTURE_ATTR);
//Texture
glGenTextures(1, &textures[TEXTURE_FRONT].gl);
glBindTexture(GL_TEXTURE_2D, textures[TEXTURE_FRONT].gl);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F,
m_w, m_h, 0, GL_RGBA,
GL_FLOAT, nullptr);
auto e = hipGraphicsGLRegisterImage(&textures[TEXTURE_FRONT].cuda,
textures[TEXTURE_FRONT].gl,
GL_TEXTURE_2D,
hipGraphicsRegisterFlagsSurfaceLoadStore);
glGenTextures(1, &textures[TEXTURE_BACK].gl);
glBindTexture(GL_TEXTURE_2D, textures[TEXTURE_BACK].gl);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F,
m_w, m_h, 0, GL_RGBA,
GL_FLOAT, nullptr);
e = hipGraphicsGLRegisterImage(&textures[TEXTURE_BACK].cuda,
textures[TEXTURE_BACK].gl,
GL_TEXTURE_2D,
hipGraphicsRegisterFlagsSurfaceLoadStore);
texture_sampler = 0;
//Indices
glGenBuffers(1,&ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER,
2 * 3 * sizeof(GLushort),
indices, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);
glBindVertexArray(0);
}
void CudaFractalGenerator::generate_fractal(hipSurfaceObject_t surface){
dim3 block(BLOCK_N, BLOCK_N);
dim3 grid((uint32_t) ceil( (double)m_w / (double)BLOCK_N ),
(uint32_t) ceil( (double)m_h / (double)BLOCK_N ));
switch(selected_fractal){
case BURNING_SHIP:
hipLaunchKernelGGL(( burning_ship_kernel), dim3(grid),dim3(block),0, 0, surface,
m_w, m_h,
m_world_x - (((double) m_h) * m_scale) / 2.0f,
m_world_y - (((double) m_w) * m_scale) / 2.0f,
((double) m_w) * m_scale,
((double) m_h) * m_scale,
m_iterations);
break;
case JULIA_SET:
hipLaunchKernelGGL(( julia_set_kernel), dim3(grid),dim3(block), 0, 0, surface,
m_w, m_h,
m_mandelbrot_x, m_mandelbrot_y,
m_world_x - (((double) m_h) * m_scale) / 2.0f,
m_world_y - (((double) m_w) * m_scale) / 2.0f,
((double) m_w) * m_scale,
((double) m_h) * m_scale,
m_iterations);
break;
case MANDELBROT:
default:
m_mandelbrot_x = m_world_x - (((double) m_h) * m_scale) / 2.0f;
m_mandelbrot_y = m_world_y - (((double) m_w) * m_scale) / 2.0f;
hipLaunchKernelGGL(( mandelbrot_kernel), dim3(grid),dim3(block),0, 0, surface,
m_w, m_h,
m_world_x - (((double) m_h) * m_scale) / 2.0f,
m_world_y - (((double) m_w) * m_scale) / 2.0f,
((double) m_w) * m_scale,
((double) m_h) * m_scale,
m_iterations);
}
hipEventRecord(cuda_event,0);
}
| 8e530265352b71eaafeb47c252858108673994e3.cu | #include "fractal.h"
#define _DEBUG_
CudaFractalGenerator::CudaFractalGenerator(uint32_t w, uint32_t h){
m_w = w;
m_h = h;
selected_fractal = MANDELBROT;
m_scale = 1.0f;
m_curr_scale = 1.0f;
m_new_scale = 1.0f;
m_curr_world_x = 0.0f;
m_curr_world_y = 0.0f;
m_new_world_x = 0.0f;
  m_new_world_y = 0.0f;
create_opengl_buffers();
cudaSetDevice(0);
timer = new Timer();
timer->start();
}
CudaFractalGenerator::~CudaFractalGenerator(){
auto e = cudaGraphicsUnregisterResource(textures[TEXTURE_FRONT].cuda);
e = cudaGraphicsUnregisterResource(textures[TEXTURE_BACK].cuda);
glDisableVertexAttribArray(vertex_array);
glDeleteBuffers(2,vbo);
glDeleteVertexArrays(1,&vertex_array);
delete timer;
}
GLuint CudaFractalGenerator::get_texture_sampler(){
return texture_sampler;
}
void CudaFractalGenerator::render(){
glBindVertexArray(vertex_array);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,ibo);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textures[TEXTURE_FRONT].gl);
glDrawElements(GL_TRIANGLES, 2 * 3, GL_UNSIGNED_SHORT, (void*)0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,0);
glBindVertexArray(0);
}
void CudaFractalGenerator::update(){
cuda_pass();
}
glm::mat4 CudaFractalGenerator::get_model(){
//TODO::::
glm::mat4 trans_mat = glm::translate(glm::mat4(1.0f),
glm::vec3( m_world_y - m_curr_world_y,
m_world_x - m_curr_world_x,
1.0f));
glm::mat4 scale_mat = glm::scale(glm::mat4(1.0f), glm::vec3(m_curr_scale / m_scale));
return scale_mat ;
}
void CudaFractalGenerator::cuda_pass(){
if(working_on_texture &&
cudaEventQuery(cuda_event) == cudaSuccess){
auto e = cudaEventDestroy(cuda_event);
e = cudaDestroySurfaceObject(surface);
e = cudaGraphicsUnmapResources(1, &textures[TEXTURE_BACK].cuda, 0);
struct Texture_Container temp = textures[TEXTURE_FRONT];
textures[TEXTURE_FRONT] = textures[TEXTURE_BACK];
textures[TEXTURE_BACK] = temp;
working_on_texture = false;
m_curr_scale = m_new_scale;
m_curr_world_x = m_new_world_x;
m_curr_world_y = m_new_world_y;
last_frame_time = timer->tick();
    printf("iter: %d, mslf: %.4f\n", m_iterations, last_frame_time);
}
if(!working_on_texture && changed){
m_new_scale = m_scale;
m_new_world_x = m_world_x;
m_new_world_y = m_world_y;
timer->tick();
auto e = cudaGraphicsMapResources(1, &textures[TEXTURE_BACK].cuda, 0);
cudaArray_t texture_array;
e = cudaGraphicsSubResourceGetMappedArray(&texture_array, textures[TEXTURE_BACK].cuda, 0, 0);
struct cudaResourceDesc desc;
memset(&desc, 0, sizeof(struct cudaResourceDesc));
desc.resType = cudaResourceTypeArray;
desc.res.array.array = texture_array;
e = cudaCreateSurfaceObject(&surface, &desc);
e = cudaEventCreateWithFlags(&cuda_event, cudaEventDisableTiming);
working_on_texture = true;
changed = false;
generate_fractal(surface);
}
}
void CudaFractalGenerator::set_iterations(uint32_t iterations){
if(m_iterations != iterations){
changed = true;
m_iterations = iterations;
}
}
void CudaFractalGenerator::set_world_pos(double world_x, double world_y){
if(m_world_x != world_x ||
m_world_y != world_y){
changed = true;
m_world_x = world_x;
m_world_y = world_y;
}
}
void CudaFractalGenerator::move_julia_constant(double delta_x, double delta_y){
m_mandelbrot_x += delta_x;
m_mandelbrot_y += delta_y;
changed = true;
}
void CudaFractalGenerator::set_scale(double scale){
if(m_scale != scale){
m_scale = scale;
changed = true;
}
}
void CudaFractalGenerator::set_fractal(uint32_t fractal){
if (fractal < NUM_FRACTALS)
selected_fractal = fractal;
changed = true;
}
void CudaFractalGenerator::create_opengl_buffers(){
glGenVertexArrays(1, &vertex_array);
glBindVertexArray(vertex_array);
glGenBuffers(2, vbo);
//Positions
glBindBuffer(GL_ARRAY_BUFFER, vbo[POSITION_ATTR]);
glBufferData(GL_ARRAY_BUFFER,
4 * 2 * sizeof(GLfloat),
positions, GL_STATIC_DRAW);
glVertexAttribPointer(POSITION_ATTR, 2, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(POSITION_ATTR);
//Texture coords
glBindBuffer(GL_ARRAY_BUFFER, vbo[TEXTURE_ATTR]);
glBufferData(GL_ARRAY_BUFFER,
4 * 2 * sizeof(GLfloat),
texture_coords, GL_STATIC_DRAW);
glVertexAttribPointer(TEXTURE_ATTR, 2, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(TEXTURE_ATTR);
//Texture
glGenTextures(1, &textures[TEXTURE_FRONT].gl);
glBindTexture(GL_TEXTURE_2D, textures[TEXTURE_FRONT].gl);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F,
m_w, m_h, 0, GL_RGBA,
GL_FLOAT, nullptr);
auto e = cudaGraphicsGLRegisterImage(&textures[TEXTURE_FRONT].cuda,
textures[TEXTURE_FRONT].gl,
GL_TEXTURE_2D,
cudaGraphicsRegisterFlagsSurfaceLoadStore);
glGenTextures(1, &textures[TEXTURE_BACK].gl);
glBindTexture(GL_TEXTURE_2D, textures[TEXTURE_BACK].gl);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F,
m_w, m_h, 0, GL_RGBA,
GL_FLOAT, nullptr);
e = cudaGraphicsGLRegisterImage(&textures[TEXTURE_BACK].cuda,
textures[TEXTURE_BACK].gl,
GL_TEXTURE_2D,
cudaGraphicsRegisterFlagsSurfaceLoadStore);
texture_sampler = 0;
//Indices
glGenBuffers(1,&ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER,
2 * 3 * sizeof(GLushort),
indices, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);
glBindVertexArray(0);
}
void CudaFractalGenerator::generate_fractal(cudaSurfaceObject_t surface){
dim3 block(BLOCK_N, BLOCK_N);
dim3 grid((uint32_t) ceil( (double)m_w / (double)BLOCK_N ),
(uint32_t) ceil( (double)m_h / (double)BLOCK_N ));
switch(selected_fractal){
case BURNING_SHIP:
burning_ship_kernel<<<grid,block,0>>>(surface,
m_w, m_h,
m_world_x - (((double) m_h) * m_scale) / 2.0f,
m_world_y - (((double) m_w) * m_scale) / 2.0f,
((double) m_w) * m_scale,
((double) m_h) * m_scale,
m_iterations);
break;
case JULIA_SET:
julia_set_kernel<<<grid,block>>>(surface,
m_w, m_h,
m_mandelbrot_x, m_mandelbrot_y,
m_world_x - (((double) m_h) * m_scale) / 2.0f,
m_world_y - (((double) m_w) * m_scale) / 2.0f,
((double) m_w) * m_scale,
((double) m_h) * m_scale,
m_iterations);
break;
case MANDELBROT:
default:
m_mandelbrot_x = m_world_x - (((double) m_h) * m_scale) / 2.0f;
m_mandelbrot_y = m_world_y - (((double) m_w) * m_scale) / 2.0f;
mandelbrot_kernel<<<grid,block,0>>>(surface,
m_w, m_h,
m_world_x - (((double) m_h) * m_scale) / 2.0f,
m_world_y - (((double) m_w) * m_scale) / 2.0f,
((double) m_w) * m_scale,
((double) m_h) * m_scale,
m_iterations);
}
cudaEventRecord(cuda_event,0);
}
|
c389fed9b8c85f086c2d84aaccca2bc10a2f567a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Spiking.h"
#include "../common/cuBase.h"
#include "../common/Config.h"
#include "../common/util.h"
#include "../readData/readSpeechData.h"
#include <fstream>
#include <assert.h>
#include <math.h>
//#define DEBUG
#define I_IDX 0
#define O_IDX 0
/*
 * Device function for accumulating the spike response
 */
__device__ float d_Spiking_accumulate_spikes(
int inputSize,
int outputSize,
float* input_resp,
bool* output,
int o_idx,
float* weights,
float* weights_lat,
float* biases,
int t,
int dummyFreq,
int endTime);
/*
* Device func for spike gradient for each pair of binary spike response
*/
__device__ float d_Spiking_gradient(
bool* output,
bool* input,
float delta,
int o_idx,
int i_idx,
int outputSize,
int inputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
* Device func for spike gradient for each pair of spike train in times
*/
__device__ float d_Spiking_gradient_spiketime(
int* output_time,
int* input_time,
int n_ospikes,
int n_ispikes,
float delta,
int o_idx,
int i_idx,
float lat_factor,
int outputSize,
int inputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
* Device func for spike gradient for bias in times
*/
__device__ float d_Spiking_bias_gradient_spiketime(
int* output_time,
int n_ospikes,
float delta,
int o_idx,
int dummyFreq,
int outputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
* dim3 block = dim3(1);
* dim3 thread= dim3(256);
*/
__global__ void g_getCost_output(
int* fireCount,
float* groundTruth,
float* cost,
int* y,
int batch,
int cols,
float UNDESIRED_LEVEL,
float DESIRED_LEVEL,
float MARGIN);
/*
* dim3 block = dim3(1);
* dim3 thread= dim3(256);
*/
__global__ void g_getDelta_output(
float* outputDelta,
int* fireCount,
float* groundTruth,
int len,
float MARGIN);
/*
* dim3 block = dim3(batch);
* dim3 thread= dim3(min(1024, outputSize));
*/
__global__ void g_getMaxCount(
int* fireCount,
int* maxCount,
int cols);
/*
* dim3 block = dim3(batch, inputSize);
* dim3 thread= min(1024, outputSize);
*/
__global__ void g_Spiking_wgrad(
bool* inputs,
bool* outputs,
float* curDelta,
float* wgradTmp,
int inputSize,
int outputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
* dim3 block = dim3(batch, outputSize);
* dim3 thread= min(1024, inputSize);
*/
__global__ void g_Spiking_wgrad_sideEffect(
float* weights,
int* batchFireCount,
float* batchAccEffect,
float vth,
int inputSize,
int outputSize,
float * batchSideEffect);
/*
* dim3 block = dim3(batch, outputSize);
* dim3 thread= min(1024, inputSize);
*/
__global__ void g_Spiking_wgrad_spiketime(
float* batchSideEffect,
float* batchAccEffect,
float* curDelta,
float* latFactor,
float* wgradTmp,
int inputSize,
int outputSize);
/*
* dim3 block = dim3(outputSize);
* dim3 thread= dim3(batch);
*/
__global__ void g_Spiking_bgrad_spiketime(
int* outputs_time,
int* batchFireCount,
float* curDelta,
float* bgradTmp,
int outputSize,
int endTime,
int dummyFreq,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
* block = dim3(outputSize * inputSize);
* thread= dim3(batch);
*/
__global__ void g_Spiking_gradAdd(
float* wgradTmp,
float* wgrad,
float* w,
float* w_sq_sum,
int batch,
float lambda,
float beta,
float limit,
int inputSize,
int wArea);
/*
* dim3 block = dim3(batch, inputSize);
* dim3 thread= min(1024, outputSize);
*/
__global__ void g_Spiking_debug_spiketime(
int* inputs_time,
int* outputs_time,
int* batchPreFireCount,
int* batchFireCount,
int inputSize,
int outputSize,
int endTime);
void Spiking::calCost()
{
cost->gpuClear();
if(predict == NULL){
        printf("Warning: trying to compute the cost when predict is not properly set!\n");
return;
}
hipLaunchKernelGGL(( g_getCost_output), dim3(dim3(1)), dim3(dim3(256)), sizeof(float) * 256, 0, fireCount->getDev(),
groundTruth->getDev(),
cost->getDev(),
predict,
batch,
fireCount->cols,
UNDESIRED_LEVEL,
DESIRED_LEVEL,
MARGIN);
hipStreamSynchronize(0);
getLastCudaError("Spiking:g_getCost_output");
}
void Spiking::feedforward()
{
if((inputs == NULL))
{
printf("Spiking init error\n");
exit(0);
}
// fast input response
hipLaunchKernelGGL(( g_cast_bool_2_float), dim3(dim3(batch, endTime)), dim3(min(1024, inputSize)), 0, 0, inputs->getDev(), endTime, inputSize, inputs->cols, inputs->channels, batch, inputs_float->getDev());
matrixMul(w, inputs_float, inputs_resp_tmp); //input_resp_tmp rows:outputSize; cols:endTime*batch
hipLaunchKernelGGL(( g_transform_2_batch), dim3(dim3(batch, outputSize)), dim3(min(1024, endTime)), 0, 0, inputs_resp_tmp->getDev(), endTime, outputSize, batch, inputs_resp->getDev());
// convert (batch, inputDim2*endTime, amount) to (batch, amount*inputDim2*endTime, 1)
hipLaunchKernelGGL(( g_convert_spiketimes), dim3(dim3(batch, endTime)), dim3(min(1024, inputSize)), 0, 0, inputs_time->getDev(), endTime, inputSize, inputs_time->cols, inputs_time->channels, batch, inputs_time_format->getDev());
// convert (batch, inputDim2, amount) to (batch, amount*inputDim2, 1)
hipLaunchKernelGGL(( g_convert_firecounts), dim3(dim3(batch)), dim3(min(1024, inputSize)), 0, 0, preFireCount->getDev(), preFireCount->getArea(), inputSize, preFireCount->cols, preFireCount->channels, batch, preFireCount_format->getDev());
dim3 thread= dim3(min(1024, outputSize));
dim3 block = dim3(batch);
ConfigSpiking * config = (ConfigSpiking*) Config::instance()->getLayerByName(m_name);
int dummyFreq = config->getBiasFreq();
hipLaunchKernelGGL(( g_Spiking_feedforward), dim3(block), dim3(thread), 0, 0,
inputs_resp->getDev(),
w->getDev(),
w_laterial == NULL ? NULL : w_laterial->getDev(),
b->getDev(),
outputs->getDev(),
fireCount->getDev(),
inputSize,
outputSize,
endTime,
threshold,
dummyFreq,
T_REFRAC,
TAU_M,
TAU_S);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("Spiking::g_Spiking_feedforward");
block = dim3(batch, 1);
thread = dim3(min(outputSize, 1024));
// transform the binary response matrix to the spike times
hipLaunchKernelGGL(( g_response_2_spiketime), dim3(block), dim3(thread), 0, 0,
outputs->getDev(),
outputs_time->getDev(),
outputs->getArea(),
outputSize,
endTime);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("Spiking:g_response_2_spiketime");
}
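/* Summary of feedforward() above (a reading of the code, not text from the original authors):
 * the binary input spike trains are cast to float and multiplied by the weight matrix once,
 * so each output neuron's synaptic drive at every time step is available as a precomputed
 * response matrix; the spiking kernel then only integrates this response plus the optional
 * lateral weights and bias, avoiding an inner loop over inputSize at every time step. Finally
 * the binary spike trains are converted to lists of spike times for use in the backward pass. */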
void Spiking::backpropagation()
{
if(m_name == std::string("output")){
// compute the cost function
hipLaunchKernelGGL(( g_getCost_output), dim3(dim3(1)), dim3(dim3(256)), sizeof(float) * 256, 0, fireCount->getDev(), groundTruth->getDev(), cost->getDev(), predict, batch, fireCount->cols, UNDESIRED_LEVEL, DESIRED_LEVEL, MARGIN);
hipStreamSynchronize(0);
getLastCudaError("Spiking::g_getCost_output");
// compute the delta (error)
hipLaunchKernelGGL(( g_getDelta_output), dim3(dim3(1)), dim3(dim3(256)), 0, 0, curDelta->getDev(), fireCount->getDev(), groundTruth->getDev(), curDelta->getLen(), MARGIN);
hipStreamSynchronize(0);
getLastCudaError("Spiking::g_getDelta_output");
// apply the sample weights
hipLaunchKernelGGL(( g_boostWeight_output), dim3(dim3(batch)), dim3(dim3(outputSize)), 0, 0, curDelta->getDev(), sample_weights, curDelta->getLen());
hipStreamSynchronize(0);
getLastCudaError("Spiking::g_boostWeight_output");
// compute the lateral factors if applicable
if(lateralFactor != NULL && w_laterial != NULL){
int threads = min(outputSize, 1024);
hipLaunchKernelGGL(( g_getLateralFactor_output), dim3(dim3(batch, outputSize)), dim3(threads), sizeof(float) * threads, 0,
outputs_time->getDev(),
fireCount->getDev(),
lateralW,
predict,
lateralFactor->getDev(),
threshold,
outputSize,
endTime,
T_REFRAC,
TAU_M,
TAU_S);
hipStreamSynchronize(0);
getLastCudaError("Spiking::g_getLateralFactor_output");
}
// modify the output spikes of the target neuron if it does not fire
// tricky: modify both the spike trains and output fire counts!
hipLaunchKernelGGL(( g_modifySpikes), dim3(dim3(batch)), dim3(dim3(min(outputSize, 1024))), 0, 0, outputs->getDev(), predict, fireCount->getDev(), DESIRED_LEVEL, endTime, outputSize);
hipStreamSynchronize(0);
getLastCudaError("Spiking::g_modifySpikes");
// retransform the binary matrix to the spike times since the outputs might be changed
hipLaunchKernelGGL(( g_response_2_spiketime), dim3(dim3(batch, 1)), dim3(dim3(min(outputSize, 1024))), 0, 0,
outputs->getDev(),
outputs_time->getDev(),
outputs->getArea(),
outputSize,
endTime);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("Spiking:g_response_2_spiketime");
}
// pre compute the accumulative synaptic effect, and effect ratio (if applicable)
dim3 thread = dim3(min(1024, outputSize));
dim3 block = dim3(batch, inputSize);
hipFuncSetCacheConfig(g_Spiking_synaptic_effect, hipFuncCachePreferL1);
hipLaunchKernelGGL(( g_Spiking_synaptic_effect), dim3(block), dim3(thread), 0, 0,
inputs_time_format->getDev(),
outputs_time->getDev(),
preFireCount_format->getDev(),
fireCount->getDev(),
w->getDev(),
accEffect->getDev(),
effectRatio == NULL ? NULL : effectRatio->getDev(),
inputSize,
outputSize,
endTime,
T_REFRAC,
TAU_M,
TAU_S);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_Spiking_synaptic_effect");
// divide the curDelta by vth
block = dim3(batch, 1);
thread = dim3(min(1024, outputSize));
hipLaunchKernelGGL(( g_divide_by_threshold), dim3(block), dim3(thread), 0, 0, curDelta->getDev(), curDelta->getArea(), curDelta->cols, threshold);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_divide_by_threshold");
// compute preDelta: curDelta: batch * outputSize; w: outputSize * inputSize
if(preDelta == NULL){
ConfigSpiking* config = (ConfigSpiking*)Config::instance()->getLayerByName(m_name);
assert(config->m_input == "data");
}
else{
if(effectRatio != NULL){
matrixMul(curDelta, effectRatio, preDelta_format);
}
else{
matrixMul(curDelta, w, preDelta_format);
}
// preDelta_format: (batch, channels * size, 1) -> preDelta: (batch, size, channels)
block = batch;
thread = min(512, preDelta->channels * preDelta->cols);
hipLaunchKernelGGL(( g_preDeltaFormat), dim3(block), dim3(thread), 0, 0, preDelta_format->getDev(), preDelta->getDev(),
preDelta->rows, preDelta->cols, preDelta->channels);
hipStreamSynchronize(0);
getLastCudaError("g_preDeltaFormat");
}
}
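/* Reading of backpropagation() above: the output delta is defined with respect to the fire
 * counts, so it is divided by the firing threshold to approximate the sensitivity of the count
 * to the accumulated membrane drive; the per-synapse accumulated effect e_{i|j} then plays the
 * role of the activation in a conventional fully-connected backward pass, and preDelta is
 * obtained by multiplying the delta either by the raw weights or, when enabled, by the
 * weight-scaled effect ratio. */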
/*
* block = dim3(outputSize, 1);
* thread= dim3(min(inputSize, 1024));
*/
__global__ void g_Spiking_calSquareSum(
float* w,
float* w_sq_sum,
int outputSize,
int inputSize,
float weight_limit)
{
extern __shared__ float _sum[];
int o_id = blockIdx.x;
int tid = threadIdx.x;
_sum[tid] = 0;
__syncthreads();
for(int i = 0; i < inputSize; i += blockDim.x)
{
int id = i + tid;
if(id < inputSize)
{
int wid = id + o_id * inputSize;
float weight = w[wid];
_sum[tid] += (weight/weight_limit) * (weight/weight_limit);
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip) < len)
{
_sum[tid] += _sum[tid + skip];
}
len = skip;
}
if(tid == 0)
w_sq_sum[o_id] = _sum[0] / inputSize;
}
/*
* block = dim3(outputSize * inputSize);
* thread= dim3(batch);
*/
__global__ void g_Spiking_gradAdd(
float* wgradTmp,
float* wgrad,
float* w,
float* w_sq_sum,
int batch,
float lambda,
float beta,
float limit,
int inputSize,
int wArea)
{
extern __shared__ float _sum[];
int wid = blockIdx.x;
int tid = threadIdx.x;
_sum[tid] = 0;
__syncthreads();
for(int i = 0; i < batch; i += blockDim.x)
{
int b = i + threadIdx.x;
if(b < batch)
{
_sum[threadIdx.x] += wgradTmp[b * wArea + wid];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip) < len)
{
_sum[tid] += _sum[tid + skip];
}
len = skip;
}
if(tid == 0)
{
float sq_sum = w_sq_sum[wid / inputSize];
wgrad[wid] = _sum[0] / batch + lambda*beta*(w[wid]/limit)*__expf(beta*(sq_sum - 1));
}
}
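/* The weight-decay term added above, lambda*beta*(w/limit)*exp(beta*(sq_sum - 1)) with
 * sq_sum = mean_j (w_j/limit)^2 over the fan-in of one output neuron (see
 * g_Spiking_calSquareSum), is the gradient of a per-neuron penalty of the form
 * (lambda * limit * inputSize / 2) * exp(beta * (sq_sum - 1)), i.e. a soft constraint that
 * grows exponentially once the mean squared normalized weight exceeds 1. This is a derivation
 * from the code, not a formula quoted from the original sources. */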
void Spiking::getGrad()
{
dim3 thread = dim3(min(1024, inputSize));
dim3 block = dim3(batch, outputSize);
hipLaunchKernelGGL(( g_Spiking_wgrad_sideEffect), dim3(block), dim3(thread), sizeof(float) * min(1024, inputSize), 0,
w->getDev(),
fireCount->getDev(),
accEffect->getDev(),
threshold,
inputSize,
outputSize,
sideEffect->getDev());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_Spiking_wgrad_sideEffect");
hipFuncSetCacheConfig(g_Spiking_wgrad_spiketime,hipFuncCachePreferL1);
hipLaunchKernelGGL(( g_Spiking_wgrad_spiketime), dim3(block), dim3(thread), 0, 0,
sideEffect->getDev(),
accEffect->getDev(),
curDelta->getDev(),
lateralFactor == NULL ? NULL : lateralFactor->getDev(),
wgradTmp->getDev(),
inputSize,
outputSize);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_Spiking_wgrad_spiketime");
#ifdef DEBUG
hipLaunchKernelGGL(( g_Spiking_debug_spiketime), dim3(block), dim3(thread), 0, 0, inputs_time->getDev(), outputs_time->getDev(), preFireCount->getDev(), fireCount->getDev(), inputSize, outputSize, endTime);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_Spiking_debug_spiketime");
#endif
block = dim3(outputSize);
thread = dim3(min(inputSize, 1024));
hipLaunchKernelGGL(( g_Spiking_calSquareSum), dim3(block), dim3(thread), sizeof(float) * min(inputSize, 1024), 0,
w->getDev(),
weightSqSum->getDev(),
outputSize,
inputSize,
weightLimit);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_Spiking_calSquareSum");
block = dim3(outputSize * inputSize);
thread = dim3(batch);
hipLaunchKernelGGL(( g_Spiking_gradAdd), dim3(block), dim3(thread), sizeof(float) * batch, 0,
wgradTmp->getDev(),
wgrad->getDev(),
w->getDev(),
weightSqSum->getDev(),
batch,
lambda,
beta,
weightLimit,
inputSize,
w->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_Spiking_gradAdd");
// add the bias derivation here:
}
void Spiking::updateWeight()
{
dim3 block = min((w->getLen() + 255)/ 256, 5120);
dim3 thread = 256;
if(Config::instance()->getOptimizerType() == std::string("adam")){
hipLaunchKernelGGL(( g_adam_vecAdd), dim3(block), dim3(thread), 0, Layers::instance()->get_stream(),
g1_w->getDev(),
g2_w->getDev(),
b1_t,
b2_t,
wgrad->getDev(),
w->getDev(),
w->getLen(),
Config::instance()->getLrate());
b1_t *= 0.9f; b2_t *= 0.999f;
}
else{
hipLaunchKernelGGL(( g_sgd_vecAdd), dim3(block), dim3(thread), 0, Layers::instance()->get_stream(),
momentum_w->getDev(),
wgrad->getDev(),
w->getDev(),
w->getLen(),
Config::instance()->getMomentum(),
Config::instance()->getLrate());
}
// handle the bias here
}
Spiking::Spiking(std::string name)
{
m_name = name;
ConfigSpiking* config = (ConfigSpiking*)Config::instance()->getLayerByName(m_name);
SpikingLayerBase * preLayer = (SpikingLayerBase*)Layers::instance()->get(config->m_input);
inputs = preLayer->getSpikingOutputs();
inputs_time = preLayer->getSpikingTimeOutputs();
inputs_time_format = new cuMatrix<int>(inputs_time->rows, inputs_time->cols * inputs_time->channels, 1);
preDelta = preLayer->getCurDelta();
preDelta_format = NULL;
if(preDelta != NULL)
preDelta_format = new cuMatrix<float>(preDelta->rows, preDelta->cols * preDelta->channels, 1);
preFireCount = preLayer->getFireCount();
preFireCount_format = new cuMatrix<int>(preFireCount->rows, preFireCount->cols * preFireCount->channels, 1);
endTime = Config::instance()->getEndTime();
batch = Config::instance()->getBatchSize();
lambda = Config::instance()->getLambda();
beta = Config::instance()->getBeta();
T_REFRAC = config->m_t_ref;
TAU_M = config->m_tau_m;
TAU_S = config->m_tau_s;
inputSize = inputs->cols * inputs->channels / endTime;
outputSize = config->m_numNeurons;
weightLimit = Config::instance()->getWeightLimit();
UNDESIRED_LEVEL = config->m_undesired_level;
DESIRED_LEVEL = config->m_desired_level;
MARGIN = config->m_margin;
outputs = new cuMatrix<bool>(batch, outputSize * endTime, 1);
outputs_time = new cuMatrix<int>(batch, outputSize * endTime, 1);
// for fast input response
inputs_resp_tmp = new cuMatrix<float>(outputSize, endTime * batch, 1);
inputs_resp = new cuMatrix<float>(batch, outputSize * endTime, 1);
inputs_float = new cuMatrix<float>(inputSize, endTime * batch, 1);
curDelta = new cuMatrix<float>(batch, outputSize, 1);
fireCount= new cuMatrix<int>(batch, outputSize, 1);
weightSqSum = new cuMatrix<float>(outputSize, 1, 1);
maxCount = new cuMatrix<int>(batch, 1, 1);
accEffect = new cuMatrix<float>(batch, outputSize * inputSize, 1);
sideEffect = new cuMatrix<float>(batch, outputSize, 1);
predict = NULL;
// only for the output
if(config->m_name == std::string("output")){
groundTruth = new cuMatrix<float>(batch, outputSize, 1);
cost = new cuMatrix<float>(1, 1, 1);
}
else{
groundTruth = NULL;
cost = NULL;
}
assert(outputSize > 0 && inputSize > 0);
w = new cuMatrix<float>(outputSize, inputSize, 1);
b = new cuMatrix<float>(outputSize, 1, 1);
wgrad = new cuMatrix<float>(outputSize, inputSize, 1);
bgrad = new cuMatrix<float>(outputSize, 1, 1);
wgradTmp = new cuMatrix<float>(batch, outputSize * inputSize, 1);
if(config->hasLaterialWeight() == true){
w_laterial = new cuMatrix<float>(outputSize, outputSize, 1);
}
else
w_laterial = NULL;
threshold = config->m_vth;
// lateral inihibition factor for the output
lateralFactor = NULL;
lateralW = 0.0f;
if(config->hasLaterialInh() == true && config->m_name == std::string("output")){
lateralFactor = new cuMatrix<float>(batch, outputSize, 1);
lateralW = config->m_localInbStrength;
}
    // use e^k_{i|j} / o^{k-1}_j to estimate the gradient of the synaptic effect w.r.t. the fire count
    // note that this variable actually stores w[i][j] * e^k_{i|j} / o^{k-1}_j
effectRatio = NULL;
if(Config::instance()->useEffectRatio()){
if(batch > 1){
            printf("Must set batch size to 1 when using the effect ratio for the gradient of the synaptic effect.\n");
printf("Current batch size: %d\n", batch);
assert(batch <= 1);
}
effectRatio = new cuMatrix<float>(outputSize, inputSize, 1);
}
momentum_w = new cuMatrix<float>(outputSize, inputSize, 1);
momentum_b = new cuMatrix<float>(outputSize, 1, 1);
g1_w = new cuMatrix<float>(outputSize, inputSize, 1); // for adam
g1_b = new cuMatrix<float>(outputSize, 1, 1);
g2_w = new cuMatrix<float>(outputSize, inputSize, 1);
g2_b = new cuMatrix<float>(outputSize, 1, 1);
b1_t = 0.9;
b2_t = 0.999;
this->initRandom();
w_ref = NULL;
w_laterial_ref = NULL;
b_ref = NULL;
if(Config::instance()->getIsGradientChecking())
this->loadRef(); // for verification purpose
Layers::instance()->set(m_name, this);
}
void Spiking::save(FILE* file)
{
w->toCpu();
b->toCpu();
for(int c = 0; c < w->channels; c++){
for(int i = 0; i < w->rows; i++){
for(int j = 0; j < w->cols; j++){
fprintf(file, "%f ", w->get(i, j, c));
}
}
}
if(w_laterial != NULL){
for(int c = 0; c < w_laterial->channels; c++){
for(int i = 0; i < w_laterial->rows; i++){
for(int j = 0; j < w_laterial->cols; j++){
fprintf(file, "%f ", w_laterial->get(i, j, c));
}
}
}
}
for(int c = 0; c < b->channels; c++){
for(int i = 0; i < b->rows; i++){
for(int j = 0; j < b->cols; j++){
fprintf(file, "%f ", b->get(i, j, c));
}
}
}
}
void Spiking::clearMomentum()
{
momentum_b->gpuClear();
momentum_w->gpuClear();
}
void Spiking::verify(const std::string& phrase)
{
    printf("Verifying layer: %s at the %s phase.\n", m_name.c_str(), phrase.c_str());
if(phrase == std::string("train"))
{
if(!output_train_ref.empty()){
outputs->toCpu();
checkMatrixIsSame(output_train_ref[0], outputs, outputSize);
}
}
else if(phrase == std::string("test"))
{
if(w_ref != NULL){
w->toCpu();
checkMatrixIsSame(w_ref, w);
}
if(w_laterial_ref != NULL && w_laterial != NULL){
w_laterial->toCpu();
checkMatrixIsSame(w_laterial_ref, w_laterial);
}
if(b_ref != NULL){
b->toCpu();
checkMatrixIsSame(b_ref, b);
}
if(!output_test_ref.empty()){
outputs->toCpu();
checkMatrixIsSame(output_test_ref[0], outputs, outputSize);
}
}
    printf("Verification for layer: %s at the %s phase passed!\n", m_name.c_str(), phrase.c_str());
}
//* load the reference weights and output spikes for verification
void Spiking::loadRef()
{
if(batch != 1){
printf("Only do the verification for one batch and one sample!\n");
exit(0);
}
ConfigSpiking * config = (ConfigSpiking*)Config::instance()->getLayerByName(m_name);
if(config->m_ref_weight_path != std::string("NULL")){
w_ref = new cuMatrix<float>(outputSize, inputSize, 1);
initFromDumpfile(config->m_ref_weight_path, w_ref);
if(config->hasBias()){
b_ref = new cuMatrix<float>(outputSize, 1, 1);
initBiasFromDumpfile(config->m_ref_weight_path, b_ref);
}
}
if(config->m_ref_lweight_path != std::string("NULL")){
w_laterial_ref = new cuMatrix<float>(outputSize, outputSize, 1);
initFromDumpfile(config->m_ref_lweight_path, w_laterial_ref);
}
if(config->m_ref_output_train_path != std::string("NULL")){
read_each_speech_dump(config->m_ref_output_train_path, output_train_ref, endTime, outputSize);
assert(output_train_ref.size() == 1 && output_train_ref[0] != NULL);
output_train_ref[0]->rows = 1;
output_train_ref[0]->cols = endTime * outputSize;
}
if(config->m_ref_output_test_path != std::string("NULL")){
read_each_speech_dump(config->m_ref_output_test_path, output_test_ref, endTime, outputSize);
assert(output_test_ref.size() == 1 && output_test_ref[0] != NULL);
output_test_ref[0]->rows = 1;
output_test_ref[0]->cols = endTime * outputSize;
}
}
void Spiking::initRandom()
{
ConfigSpiking * config = (ConfigSpiking*)Config::instance()->getLayerByName(m_name);
float initW = config->m_initW;
if(config->isGaussian()){
float epsilon = initW;
for(int c = 0; c < w->channels; c++)
{
createGaussian(w->getHost() + c * w->getArea(),
outputSize, inputSize, w->channels, epsilon);
}
w->toGpu();
}
else if(config->isBernoulli()){
for(int j = 0; j < w->getLen(); j++){
w->getHost()[j] = initW * (2.0f * rand() / RAND_MAX - 1.0f);
//printf("%f ", w->getHost()[j]);
}//printf("\n");
w->toGpu();
}
else if(config->isFixed()){
// one input connects to nconnect randomly selected outputs, with initW/-initW
int nconnect = config->m_weightConnect;
assert(nconnect > 0);
for(int c = 0; c < w->channels; ++c){
for(int i = 0; i < w->rows; ++i){
for(int t = 0; t < nconnect; ++t){
int j = rand() % inputSize;
if(rand() % 2 == 0)
w->set(i, j, c, initW);
else
w->set(i, j, c, -1.0*initW);
//printf("input_%d to reservoir_%d : %f\n", j, i, w->get(i, j, c));
}
}
}
w->toGpu();
}
else if(config->isExternal()){
initFromDumpfile(config->m_weightPath, w);
}
if(config->hasLaterialWeight()){
initLaterial();
}
}
void Spiking::initFromCheckpoint(FILE* file)
{
float val = 0;
for(int c = 0; c < w->channels; c++){
for(int i = 0; i < w->rows; i++){
for(int j = 0; j < w->cols; j++){
if(fscanf(file, "%f", &val) == EOF)
{
char logStr[256];
sprintf(logStr, "scanf fail for layer: %s\n", m_name.c_str());
LOG(logStr, "Result/log.txt");
assert(0);
}
w->set(i, j, c, val);
}
}
}
if(w_laterial != NULL){
for(int c = 0; c < w_laterial->channels; c++){
for(int i = 0; i < w_laterial->rows; i++){
for(int j = 0; j < w_laterial->cols; j++){
if(fscanf(file, "%f", &val) == EOF)
{
char logStr[256];
sprintf(logStr, "scanf fail for layer: %s\n", m_name.c_str());
LOG(logStr, "Result/log.txt");
}
w_laterial->set(i, j, c, val);
}
}
}
}
for(int c = 0; c < b->channels; c++){
for(int i = 0; i < b->rows; i++){
for(int j = 0; j < b->cols; j++){
if(fscanf(file, "%f", &val) == EOF)
{
char logStr[256];
sprintf(logStr, "scanf fail for layer: %s\n", m_name.c_str());
LOG(logStr, "Result/log.txt");
assert(0);
}
b->set(i, j, c, val);
}
}
}
w->toGpu();
b->toGpu();
}
//* initialize the weights from the file dumped by the CPU simulator
void Spiking::initFromDumpfile(const std::string& filename, cuMatrix<float>*& cuW)
{
ifstream f_in(filename.c_str());
if(!f_in.is_open()){
printf("Cannot open the file: %s\n", filename.c_str());
exit(EXIT_FAILURE);
}
assert(cuW != NULL);
std::vector<std::vector<float> > weights(cuW->rows, std::vector<float>(cuW->cols, 0.0f));
int idx;
float weight;
std::string pre_name, post_name;
while(f_in>>idx>>pre_name>>post_name>>weight){
int pre = extractNeuronIndex(pre_name);
int post = extractNeuronIndex(post_name);
if(post >= weights.size() || pre >= weights[0].size()){
if(pre == weights[0].size() && post < weights.size()){ // this is related to bias
continue;
}
else{
printf("Read the file: %s, in line: %d\n", filename.c_str(), idx);
printf("Post: %d, OutputDim: %d\n Pre: %d, InputDim: %d\n", post, (int)weights.size(), pre, (int)weights[0].size());
assert(post < weights.size() && pre < weights[0].size());
}
}
weights[post][pre] += weight;
}
for(int c = 0; c < cuW->channels; c++){
for(int i = 0; i < cuW->rows; i++){
for(int j = 0; j < cuW->cols; j++){
cuW->set(i, j, c, weights[i][j]);
}
}
}
cuW->toGpu();
    // verify that the weights are correctly copied
for(int i = 0; i < weights.size(); ++i){
for(int j = 0; j < weights[0].size(); ++j){
assert(fabsf(cuW->get(i, j, 0) - weights[i][j]) < 1e-4);
}
}
}
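/* Expected dump-file format, inferred from the parsing loop above; the concrete neuron names
 * below are illustrative assumptions, not taken from a real dump:
 *
 *   <index> <pre_neuron_name> <post_neuron_name> <weight>
 *   0 input_3 hidden_12 0.250000
 *
 * extractNeuronIndex() is assumed to return the trailing integer of a neuron name; entries
 * whose pre index equals inputSize are treated as bias terms and are handled by
 * initBiasFromDumpfile() below. */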
//* initialize the bias weights from the file dumped by the CPU simulator
void Spiking::initBiasFromDumpfile(const std::string& filename, cuMatrix<float>*& cuW)
{
ifstream f_in(filename.c_str());
if(!f_in.is_open()){
printf("Cannot open the file: %s\n", filename.c_str());
exit(EXIT_FAILURE);
}
assert(cuW != NULL);
int idx;
float weight;
std::string pre_name, post_name;
while(f_in>>idx>>pre_name>>post_name>>weight){
int pre = extractNeuronIndex(pre_name);
int post = extractNeuronIndex(post_name);
if(pre == inputSize && post < outputSize){ // this is related to bias
cuW->set(post, 0, 0, weight);
}
}
cuW->toGpu();
}
void Spiking::initLaterial()
{
ConfigSpiking* config = (ConfigSpiking*)Config::instance()->getLayerByName(m_name);
if(config->m_laterialType == "RESERVOIR"){
initFromDumpfile(config->m_lweightPath, w_laterial);
//initReservoirConnection(config->m_reservoirDim);
}
else if(config->m_laterialType == "LOCAL_INHIBITION"){
initLocalInhibition(config->m_localInbStrength);
}
}
// initialize the reservoir connections
// TODO: improve the randomness of the reservoir (the fixed random seed used here is poor)
void Spiking::initReservoirConnection(const std::vector<int>& reservoirDim)
{
assert(reservoirDim.size() == 3);
assert(w_laterial != NULL);
int d1 = reservoirDim[0], d2 = reservoirDim[1], d3 = reservoirDim[2];
int num = d1 * d2 * d3;
if(num != outputSize){
printf("The reservoir dim: %d x %d x %d = %d does not match the number neuron: %d!\n",d1, d2, d3, num, outputSize);
exit(EXIT_FAILURE);
}
// adopted from the CPU code:
srand(5);
std::vector<bool> excitatory(num, false);
std::vector<dim3> coordinates;
for(int i = 0; i < excitatory.size(); ++i){
if(rand() % 100 < 20) excitatory[i] = false;
else excitatory[i] = true;
}
for(int i = 0; i < d1; ++i){
for(int j = 0; j < d2; ++j){
for(int k = 0; k < d3; ++k){
int index = (i * d2 + j) * d3 + k;
assert(index < excitatory.size());
coordinates.push_back(dim3(i, j, k));
}
}
}
double c, a;
double distsq, dist;
const double factor2 = 1.5;
for(int i = 0; i < num; ++i){
for(int j = 0; j < num; ++j){
if(excitatory[i]){
if(excitatory[j]){
c = 0.3 * factor2;
a = 1;
}
else{
c = 0.2 * factor2;
a = 1;
}
}
else{
if(excitatory[j]){
c = 0.4 * factor2;
a = -1;
}
else{
c = 0.1 * factor2;
a = -1;
}
}
distsq = 0;
dist = coordinates[i].x - coordinates[j].x;
distsq += dist * dist;
dist = coordinates[i].y - coordinates[j].y;
distsq += dist * dist;
dist = coordinates[i].z - coordinates[j].z;
distsq += dist * dist;
if(rand() % 100000 < 100000 * c * exp(-distsq / 4)){
//printf("reservoir_%d to reservoir_%d %f\n", i , j, a);
w_laterial->set(j, i, 0, a);
}
}
}
w_laterial->toGpu();
}
void Spiking::initLocalInhibition(float strength)
{
assert(w_laterial != NULL);
for(int c = 0; c < w_laterial->channels; c++){
for(int i = 0; i < w_laterial->rows; i++){
for(int j = 0; j < w_laterial->cols; j++){
if(i == j) continue;
w_laterial->set(i, j, c, -1*strength);
}
}
}
w_laterial->toGpu();
}
/* Device function computing: weights * spikes(:, t - 1) + recurrent_weights * o_spikes(t - 1).
 * Only first-order dynamics are considered here.
 * inputSize : number of input neurons
 * outputSize : number of output neurons
 */
__device__ float d_Spiking_accumulate_spikes(
int inputSize,
int outputSize,
float* input_response,
bool* output,
int o_idx,
float* weights,
float* weights_lat,
float* biases,
int t,
int dummyFreq,
int endTime)
{
int idx = threadIdx.x;
if(idx >= outputSize * inputSize){
return 0;
}
float response = 0.0f;
// effect from the forward-connects
response = input_response[(t - 1) + o_idx * endTime];
// effect from the bias
if(t % dummyFreq == 0){
response += biases[idx];
}
if(weights_lat != NULL){
// effect from the recurrent connections:
for(int i = 0; i < outputSize; ++i)
response += output[i + (t - 1) * outputSize] ? weights_lat[i + o_idx * outputSize] : 0;
}
return response;
}
/* given each input and output spike train,
* compute the accumulative synaptic effect as the gradient
* input: input spikes: endTime * inputSize
* output: output spikes: endTime * outputSize
*/
__device__ float d_Spiking_gradient(
bool* output,
bool* input,
float delta,
int o_idx,
int i_idx,
int outputSize,
int inputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
float acc_response = 0.0f;
int t_post_last = 1;
for(int t_post = 1; t_post < endTime; t_post++){
if(output[o_idx + t_post * outputSize] != true) continue;
float sum = 0.0f;
int ub = t_post;
int lb = max(1, int(t_post - 4*TAU_M));
for(int t_pre = lb; t_pre < ub; ++t_pre){
if(input[i_idx + t_pre * inputSize] != true) continue;
int pre_time = t_pre + T_REFRAC;
if(pre_time > t_post) continue;
int s = t_post - t_post_last;
int t = t_post - pre_time;
float factor = exp(-1*max(t - s, 0)/TAU_S)/(1 - TAU_S/TAU_M);
sum += factor * (exp(-1*min(s, t)/TAU_M) - exp(-1*min(s, t)/TAU_S));
}
t_post_last = t_post + T_REFRAC;
acc_response += sum;
}
float delta_w = delta * acc_response;
return delta_w;
}
/* given each input and output spike train of spike times,
* compute the accumulative synaptic effect as the gradient
* input: input spikes: endTime * inputSize
* output: output spikes: endTime * outputSize
*/
__device__ float d_Spiking_gradient_spiketime(
int* output_time,
int* input_time,
int n_ospikes,
int n_ispikes,
float delta,
int o_idx,
int i_idx,
float lat_factor,
int outputSize,
int inputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
float acc_response = d_Spiking_accumulate_effect(output_time, input_time, n_ospikes, n_ispikes, o_idx, i_idx, outputSize, inputSize, endTime, T_REFRAC, TAU_M, TAU_S);
float delta_w = delta * acc_response * lat_factor;
return delta_w;
}
/* compute the gradient for the bias
* input: input spikes: endTime * inputSize
* output: output spikes: endTime * outputSize
*/
__device__ float d_Spiking_bias_gradient_spiketime(
int* output_time,
int n_ospikes,
float delta,
int o_idx,
int dummyFreq,
int outputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
float acc_response = 0.0f;
int t_post_last = 1;
for(int i = 0; i < n_ospikes; ++i){
int t_post = output_time[o_idx * endTime + i];
float sum = 0.0f;
int ub = t_post;
int lb = max(1, int(t_post - 4*TAU_M));
for(int j = dummyFreq; j < endTime; j += dummyFreq){
int t_pre = j;
if(t_pre < lb || t_pre >= ub) continue;
int pre_time = t_pre + T_REFRAC;
if(pre_time > t_post) continue;
int s = t_post - t_post_last;
int t = t_post - pre_time;
float factor = exp(-1*max(t - s, 0)/TAU_S)/(1 - TAU_S/TAU_M);
sum += factor * (exp(-1*min(s, t)/TAU_M) - exp(-1*min(s, t)/TAU_S));
}
t_post_last = t_post + T_REFRAC;
acc_response += sum;
}
float delta_b = delta * acc_response;
return delta_b;
}
/*
* dim3 block = dim3(1);
* dim3 thread= dim3(256);
*/
__global__ void g_getCost_output(
int* fireCount,
float* groundTruth,
float* cost,
int* y,
int batch,
int cols,
float UNDESIRED_LEVEL,
float DESIRED_LEVEL,
float MARGIN)
{
extern __shared__ float _sum[];
int len = batch * cols;
for(int i = 0; i < len; i += blockDim.x)
{
int id = i + threadIdx.x;
if(id < len){
groundTruth[id] = UNDESIRED_LEVEL;
}
}
__syncthreads();
for(int i = 0; i < batch; i += blockDim.x)
{
int id = i + threadIdx.x;
if(id < batch){
int yy = y[id];
groundTruth[id * cols + yy] = DESIRED_LEVEL;
}
}
_sum[threadIdx.x] = 0;
__syncthreads();
for(int i = 0; i < len; i += blockDim.x)
{
int id = i + threadIdx.x;
if(id < len)
{
float diff = fabsf(float(fireCount[id]) - groundTruth[id]);
_sum[threadIdx.x] += diff > MARGIN ? diff * diff : 0;
}
}
len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1)>>1;
if(threadIdx.x < skip && (threadIdx.x + skip) < len)
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = skip;
}
__syncthreads();
if(threadIdx.x == 0)
{
cost[0] = _sum[0];
}
}
/*
* dim3 block = dim3(1);
* dim3 thread= dim3(256);
*/
__global__ void g_getDelta_output(float* outputDelta, int* fireCount, float* groundTruth, int len, float MARGIN)
{
for(int i = 0; i < len; i += blockDim.x)
{
int id = i + threadIdx.x;
if(id < len)
{
float diff = fabsf(float(fireCount[id]) - groundTruth[id]);
outputDelta[id] = diff > MARGIN ? fireCount[id] - groundTruth[id] : 0;
}
}
}
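/* Written out, the two kernels above implement (a reading of the code):
 *   groundTruth[b][o] = DESIRED_LEVEL if o == y[b], otherwise UNDESIRED_LEVEL
 *   cost              = sum over (b, o) of diff^2 with diff = |fireCount - groundTruth|,
 *                       counting only entries with diff > MARGIN
 *   outputDelta[b][o] = (fireCount - groundTruth) if diff > MARGIN, otherwise 0
 * so the delta is the derivative of the squared-count loss up to a constant factor of 2,
 * which is presumably absorbed into the learning rate. */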
/*
* dim3 block = dim3(batch);
* dim3 thread= dim3(outputSize);
*/
__global__ void g_boostWeight_output(float* outputDelta, float* sample_weights, int len)
{
int batchId = blockIdx.x;
float sample_weight = sample_weights[batchId];
int outputSize = blockDim.x;
int tid = threadIdx.x;
int target = tid + batchId * outputSize;
if(target < len)
outputDelta[target] *= sample_weight;
}
/*
* dim3 block = dim3(batch, outputSize);
* dim3 thread= min(1024, outputSize);
*/
__global__ void g_getLateralFactor_output(
int* outputs_time,
int* batchFireCount,
float w0,
int* y,
float* batchLFactor,
float vth,
int outputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
extern __shared__ float d_sum[];
int tid = threadIdx.x;
d_sum[tid] = 0;
__syncthreads();
int batchId = blockIdx.x;
int j_idx = blockIdx.y;
int outputSize2 = endTime * outputSize;
int* output_time = outputs_time + batchId * outputSize2;
int* output_fireCount = batchFireCount + batchId * outputSize;
int cls = y[batchId];
float * lateral_factors = batchLFactor + batchId * outputSize;
int f_cnt_j = output_fireCount[j_idx];
float d_j = (f_cnt_j > 0 || (f_cnt_j == 0 && j_idx == cls)) ? 1 / vth : 0;
for(int i = 0; i < outputSize; i += blockDim.x)
{
int l_idx = i + tid;
if(l_idx < outputSize && j_idx != l_idx)
{
int f_cnt_l = output_fireCount[l_idx];
float d_l = (f_cnt_l > 0 || (f_cnt_l == 0 && l_idx == cls)) ? 1 / vth : 0;
// j --> l
float e_jl = d_Spiking_accumulate_effect(output_time, output_time, f_cnt_l, f_cnt_j, l_idx, j_idx, outputSize, outputSize, endTime, T_REFRAC, TAU_M, TAU_S);
float effect_ratio_jl = (f_cnt_j == 0 || f_cnt_l == 0) ? 1 : e_jl / f_cnt_j;
// l --> j
float e_lj = d_Spiking_accumulate_effect(output_time, output_time, f_cnt_j, f_cnt_l, j_idx, l_idx, outputSize, outputSize, endTime, T_REFRAC, TAU_M, TAU_S);
float effect_ratio_lj = (f_cnt_l == 0 || f_cnt_j == 0) ? 1 : e_lj / f_cnt_l;
d_sum[tid] += effect_ratio_jl * d_l * effect_ratio_lj * d_j;
}
}
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip) < len)
{
d_sum[tid] += d_sum[tid + skip];
}
len = skip;
}
if(tid == 0)
{
lateral_factors[j_idx] = 1.0f / (1 - d_sum[0] * w0 * w0);
}
}
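/* Reading of the closing expression above: 1 / (1 - d_sum * w0 * w0) is the closed form of the
 * geometric series 1 + S + S^2 + ... with S = w0^2 * sum_l r_jl * d_l * r_lj * d_j, i.e. the
 * accumulated gain of the j -> l -> j lateral feedback loops. This interpretation comes from
 * the code itself, not from the original sources. */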
/*
* dim3 block = dim3(batch);
* dim3 thread= dim3(outputSize);
*/
__global__ void g_getMaxCount(int* fireCount, int* maxCount, int cols)
{
extern __shared__ int _max[];
int batchId = blockIdx.x;
int len = blockDim.x;
int id = threadIdx.x;
_max[id] = 0;
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x){
int ttid = tid + id;
if(ttid < cols){
_max[threadIdx.x] = max(_max[threadIdx.x], fireCount[ttid + batchId * cols]);
}
}
_max[id] = fireCount[id + batchId * cols];
while(len != 1)
{
__syncthreads();
int skip = (len + 1)>>1;
if(id < skip && (id + skip) < len)
{
_max[id] = max(_max[id], _max[id + skip]);
}
len = skip;
}
__syncthreads();
if(id == 0)
{
maxCount[batchId] = _max[0];
}
}
/*
* dim3 block = dim3(batch);
* dim3 thread= dim3(min(1024, outputSize));
*/
__global__ void g_modifySpikes(bool* outputs, int* y, int* fireCount, int target_level, int endTime, int outputSize)
{
int batchId = blockIdx.x;
int target = y == NULL ? -1 : y[batchId];
int mCnt = target_level;
bool* outputSpikes = outputs + batchId * endTime * outputSize;
for(int id = 0; id < outputSize; id += blockDim.x){
int o_idx = id + threadIdx.x;
if(o_idx < outputSize)
{
if(o_idx != target)
return;
if(fireCount[o_idx + batchId * outputSize] == 0)
{
int count = 0;
int interval = endTime / mCnt;
for(int t = interval; t < endTime; t += interval)
{
outputSpikes[o_idx + t * outputSize] = true;
count++;
}
fireCount[o_idx + batchId * outputSize] = count;
}
}
}
}
/*
* dim3 block = dim3(batch);
* dim3 thread= dim3(min(outputSize, 1024));
*/
__global__ void g_Spiking_feedforward(
float* inputs_resp,
float* w,
float* w_l,
float* b,
bool* outputs,
int* fireCount,
int inputSize,
int outputSize,
int endTime,
float vth,
int dummyFreq,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
int batchId = blockIdx.x;
int outputSize2 = endTime * outputSize;
bool* curOutput = outputs + batchId * outputSize2;
float* curInput = inputs_resp + batchId * outputSize2;//inputs_resp:batch * outputSize*endTime
int* curFireCount = fireCount + batchId * outputSize;
// simulate the spiking train
for(int tidx = 0; tidx < outputSize; tidx += blockDim.x)
{
int o_idx = tidx + threadIdx.x;
if(o_idx < outputSize)
{
float v = 0.0f;
float ep = 0.0f;
            float threshold = vth - 1e-6; // mitigate the numerical disparity due to the fast input response
int t_ref= 0;
float response = 0.0f;
int fire_count = 0;
for(int t = 0; t < endTime; t++){
// 1. leakage
v -= v / TAU_M;
ep -= ep / TAU_S;
if(t == 0)
{
curOutput[o_idx + t * outputSize] = false;
continue;
}
// 2. receive the spike inputs
__syncthreads(); // make sure all the threads has generated the spikes for the last time step
response = d_Spiking_accumulate_spikes(inputSize, outputSize, curInput, curOutput, o_idx, w, w_l, b, t, dummyFreq, endTime);
// 3. Add up the response to ep (state variable)
ep += response;
// 4. Update the vmem accordingly
v += ep/TAU_S;
if(t_ref > 0){
v = 0;
t_ref--;
}
// 5. Fire or not
curOutput[o_idx + t * outputSize] = v > threshold ? true : false;
t_ref = v > threshold ? T_REFRAC : t_ref;
fire_count += v > threshold ? 1 : 0;
v = v > threshold ? 0 : v;
}
curFireCount[o_idx] = fire_count;
}
}
}
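/* The per-time-step update in the kernel above corresponds to a discretized dual-exponential
 * synapse driving a leaky integrate-and-fire neuron (a reading of the code, not an equation
 * quoted from the original sources):
 *
 *   ep[t] = ep[t-1] * (1 - 1/TAU_S) + I[t]           // I[t]: precomputed input response
 *                                                    //       (+ bias every dummyFreq steps,
 *                                                    //        + lateral input when present)
 *   v[t]  = v[t-1]  * (1 - 1/TAU_M) + ep[t] / TAU_S
 *
 * A spike is emitted when v[t] > vth; v is then reset to 0 and held there for T_REFRAC steps. */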
/*
* dim3 block = dim3(batch, inputSize);
* dim3 thread= min(1024, outputSize);
*/
__global__ void g_Spiking_wgrad(
bool* inputs,
bool* outputs,
float* curDelta,
float* wgradTmp,
int inputSize,
int outputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
int batchId = blockIdx.x;
int i_idx = blockIdx.y;
int wSize = outputSize * inputSize;
int inputSize2 = endTime * inputSize;
int outputSize2 = endTime * outputSize;
int curDeltaSize = outputSize;
float* wgrad = wgradTmp + batchId * wSize;
bool* input = inputs + batchId * inputSize2;
bool* output = outputs + batchId * outputSize2;
float* cDelta = curDelta + batchId * curDeltaSize;
for(int i = 0; i < outputSize; i += blockDim.x)
{
int o_idx = i + threadIdx.x;
if(o_idx < outputSize)
{
float delta_w = d_Spiking_gradient(output, input, cDelta[o_idx], o_idx, i_idx, outputSize, inputSize, endTime, T_REFRAC, TAU_M, TAU_S);
wgrad[i_idx + o_idx * inputSize] = delta_w;
}
}
}
/*
* dim3 block = dim3(batch, outputSize);
* dim3 thread= min(1024, inputSize);
*/
__global__ void g_Spiking_wgrad_sideEffect(
float* weights,
int* batchFireCount,
float* batchAccEffect,
float vth,
int inputSize,
int outputSize,
float * batchSideEffect)
{
int batchId = blockIdx.x;
int o_idx = blockIdx.y;
int tid = threadIdx.x;
extern __shared__ float _sum[];
_sum[tid] = 0;
__syncthreads();
int wSize = outputSize * inputSize;
int* fireCount = batchFireCount + batchId * outputSize;
float* acc_effect= batchAccEffect + batchId * wSize;
float* side_effect = batchSideEffect + batchId * outputSize;
int o_cnt = fireCount[o_idx];
for(int i = 0; i < inputSize; i += blockDim.x)
{
int idx = i + tid;
if(idx < inputSize)
{
float w = weights[idx + o_idx * inputSize];
float e = acc_effect[idx + o_idx * inputSize];
float ratio = o_cnt == 0 ? 0.5 : e/o_cnt;
_sum[tid] += w * ratio;
}
}
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip) < len)
{
_sum[tid] += _sum[tid + skip];
}
len = skip;
}
if(tid == 0){
side_effect[o_idx] = _sum[0]/vth;
}
}
/*
* dim3 block = dim3(batch, outputSize);
* dim3 thread= min(1024, inputSize);
*/
__global__ void g_Spiking_wgrad_spiketime(
float* batchSideEffect,
float* batchAccEffect,
float* curDelta,
float* latFactor,
float* wgradTmp,
int inputSize,
int outputSize)
{
int batchId = blockIdx.x;
int o_idx = blockIdx.y;
int tid = threadIdx.x;
int wSize = outputSize * inputSize;
int curDeltaSize = outputSize;
float* wgrad = wgradTmp + batchId * wSize;
float* acc_effect = batchAccEffect + batchId * wSize;
float* side_effect = batchSideEffect + batchId * outputSize;
float* cDelta = curDelta + batchId * curDeltaSize;
float* lFactor = latFactor == NULL ? NULL : latFactor + batchId * curDeltaSize;
float s_effect = side_effect[o_idx];
float latFac = lFactor == NULL ? 1.0f : lFactor[o_idx];
float delta = cDelta[o_idx];
for(int i = 0; i < inputSize; i += blockDim.x)
{
int i_idx = i + tid;
if(i_idx < inputSize)
{
float compen_effect = acc_effect[i_idx + o_idx * inputSize] * (1 + s_effect);
float delta_w = delta * compen_effect * latFac;
wgrad[i_idx + o_idx * inputSize] = delta_w;
}
}
}
/*
* dim3 block = dim3(batch);
* dim3 thread= dim3(min(1024, outputSize));
*/
__global__ void g_Spiking_bgrad_spiketime(
int* outputs_time,
int* batchFireCount,
float* curDelta,
float* bgradTmp,
int outputSize,
int endTime,
int dummyFreq,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
int batchId = blockIdx.x;
int bSize = outputSize;
int outputSize2 = endTime * outputSize;
int curDeltaSize = outputSize;
float* bgrad = bgradTmp + batchId * bSize;
int* output_time = outputs_time + batchId * outputSize2;
int* output_fireCount = batchFireCount + batchId * outputSize;
float* cDelta = curDelta + batchId * curDeltaSize;
for(int i = 0; i < outputSize; i += blockDim.x)
{
int o_idx = i + threadIdx.x;
if(o_idx < outputSize)
{
float delta_b = d_Spiking_bias_gradient_spiketime(output_time, output_fireCount[o_idx], cDelta[o_idx], o_idx, dummyFreq, outputSize, endTime, T_REFRAC, TAU_M, TAU_S);
bgrad[o_idx] = delta_b;
}
}
}
/*
* dim3 block = dim3(batch, inputSize);
* dim3 thread= min(1024, outputSize);
*/
__global__ void g_Spiking_synaptic_effect(
int* inputs_time,
int* outputs_time,
int* batchPreFireCount,
int* batchFireCount,
float* w,
float* batchAccEffect,
float* effectRatio,
int inputSize,
int outputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
int batchId = blockIdx.x;
int i_idx = blockIdx.y;
int wSize = outputSize * inputSize;
int inputSize2 = endTime * inputSize;
int outputSize2 = endTime * outputSize;
int* input_time = inputs_time + batchId * inputSize2;
int* output_time = outputs_time + batchId * outputSize2;
int* input_fireCount = batchPreFireCount + batchId * inputSize;
int* output_fireCount = batchFireCount + batchId * outputSize;
float* acc_effect = batchAccEffect + batchId * wSize;
for(int i = 0; i < outputSize; i += blockDim.x)
{
int o_idx = i + threadIdx.x;
if(o_idx < outputSize)
{
float e = d_Spiking_accumulate_effect(output_time, input_time, output_fireCount[o_idx], input_fireCount[i_idx], o_idx, i_idx, outputSize, inputSize, endTime, T_REFRAC, TAU_M, TAU_S);
acc_effect[i_idx + o_idx * inputSize] = e;
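			// cache w * (e / presynaptic fire count); backpropagation() multiplies curDelta by this matrix to form preDelta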
if(effectRatio != NULL){
int o_cnt = output_fireCount[o_idx];
int i_cnt = input_fireCount[i_idx];
float ratio = i_cnt == 0 || o_cnt == 0 ? 1 : e / float(i_cnt);
effectRatio[i_idx + o_idx * inputSize] = ratio * w[i_idx + o_idx * inputSize];
}
}
}
}
/*
* dim3 block = dim3(batch, inputSize);
* dim3 thread= min(1024, outputSize);
*/
__global__ void g_Spiking_debug_spiketime(
int* inputs_time,
int* outputs_time,
int* batchPreFireCount,
int* batchFireCount,
int inputSize,
int outputSize,
int endTime)
{
int batchId = blockIdx.x;
int i_idx = blockIdx.y;
int inputSize2 = endTime * inputSize;
int outputSize2 = endTime * outputSize;
int* input_time = inputs_time + batchId * inputSize2;
int* output_time = outputs_time + batchId * outputSize2;
	int* input_fireCount = batchPreFireCount + batchId * inputSize;
int* output_fireCount = batchFireCount + batchId * outputSize;
for(int i = 0; i < outputSize; i += blockDim.x)
{
int o_idx = i + threadIdx.x;
if(o_idx < outputSize)
{
if(i_idx == I_IDX && o_idx == O_IDX){
printf("Input %d fires: ", i_idx);
for(int i = 0; i < input_fireCount[i_idx]; i++) printf("%d\t", input_time[i_idx * endTime + i]);
printf("\n");
printf("Output %d fires: ", o_idx);
for(int j = 0; j < output_fireCount[o_idx]; j++) printf("%d\t", output_time[o_idx * endTime + j]);
printf("\n");
}
}
}
}
| c389fed9b8c85f086c2d84aaccca2bc10a2f567a.cu | #include "Spiking.h"
#include "../common/cuBase.h"
#include "../common/Config.h"
#include "../common/util.h"
#include "../readData/readSpeechData.h"
#include <fstream>
#include <assert.h>
#include <math.h>
//#define DEBUG
#define I_IDX 0
#define O_IDX 0
/*
* Device func for accumulate the spike response
*
*/
__device__ float d_Spiking_accumulate_spikes(
int inputSize,
int outputSize,
float* input_resp,
bool* output,
int o_idx,
float* weights,
float* weights_lat,
float* biases,
int t,
int dummyFreq,
int endTime);
/*
* Device func for spike gradient for each pair of binary spike response
*/
__device__ float d_Spiking_gradient(
bool* output,
bool* input,
float delta,
int o_idx,
int i_idx,
int outputSize,
int inputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
* Device func for spike gradient for each pair of spike train in times
*/
__device__ float d_Spiking_gradient_spiketime(
int* output_time,
int* input_time,
int n_ospikes,
int n_ispikes,
float delta,
int o_idx,
int i_idx,
float lat_factor,
int outputSize,
int inputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
* Device func for spike gradient for bias in times
*/
__device__ float d_Spiking_bias_gradient_spiketime(
int* output_time,
int n_ospikes,
float delta,
int o_idx,
int dummyFreq,
int outputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
* dim3 block = dim3(1);
* dim3 thread= dim3(256);
*/
__global__ void g_getCost_output(
int* fireCount,
float* groundTruth,
float* cost,
int* y,
int batch,
int cols,
float UNDESIRED_LEVEL,
float DESIRED_LEVEL,
float MARGIN);
/*
* dim3 block = dim3(1);
* dim3 thread= dim3(256);
*/
__global__ void g_getDelta_output(
float* outputDelta,
int* fireCount,
float* groundTruth,
int len,
float MARGIN);
/*
* dim3 block = dim3(batch);
* dim3 thread= dim3(min(1024, outputSize));
*/
__global__ void g_getMaxCount(
int* fireCount,
int* maxCount,
int cols);
/*
* dim3 block = dim3(batch, inputSize);
* dim3 thread= min(1024, outputSize);
*/
__global__ void g_Spiking_wgrad(
bool* inputs,
bool* outputs,
float* curDelta,
float* wgradTmp,
int inputSize,
int outputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
* dim3 block = dim3(batch, outputSize);
* dim3 thread= min(1024, inputSize);
*/
__global__ void g_Spiking_wgrad_sideEffect(
float* weights,
int* batchFireCount,
float* batchAccEffect,
float vth,
int inputSize,
int outputSize,
float * batchSideEffect);
/*
* dim3 block = dim3(batch, outputSize);
* dim3 thread= min(1024, inputSize);
*/
__global__ void g_Spiking_wgrad_spiketime(
float* batchSideEffect,
float* batchAccEffect,
float* curDelta,
float* latFactor,
float* wgradTmp,
int inputSize,
int outputSize);
/*
 * dim3 block = dim3(batch);
 * dim3 thread= dim3(min(1024, outputSize));
*/
__global__ void g_Spiking_bgrad_spiketime(
int* outputs_time,
int* batchFireCount,
float* curDelta,
float* bgradTmp,
int outputSize,
int endTime,
int dummyFreq,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
* block = dim3(outputSize * inputSize);
* thread= dim3(batch);
*/
__global__ void g_Spiking_gradAdd(
float* wgradTmp,
float* wgrad,
float* w,
float* w_sq_sum,
int batch,
float lambda,
float beta,
float limit,
int inputSize,
int wArea);
/*
* dim3 block = dim3(batch, inputSize);
* dim3 thread= min(1024, outputSize);
*/
__global__ void g_Spiking_debug_spiketime(
int* inputs_time,
int* outputs_time,
int* batchPreFireCount,
int* batchFireCount,
int inputSize,
int outputSize,
int endTime);
void Spiking::calCost()
{
cost->gpuClear();
if(predict == NULL){
printf("Warning::Try to compute the cost when the predict is not properly set!\n ");
return;
}
g_getCost_output<<<dim3(1), dim3(256), sizeof(float) * 256>>>(fireCount->getDev(),
groundTruth->getDev(),
cost->getDev(),
predict,
batch,
fireCount->cols,
UNDESIRED_LEVEL,
DESIRED_LEVEL,
MARGIN);
cudaStreamSynchronize(0);
getLastCudaError("Spiking:g_getCost_output");
}
void Spiking::feedforward()
{
if((inputs == NULL))
{
printf("Spiking init error\n");
exit(0);
}
// fast input response
g_cast_bool_2_float<<<dim3(batch, endTime), min(1024, inputSize)>>>(inputs->getDev(), endTime, inputSize, inputs->cols, inputs->channels, batch, inputs_float->getDev());
matrixMul(w, inputs_float, inputs_resp_tmp); //input_resp_tmp rows:outputSize; cols:endTime*batch
g_transform_2_batch<<<dim3(batch, outputSize), min(1024, endTime)>>>(inputs_resp_tmp->getDev(), endTime, outputSize, batch, inputs_resp->getDev());
// convert (batch, inputDim2*endTime, amount) to (batch, amount*inputDim2*endTime, 1)
g_convert_spiketimes<<<dim3(batch, endTime), min(1024, inputSize)>>>(inputs_time->getDev(), endTime, inputSize, inputs_time->cols, inputs_time->channels, batch, inputs_time_format->getDev());
// convert (batch, inputDim2, amount) to (batch, amount*inputDim2, 1)
g_convert_firecounts<<<dim3(batch), min(1024, inputSize)>>>(preFireCount->getDev(), preFireCount->getArea(), inputSize, preFireCount->cols, preFireCount->channels, batch, preFireCount_format->getDev());
dim3 thread= dim3(min(1024, outputSize));
dim3 block = dim3(batch);
ConfigSpiking * config = (ConfigSpiking*) Config::instance()->getLayerByName(m_name);
int dummyFreq = config->getBiasFreq();
g_Spiking_feedforward<<<block, thread>>>(
inputs_resp->getDev(),
w->getDev(),
w_laterial == NULL ? NULL : w_laterial->getDev(),
b->getDev(),
outputs->getDev(),
fireCount->getDev(),
inputSize,
outputSize,
endTime,
threshold,
dummyFreq,
T_REFRAC,
TAU_M,
TAU_S);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("Spiking::g_Spiking_feedforward");
block = dim3(batch, 1);
thread = dim3(min(outputSize, 1024));
// transform the binary response matrix to the spike times
g_response_2_spiketime<<<block, thread>>>(
outputs->getDev(),
outputs_time->getDev(),
outputs->getArea(),
outputSize,
endTime);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("Spiking:g_response_2_spiketime");
}
void Spiking::backpropagation()
{
if(m_name == std::string("output")){
// compute the cost function
g_getCost_output<<<dim3(1), dim3(256), sizeof(float) * 256>>>(fireCount->getDev(), groundTruth->getDev(), cost->getDev(), predict, batch, fireCount->cols, UNDESIRED_LEVEL, DESIRED_LEVEL, MARGIN);
cudaStreamSynchronize(0);
getLastCudaError("Spiking::g_getCost_output");
// compute the delta (error)
g_getDelta_output<<<dim3(1), dim3(256)>>>(curDelta->getDev(), fireCount->getDev(), groundTruth->getDev(), curDelta->getLen(), MARGIN);
cudaStreamSynchronize(0);
getLastCudaError("Spiking::g_getDelta_output");
// apply the sample weights
g_boostWeight_output<<<dim3(batch), dim3(outputSize)>>>(curDelta->getDev(), sample_weights, curDelta->getLen());
cudaStreamSynchronize(0);
getLastCudaError("Spiking::g_boostWeight_output");
// compute the lateral factors if applicable
if(lateralFactor != NULL && w_laterial != NULL){
int threads = min(outputSize, 1024);
g_getLateralFactor_output<<<dim3(batch, outputSize), threads, sizeof(float) * threads>>>(
outputs_time->getDev(),
fireCount->getDev(),
lateralW,
predict,
lateralFactor->getDev(),
threshold,
outputSize,
endTime,
T_REFRAC,
TAU_M,
TAU_S);
cudaStreamSynchronize(0);
getLastCudaError("Spiking::g_getLateralFactor_output");
}
// modify the output spikes of the target neuron if it does not fire
// tricky: modify both the spike trains and output fire counts!
g_modifySpikes<<<dim3(batch), dim3(min(outputSize, 1024))>>>(outputs->getDev(), predict, fireCount->getDev(), DESIRED_LEVEL, endTime, outputSize);
cudaStreamSynchronize(0);
getLastCudaError("Spiking::g_modifySpikes");
// retransform the binary matrix to the spike times since the outputs might be changed
g_response_2_spiketime<<<dim3(batch, 1), dim3(min(outputSize, 1024))>>>(
outputs->getDev(),
outputs_time->getDev(),
outputs->getArea(),
outputSize,
endTime);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("Spiking:g_response_2_spiketime");
}
// pre compute the accumulative synaptic effect, and effect ratio (if applicable)
dim3 thread = dim3(min(1024, outputSize));
dim3 block = dim3(batch, inputSize);
cudaFuncSetCacheConfig(g_Spiking_synaptic_effect, cudaFuncCachePreferL1);
g_Spiking_synaptic_effect<<<block, thread>>>(
inputs_time_format->getDev(),
outputs_time->getDev(),
preFireCount_format->getDev(),
fireCount->getDev(),
w->getDev(),
accEffect->getDev(),
effectRatio == NULL ? NULL : effectRatio->getDev(),
inputSize,
outputSize,
endTime,
T_REFRAC,
TAU_M,
TAU_S);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_Spiking_synaptic_effect");
// divide the curDelta by vth
block = dim3(batch, 1);
thread = dim3(min(1024, outputSize));
g_divide_by_threshold<<<block, thread>>>(curDelta->getDev(), curDelta->getArea(), curDelta->cols, threshold);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_divide_by_threshold");
// compute preDelta: curDelta: batch * outputSize; w: outputSize * inputSize
if(preDelta == NULL){
ConfigSpiking* config = (ConfigSpiking*)Config::instance()->getLayerByName(m_name);
assert(config->m_input == "data");
}
else{
if(effectRatio != NULL){
matrixMul(curDelta, effectRatio, preDelta_format);
}
else{
matrixMul(curDelta, w, preDelta_format);
}
// preDelta_format: (batch, channels * size, 1) -> preDelta: (batch, size, channels)
block = batch;
thread = min(512, preDelta->channels * preDelta->cols);
g_preDeltaFormat<<<block, thread>>>(preDelta_format->getDev(), preDelta->getDev(),
preDelta->rows, preDelta->cols, preDelta->channels);
cudaStreamSynchronize(0);
getLastCudaError("g_preDeltaFormat");
}
}
/*
* block = dim3(outputSize, 1);
* thread= dim3(min(inputSize, 1024));
*/
__global__ void g_Spiking_calSquareSum(
float* w,
float* w_sq_sum,
int outputSize,
int inputSize,
float weight_limit)
{
extern __shared__ float _sum[];
int o_id = blockIdx.x;
int tid = threadIdx.x;
_sum[tid] = 0;
__syncthreads();
for(int i = 0; i < inputSize; i += blockDim.x)
{
int id = i + tid;
if(id < inputSize)
{
int wid = id + o_id * inputSize;
float weight = w[wid];
_sum[tid] += (weight/weight_limit) * (weight/weight_limit);
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip) < len)
{
_sum[tid] += _sum[tid + skip];
}
len = skip;
}
if(tid == 0)
w_sq_sum[o_id] = _sum[0] / inputSize;
}
/*
* block = dim3(outputSize * inputSize);
* thread= dim3(batch);
*/
__global__ void g_Spiking_gradAdd(
float* wgradTmp,
float* wgrad,
float* w,
float* w_sq_sum,
int batch,
float lambda,
float beta,
float limit,
int inputSize,
int wArea)
{
extern __shared__ float _sum[];
int wid = blockIdx.x;
int tid = threadIdx.x;
_sum[tid] = 0;
__syncthreads();
for(int i = 0; i < batch; i += blockDim.x)
{
int b = i + threadIdx.x;
if(b < batch)
{
_sum[threadIdx.x] += wgradTmp[b * wArea + wid];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip) < len)
{
_sum[tid] += _sum[tid + skip];
}
len = skip;
}
if(tid == 0)
{
float sq_sum = w_sq_sum[wid / inputSize];
wgrad[wid] = _sum[0] / batch + lambda*beta*(w[wid]/limit)*__expf(beta*(sq_sum - 1));
}
}
void Spiking::getGrad()
{
dim3 thread = dim3(min(1024, inputSize));
dim3 block = dim3(batch, outputSize);
g_Spiking_wgrad_sideEffect<<<block, thread, sizeof(float) * min(1024, inputSize)>>>(
w->getDev(),
fireCount->getDev(),
accEffect->getDev(),
threshold,
inputSize,
outputSize,
sideEffect->getDev());
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_Spiking_wgrad_sideEffect");
cudaFuncSetCacheConfig(g_Spiking_wgrad_spiketime,cudaFuncCachePreferL1);
g_Spiking_wgrad_spiketime<<<block, thread>>>(
sideEffect->getDev(),
accEffect->getDev(),
curDelta->getDev(),
lateralFactor == NULL ? NULL : lateralFactor->getDev(),
wgradTmp->getDev(),
inputSize,
outputSize);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_Spiking_wgrad_spiketime");
#ifdef DEBUG
g_Spiking_debug_spiketime<<<block, thread>>>(inputs_time->getDev(), outputs_time->getDev(), preFireCount->getDev(), fireCount->getDev(), inputSize, outputSize, endTime);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_Spiking_debug_spiketime");
#endif
block = dim3(outputSize);
thread = dim3(min(inputSize, 1024));
g_Spiking_calSquareSum<<<block, thread, sizeof(float) * min(inputSize, 1024)>>>(
w->getDev(),
weightSqSum->getDev(),
outputSize,
inputSize,
weightLimit);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_Spiking_calSquareSum");
block = dim3(outputSize * inputSize);
thread = dim3(batch);
g_Spiking_gradAdd<<<block, thread, sizeof(float) * batch>>>(
wgradTmp->getDev(),
wgrad->getDev(),
w->getDev(),
weightSqSum->getDev(),
batch,
lambda,
beta,
weightLimit,
inputSize,
w->getArea());
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_Spiking_gradAdd");
// add the bias derivation here:
}
void Spiking::updateWeight()
{
dim3 block = min((w->getLen() + 255)/ 256, 5120);
dim3 thread = 256;
if(Config::instance()->getOptimizerType() == std::string("adam")){
g_adam_vecAdd<<<block, thread, 0, Layers::instance()->get_stream()>>>(
g1_w->getDev(),
g2_w->getDev(),
b1_t,
b2_t,
wgrad->getDev(),
w->getDev(),
w->getLen(),
Config::instance()->getLrate());
b1_t *= 0.9f; b2_t *= 0.999f;
}
else{
g_sgd_vecAdd<<<block, thread, 0, Layers::instance()->get_stream()>>>(
momentum_w->getDev(),
wgrad->getDev(),
w->getDev(),
w->getLen(),
Config::instance()->getMomentum(),
Config::instance()->getLrate());
}
// handle the bias here
}
Spiking::Spiking(std::string name)
{
m_name = name;
ConfigSpiking* config = (ConfigSpiking*)Config::instance()->getLayerByName(m_name);
SpikingLayerBase * preLayer = (SpikingLayerBase*)Layers::instance()->get(config->m_input);
inputs = preLayer->getSpikingOutputs();
inputs_time = preLayer->getSpikingTimeOutputs();
inputs_time_format = new cuMatrix<int>(inputs_time->rows, inputs_time->cols * inputs_time->channels, 1);
preDelta = preLayer->getCurDelta();
preDelta_format = NULL;
if(preDelta != NULL)
preDelta_format = new cuMatrix<float>(preDelta->rows, preDelta->cols * preDelta->channels, 1);
preFireCount = preLayer->getFireCount();
preFireCount_format = new cuMatrix<int>(preFireCount->rows, preFireCount->cols * preFireCount->channels, 1);
endTime = Config::instance()->getEndTime();
batch = Config::instance()->getBatchSize();
lambda = Config::instance()->getLambda();
beta = Config::instance()->getBeta();
T_REFRAC = config->m_t_ref;
TAU_M = config->m_tau_m;
TAU_S = config->m_tau_s;
inputSize = inputs->cols * inputs->channels / endTime;
outputSize = config->m_numNeurons;
weightLimit = Config::instance()->getWeightLimit();
UNDESIRED_LEVEL = config->m_undesired_level;
DESIRED_LEVEL = config->m_desired_level;
MARGIN = config->m_margin;
outputs = new cuMatrix<bool>(batch, outputSize * endTime, 1);
outputs_time = new cuMatrix<int>(batch, outputSize * endTime, 1);
// for fast input response
inputs_resp_tmp = new cuMatrix<float>(outputSize, endTime * batch, 1);
inputs_resp = new cuMatrix<float>(batch, outputSize * endTime, 1);
inputs_float = new cuMatrix<float>(inputSize, endTime * batch, 1);
curDelta = new cuMatrix<float>(batch, outputSize, 1);
fireCount= new cuMatrix<int>(batch, outputSize, 1);
weightSqSum = new cuMatrix<float>(outputSize, 1, 1);
maxCount = new cuMatrix<int>(batch, 1, 1);
accEffect = new cuMatrix<float>(batch, outputSize * inputSize, 1);
sideEffect = new cuMatrix<float>(batch, outputSize, 1);
predict = NULL;
// only for the output
if(config->m_name == std::string("output")){
groundTruth = new cuMatrix<float>(batch, outputSize, 1);
cost = new cuMatrix<float>(1, 1, 1);
}
else{
groundTruth = NULL;
cost = NULL;
}
assert(outputSize > 0 && inputSize > 0);
w = new cuMatrix<float>(outputSize, inputSize, 1);
b = new cuMatrix<float>(outputSize, 1, 1);
wgrad = new cuMatrix<float>(outputSize, inputSize, 1);
bgrad = new cuMatrix<float>(outputSize, 1, 1);
wgradTmp = new cuMatrix<float>(batch, outputSize * inputSize, 1);
if(config->hasLaterialWeight() == true){
w_laterial = new cuMatrix<float>(outputSize, outputSize, 1);
}
else
w_laterial = NULL;
threshold = config->m_vth;
// lateral inihibition factor for the output
lateralFactor = NULL;
lateralW = 0.0f;
if(config->hasLaterialInh() == true && config->m_name == std::string("output")){
lateralFactor = new cuMatrix<float>(batch, outputSize, 1);
lateralW = config->m_localInbStrength;
}
// use the e^k_{i|j} / o^{k-1}_j for estimating the grad of effect w.r.t to fire count
// notice that this variable is w[i][j] * e^k_{i|j} / o^{k-1}_j !
effectRatio = NULL;
if(Config::instance()->useEffectRatio()){
if(batch > 1){
printf("Must set batch size to 1 if use effect ratio for grad of synaptic effect.\n");
printf("Current batch size: %d\n", batch);
assert(batch <= 1);
}
effectRatio = new cuMatrix<float>(outputSize, inputSize, 1);
}
momentum_w = new cuMatrix<float>(outputSize, inputSize, 1);
momentum_b = new cuMatrix<float>(outputSize, 1, 1);
g1_w = new cuMatrix<float>(outputSize, inputSize, 1); // for adam
g1_b = new cuMatrix<float>(outputSize, 1, 1);
g2_w = new cuMatrix<float>(outputSize, inputSize, 1);
g2_b = new cuMatrix<float>(outputSize, 1, 1);
b1_t = 0.9;
b2_t = 0.999;
this->initRandom();
w_ref = NULL;
w_laterial_ref = NULL;
b_ref = NULL;
if(Config::instance()->getIsGradientChecking())
this->loadRef(); // for verification purpose
Layers::instance()->set(m_name, this);
}
void Spiking::save(FILE* file)
{
w->toCpu();
b->toCpu();
for(int c = 0; c < w->channels; c++){
for(int i = 0; i < w->rows; i++){
for(int j = 0; j < w->cols; j++){
fprintf(file, "%f ", w->get(i, j, c));
}
}
}
if(w_laterial != NULL){
for(int c = 0; c < w_laterial->channels; c++){
for(int i = 0; i < w_laterial->rows; i++){
for(int j = 0; j < w_laterial->cols; j++){
fprintf(file, "%f ", w_laterial->get(i, j, c));
}
}
}
}
for(int c = 0; c < b->channels; c++){
for(int i = 0; i < b->rows; i++){
for(int j = 0; j < b->cols; j++){
fprintf(file, "%f ", b->get(i, j, c));
}
}
}
}
void Spiking::clearMomentum()
{
momentum_b->gpuClear();
momentum_w->gpuClear();
}
void Spiking::verify(const std::string& phrase)
{
printf("Verify for the layer: %s at %s phrase.\n", m_name.c_str(), phrase.c_str());
if(phrase == std::string("train"))
{
if(!output_train_ref.empty()){
outputs->toCpu();
checkMatrixIsSame(output_train_ref[0], outputs, outputSize);
}
}
else if(phrase == std::string("test"))
{
if(w_ref != NULL){
w->toCpu();
checkMatrixIsSame(w_ref, w);
}
if(w_laterial_ref != NULL && w_laterial != NULL){
w_laterial->toCpu();
checkMatrixIsSame(w_laterial_ref, w_laterial);
}
if(b_ref != NULL){
b->toCpu();
checkMatrixIsSame(b_ref, b);
}
if(!output_test_ref.empty()){
outputs->toCpu();
checkMatrixIsSame(output_test_ref[0], outputs, outputSize);
}
}
printf("Verification for the layer: %s at %s phrase. Pased!!\n", m_name.c_str(), phrase.c_str());
}
//* load the reference weights and output spikes for verification
void Spiking::loadRef()
{
if(batch != 1){
printf("Only do the verification for one batch and one sample!\n");
exit(0);
}
ConfigSpiking * config = (ConfigSpiking*)Config::instance()->getLayerByName(m_name);
if(config->m_ref_weight_path != std::string("NULL")){
w_ref = new cuMatrix<float>(outputSize, inputSize, 1);
initFromDumpfile(config->m_ref_weight_path, w_ref);
if(config->hasBias()){
b_ref = new cuMatrix<float>(outputSize, 1, 1);
initBiasFromDumpfile(config->m_ref_weight_path, b_ref);
}
}
if(config->m_ref_lweight_path != std::string("NULL")){
w_laterial_ref = new cuMatrix<float>(outputSize, outputSize, 1);
initFromDumpfile(config->m_ref_lweight_path, w_laterial_ref);
}
if(config->m_ref_output_train_path != std::string("NULL")){
read_each_speech_dump(config->m_ref_output_train_path, output_train_ref, endTime, outputSize);
assert(output_train_ref.size() == 1 && output_train_ref[0] != NULL);
output_train_ref[0]->rows = 1;
output_train_ref[0]->cols = endTime * outputSize;
}
if(config->m_ref_output_test_path != std::string("NULL")){
read_each_speech_dump(config->m_ref_output_test_path, output_test_ref, endTime, outputSize);
assert(output_test_ref.size() == 1 && output_test_ref[0] != NULL);
output_test_ref[0]->rows = 1;
output_test_ref[0]->cols = endTime * outputSize;
}
}
void Spiking::initRandom()
{
ConfigSpiking * config = (ConfigSpiking*)Config::instance()->getLayerByName(m_name);
float initW = config->m_initW;
if(config->isGaussian()){
float epsilon = initW;
for(int c = 0; c < w->channels; c++)
{
createGaussian(w->getHost() + c * w->getArea(),
outputSize, inputSize, w->channels, epsilon);
}
w->toGpu();
}
else if(config->isBernoulli()){
for(int j = 0; j < w->getLen(); j++){
w->getHost()[j] = initW * (2.0f * rand() / RAND_MAX - 1.0f);
//printf("%f ", w->getHost()[j]);
}//printf("\n");
w->toGpu();
}
else if(config->isFixed()){
// one input connects to nconnect randomly selected outputs, with initW/-initW
int nconnect = config->m_weightConnect;
assert(nconnect > 0);
for(int c = 0; c < w->channels; ++c){
for(int i = 0; i < w->rows; ++i){
for(int t = 0; t < nconnect; ++t){
int j = rand() % inputSize;
if(rand() % 2 == 0)
w->set(i, j, c, initW);
else
w->set(i, j, c, -1.0*initW);
//printf("input_%d to reservoir_%d : %f\n", j, i, w->get(i, j, c));
}
}
}
w->toGpu();
}
else if(config->isExternal()){
initFromDumpfile(config->m_weightPath, w);
}
if(config->hasLaterialWeight()){
initLaterial();
}
}
void Spiking::initFromCheckpoint(FILE* file)
{
float val = 0;
for(int c = 0; c < w->channels; c++){
for(int i = 0; i < w->rows; i++){
for(int j = 0; j < w->cols; j++){
if(fscanf(file, "%f", &val) == EOF)
{
char logStr[256];
sprintf(logStr, "scanf fail for layer: %s\n", m_name.c_str());
LOG(logStr, "Result/log.txt");
assert(0);
}
w->set(i, j, c, val);
}
}
}
if(w_laterial != NULL){
for(int c = 0; c < w_laterial->channels; c++){
for(int i = 0; i < w_laterial->rows; i++){
for(int j = 0; j < w_laterial->cols; j++){
if(fscanf(file, "%f", &val) == EOF)
{
char logStr[256];
sprintf(logStr, "scanf fail for layer: %s\n", m_name.c_str());
LOG(logStr, "Result/log.txt");
}
w_laterial->set(i, j, c, val);
}
}
}
}
for(int c = 0; c < b->channels; c++){
for(int i = 0; i < b->rows; i++){
for(int j = 0; j < b->cols; j++){
if(fscanf(file, "%f", &val) == EOF)
{
char logStr[256];
sprintf(logStr, "scanf fail for layer: %s\n", m_name.c_str());
LOG(logStr, "Result/log.txt");
assert(0);
}
b->set(i, j, c, val);
}
}
}
w->toGpu();
b->toGpu();
}
//* initialize the weights from the file dumped by the CPU sim
void Spiking::initFromDumpfile(const std::string& filename, cuMatrix<float>*& cuW)
{
ifstream f_in(filename.c_str());
if(!f_in.is_open()){
printf("Cannot open the file: %s\n", filename.c_str());
exit(EXIT_FAILURE);
}
assert(cuW != NULL);
std::vector<std::vector<float> > weights(cuW->rows, std::vector<float>(cuW->cols, 0.0f));
int idx;
float weight;
std::string pre_name, post_name;
while(f_in>>idx>>pre_name>>post_name>>weight){
int pre = extractNeuronIndex(pre_name);
int post = extractNeuronIndex(post_name);
if(post >= weights.size() || pre >= weights[0].size()){
if(pre == weights[0].size() && post < weights.size()){ // this is related to bias
continue;
}
else{
printf("Read the file: %s, in line: %d\n", filename.c_str(), idx);
printf("Post: %d, OutputDim: %d\n Pre: %d, InputDim: %d\n", post, (int)weights.size(), pre, (int)weights[0].size());
assert(post < weights.size() && pre < weights[0].size());
}
}
weights[post][pre] += weight;
}
for(int c = 0; c < cuW->channels; c++){
for(int i = 0; i < cuW->rows; i++){
for(int j = 0; j < cuW->cols; j++){
cuW->set(i, j, c, weights[i][j]);
}
}
}
cuW->toGpu();
// verify that the weights is correctly copied!
for(int i = 0; i < weights.size(); ++i){
for(int j = 0; j < weights[0].size(); ++j){
assert(fabsf(cuW->get(i, j, 0) - weights[i][j]) < 1e-4);
}
}
}
//* initialize the bias weights from the file dumped by the CPU sim
void Spiking::initBiasFromDumpfile(const std::string& filename, cuMatrix<float>*& cuW)
{
ifstream f_in(filename.c_str());
if(!f_in.is_open()){
printf("Cannot open the file: %s\n", filename.c_str());
exit(EXIT_FAILURE);
}
assert(cuW != NULL);
int idx;
float weight;
std::string pre_name, post_name;
while(f_in>>idx>>pre_name>>post_name>>weight){
int pre = extractNeuronIndex(pre_name);
int post = extractNeuronIndex(post_name);
if(pre == inputSize && post < outputSize){ // this is related to bias
cuW->set(post, 0, 0, weight);
}
}
cuW->toGpu();
}
void Spiking::initLaterial()
{
ConfigSpiking* config = (ConfigSpiking*)Config::instance()->getLayerByName(m_name);
if(config->m_laterialType == "RESERVOIR"){
initFromDumpfile(config->m_lweightPath, w_laterial);
//initReservoirConnection(config->m_reservoirDim);
}
else if(config->m_laterialType == "LOCAL_INHIBITION"){
initLocalInhibition(config->m_localInbStrength);
}
}
// initialize the reservoir connections
// TODO: improve the randomness of the reservoir (the bad random seed we used now!)
void Spiking::initReservoirConnection(const std::vector<int>& reservoirDim)
{
assert(reservoirDim.size() == 3);
assert(w_laterial != NULL);
int d1 = reservoirDim[0], d2 = reservoirDim[1], d3 = reservoirDim[2];
int num = d1 * d2 * d3;
if(num != outputSize){
printf("The reservoir dim: %d x %d x %d = %d does not match the number neuron: %d!\n",d1, d2, d3, num, outputSize);
exit(EXIT_FAILURE);
}
// adopted from the CPU code:
srand(5);
std::vector<bool> excitatory(num, false);
std::vector<dim3> coordinates;
for(int i = 0; i < excitatory.size(); ++i){
if(rand() % 100 < 20) excitatory[i] = false;
else excitatory[i] = true;
}
for(int i = 0; i < d1; ++i){
for(int j = 0; j < d2; ++j){
for(int k = 0; k < d3; ++k){
int index = (i * d2 + j) * d3 + k;
assert(index < excitatory.size());
coordinates.push_back(dim3(i, j, k));
}
}
}
double c, a;
double distsq, dist;
const double factor2 = 1.5;
for(int i = 0; i < num; ++i){
for(int j = 0; j < num; ++j){
if(excitatory[i]){
if(excitatory[j]){
c = 0.3 * factor2;
a = 1;
}
else{
c = 0.2 * factor2;
a = 1;
}
}
else{
if(excitatory[j]){
c = 0.4 * factor2;
a = -1;
}
else{
c = 0.1 * factor2;
a = -1;
}
}
distsq = 0;
dist = coordinates[i].x - coordinates[j].x;
distsq += dist * dist;
dist = coordinates[i].y - coordinates[j].y;
distsq += dist * dist;
dist = coordinates[i].z - coordinates[j].z;
distsq += dist * dist;
if(rand() % 100000 < 100000 * c * exp(-distsq / 4)){
//printf("reservoir_%d to reservoir_%d %f\n", i , j, a);
w_laterial->set(j, i, 0, a);
}
}
}
w_laterial->toGpu();
}
void Spiking::initLocalInhibition(float strength)
{
assert(w_laterial != NULL);
for(int c = 0; c < w_laterial->channels; c++){
for(int i = 0; i < w_laterial->rows; i++){
for(int j = 0; j < w_laterial->cols; j++){
if(i == j) continue;
w_laterial->set(i, j, c, -1*strength);
}
}
}
w_laterial->toGpu();
}
/* the device function to realize: weights * spikes(:, t - 1) + recurrent_weights * o_spikes(t - 1)
* I only consider the first order dynamics
* inputSize : number of input neurons
* outputSize : number of output neurons
*/
__device__ float d_Spiking_accumulate_spikes(
int inputSize,
int outputSize,
float* input_response,
bool* output,
int o_idx,
float* weights,
float* weights_lat,
float* biases,
int t,
int dummyFreq,
int endTime)
{
int idx = threadIdx.x;
if(idx >= outputSize * inputSize){
return 0;
}
float response = 0.0f;
// effect from the forward-connects
response = input_response[(t - 1) + o_idx * endTime];
// effect from the bias
if(t % dummyFreq == 0){
		response += biases[o_idx];
}
if(weights_lat != NULL){
// effect from the recurrent connections:
for(int i = 0; i < outputSize; ++i)
response += output[i + (t - 1) * outputSize] ? weights_lat[i + o_idx * outputSize] : 0;
}
return response;
}
/* given each input and output spike train,
* compute the accumulative synaptic effect as the gradient
* input: input spikes: endTime * inputSize
* output: output spikes: endTime * outputSize
*/
__device__ float d_Spiking_gradient(
bool* output,
bool* input,
float delta,
int o_idx,
int i_idx,
int outputSize,
int inputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
float acc_response = 0.0f;
int t_post_last = 1;
for(int t_post = 1; t_post < endTime; t_post++){
if(output[o_idx + t_post * outputSize] != true) continue;
float sum = 0.0f;
int ub = t_post;
int lb = max(1, int(t_post - 4*TAU_M));
for(int t_pre = lb; t_pre < ub; ++t_pre){
if(input[i_idx + t_pre * inputSize] != true) continue;
int pre_time = t_pre + T_REFRAC;
if(pre_time > t_post) continue;
int s = t_post - t_post_last;
int t = t_post - pre_time;
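			// dual-exponential (TAU_M/TAU_S) response kernel, limited by the time s since the previous output spike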
float factor = exp(-1*max(t - s, 0)/TAU_S)/(1 - TAU_S/TAU_M);
sum += factor * (exp(-1*min(s, t)/TAU_M) - exp(-1*min(s, t)/TAU_S));
}
t_post_last = t_post + T_REFRAC;
acc_response += sum;
}
float delta_w = delta * acc_response;
return delta_w;
}
/* given each input and output spike train of spike times,
* compute the accumulative synaptic effect as the gradient
* input: input spikes: endTime * inputSize
* output: output spikes: endTime * outputSize
*/
__device__ float d_Spiking_gradient_spiketime(
int* output_time,
int* input_time,
int n_ospikes,
int n_ispikes,
float delta,
int o_idx,
int i_idx,
float lat_factor,
int outputSize,
int inputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
float acc_response = d_Spiking_accumulate_effect(output_time, input_time, n_ospikes, n_ispikes, o_idx, i_idx, outputSize, inputSize, endTime, T_REFRAC, TAU_M, TAU_S);
float delta_w = delta * acc_response * lat_factor;
return delta_w;
}
/* compute the gradient for the bias
* input: input spikes: endTime * inputSize
* output: output spikes: endTime * outputSize
*/
__device__ float d_Spiking_bias_gradient_spiketime(
int* output_time,
int n_ospikes,
float delta,
int o_idx,
int dummyFreq,
int outputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
float acc_response = 0.0f;
int t_post_last = 1;
for(int i = 0; i < n_ospikes; ++i){
int t_post = output_time[o_idx * endTime + i];
float sum = 0.0f;
int ub = t_post;
int lb = max(1, int(t_post - 4*TAU_M));
for(int j = dummyFreq; j < endTime; j += dummyFreq){
int t_pre = j;
if(t_pre < lb || t_pre >= ub) continue;
int pre_time = t_pre + T_REFRAC;
if(pre_time > t_post) continue;
int s = t_post - t_post_last;
int t = t_post - pre_time;
float factor = exp(-1*max(t - s, 0)/TAU_S)/(1 - TAU_S/TAU_M);
sum += factor * (exp(-1*min(s, t)/TAU_M) - exp(-1*min(s, t)/TAU_S));
}
t_post_last = t_post + T_REFRAC;
acc_response += sum;
}
float delta_b = delta * acc_response;
return delta_b;
}
/*
* dim3 block = dim3(1);
* dim3 thread= dim3(256);
*/
__global__ void g_getCost_output(
int* fireCount,
float* groundTruth,
float* cost,
int* y,
int batch,
int cols,
float UNDESIRED_LEVEL,
float DESIRED_LEVEL,
float MARGIN)
{
extern __shared__ float _sum[];
int len = batch * cols;
for(int i = 0; i < len; i += blockDim.x)
{
int id = i + threadIdx.x;
if(id < len){
groundTruth[id] = UNDESIRED_LEVEL;
}
}
__syncthreads();
for(int i = 0; i < batch; i += blockDim.x)
{
int id = i + threadIdx.x;
if(id < batch){
int yy = y[id];
groundTruth[id * cols + yy] = DESIRED_LEVEL;
}
}
_sum[threadIdx.x] = 0;
__syncthreads();
for(int i = 0; i < len; i += blockDim.x)
{
int id = i + threadIdx.x;
if(id < len)
{
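			// accumulate squared error on fire counts, ignoring deviations within MARGIN of the target level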
float diff = fabsf(float(fireCount[id]) - groundTruth[id]);
_sum[threadIdx.x] += diff > MARGIN ? diff * diff : 0;
}
}
len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1)>>1;
if(threadIdx.x < skip && (threadIdx.x + skip) < len)
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = skip;
}
__syncthreads();
if(threadIdx.x == 0)
{
cost[0] = _sum[0];
}
}
/*
* dim3 block = dim3(1);
* dim3 thread= dim3(256);
*/
__global__ void g_getDelta_output(float* outputDelta, int* fireCount, float* groundTruth, int len, float MARGIN)
{
for(int i = 0; i < len; i += blockDim.x)
{
int id = i + threadIdx.x;
if(id < len)
{
float diff = fabsf(float(fireCount[id]) - groundTruth[id]);
outputDelta[id] = diff > MARGIN ? fireCount[id] - groundTruth[id] : 0;
}
}
}
/*
* dim3 block = dim3(batch);
* dim3 thread= dim3(outputSize);
*/
__global__ void g_boostWeight_output(float* outputDelta, float* sample_weights, int len)
{
int batchId = blockIdx.x;
float sample_weight = sample_weights[batchId];
int outputSize = blockDim.x;
int tid = threadIdx.x;
int target = tid + batchId * outputSize;
if(target < len)
outputDelta[target] *= sample_weight;
}
/*
* dim3 block = dim3(batch, outputSize);
* dim3 thread= min(1024, outputSize);
*/
__global__ void g_getLateralFactor_output(
int* outputs_time,
int* batchFireCount,
float w0,
int* y,
float* batchLFactor,
float vth,
int outputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
extern __shared__ float d_sum[];
int tid = threadIdx.x;
d_sum[tid] = 0;
__syncthreads();
int batchId = blockIdx.x;
int j_idx = blockIdx.y;
int outputSize2 = endTime * outputSize;
int* output_time = outputs_time + batchId * outputSize2;
int* output_fireCount = batchFireCount + batchId * outputSize;
int cls = y[batchId];
float * lateral_factors = batchLFactor + batchId * outputSize;
int f_cnt_j = output_fireCount[j_idx];
float d_j = (f_cnt_j > 0 || (f_cnt_j == 0 && j_idx == cls)) ? 1 / vth : 0;
for(int i = 0; i < outputSize; i += blockDim.x)
{
int l_idx = i + tid;
if(l_idx < outputSize && j_idx != l_idx)
{
int f_cnt_l = output_fireCount[l_idx];
float d_l = (f_cnt_l > 0 || (f_cnt_l == 0 && l_idx == cls)) ? 1 / vth : 0;
// j --> l
float e_jl = d_Spiking_accumulate_effect(output_time, output_time, f_cnt_l, f_cnt_j, l_idx, j_idx, outputSize, outputSize, endTime, T_REFRAC, TAU_M, TAU_S);
float effect_ratio_jl = (f_cnt_j == 0 || f_cnt_l == 0) ? 1 : e_jl / f_cnt_j;
// l --> j
float e_lj = d_Spiking_accumulate_effect(output_time, output_time, f_cnt_j, f_cnt_l, j_idx, l_idx, outputSize, outputSize, endTime, T_REFRAC, TAU_M, TAU_S);
float effect_ratio_lj = (f_cnt_l == 0 || f_cnt_j == 0) ? 1 : e_lj / f_cnt_l;
d_sum[tid] += effect_ratio_jl * d_l * effect_ratio_lj * d_j;
}
}
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip) < len)
{
d_sum[tid] += d_sum[tid + skip];
}
len = skip;
}
if(tid == 0)
{
lateral_factors[j_idx] = 1.0f / (1 - d_sum[0] * w0 * w0);
}
}
/*
* dim3 block = dim3(batch);
* dim3 thread= dim3(outputSize);
*/
__global__ void g_getMaxCount(int* fireCount, int* maxCount, int cols)
{
extern __shared__ int _max[];
int batchId = blockIdx.x;
int len = blockDim.x;
int id = threadIdx.x;
_max[id] = 0;
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x){
int ttid = tid + id;
if(ttid < cols){
_max[threadIdx.x] = max(_max[threadIdx.x], fireCount[ttid + batchId * cols]);
}
}
while(len != 1)
{
__syncthreads();
int skip = (len + 1)>>1;
if(id < skip && (id + skip) < len)
{
_max[id] = max(_max[id], _max[id + skip]);
}
len = skip;
}
__syncthreads();
if(id == 0)
{
maxCount[batchId] = _max[0];
}
}
/*
* dim3 block = dim3(batch);
* dim3 thread= dim3(min(1024, outputSize));
*/
__global__ void g_modifySpikes(bool* outputs, int* y, int* fireCount, int target_level, int endTime, int outputSize)
{
int batchId = blockIdx.x;
int target = y == NULL ? -1 : y[batchId];
int mCnt = target_level;
bool* outputSpikes = outputs + batchId * endTime * outputSize;
for(int id = 0; id < outputSize; id += blockDim.x){
int o_idx = id + threadIdx.x;
if(o_idx < outputSize)
{
if(o_idx != target)
return;
if(fireCount[o_idx + batchId * outputSize] == 0)
{
int count = 0;
int interval = endTime / mCnt;
for(int t = interval; t < endTime; t += interval)
{
outputSpikes[o_idx + t * outputSize] = true;
count++;
}
fireCount[o_idx + batchId * outputSize] = count;
}
}
}
}
/*
* dim3 block = dim3(batch);
* dim3 thread= dim3(min(outputSize, 1024));
*/
__global__ void g_Spiking_feedforward(
float* inputs_resp,
float* w,
float* w_l,
float* b,
bool* outputs,
int* fireCount,
int inputSize,
int outputSize,
int endTime,
float vth,
int dummyFreq,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
int batchId = blockIdx.x;
int outputSize2 = endTime * outputSize;
bool* curOutput = outputs + batchId * outputSize2;
float* curInput = inputs_resp + batchId * outputSize2;//inputs_resp:batch * outputSize*endTime
int* curFireCount = fireCount + batchId * outputSize;
// simulate the spiking train
for(int tidx = 0; tidx < outputSize; tidx += blockDim.x)
{
int o_idx = tidx + threadIdx.x;
if(o_idx < outputSize)
{
float v = 0.0f;
float ep = 0.0f;
            float threshold = vth - 1e-6; // mitigate the numerical disparity due to fast response
int t_ref= 0;
float response = 0.0f;
int fire_count = 0;
for(int t = 0; t < endTime; t++){
// 1. leakage
v -= v / TAU_M;
ep -= ep / TAU_S;
if(t == 0)
{
curOutput[o_idx + t * outputSize] = false;
continue;
}
// 2. receive the spike inputs
__syncthreads(); // make sure all the threads has generated the spikes for the last time step
response = d_Spiking_accumulate_spikes(inputSize, outputSize, curInput, curOutput, o_idx, w, w_l, b, t, dummyFreq, endTime);
// 3. Add up the response to ep (state variable)
ep += response;
// 4. Update the vmem accordingly
v += ep/TAU_S;
if(t_ref > 0){
v = 0;
t_ref--;
}
// 5. Fire or not
curOutput[o_idx + t * outputSize] = v > threshold ? true : false;
t_ref = v > threshold ? T_REFRAC : t_ref;
fire_count += v > threshold ? 1 : 0;
v = v > threshold ? 0 : v;
}
curFireCount[o_idx] = fire_count;
}
}
}
/*
* dim3 block = dim3(batch, inputSize);
* dim3 thread= min(1024, outputSize);
*/
__global__ void g_Spiking_wgrad(
bool* inputs,
bool* outputs,
float* curDelta,
float* wgradTmp,
int inputSize,
int outputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
int batchId = blockIdx.x;
int i_idx = blockIdx.y;
int wSize = outputSize * inputSize;
int inputSize2 = endTime * inputSize;
int outputSize2 = endTime * outputSize;
int curDeltaSize = outputSize;
float* wgrad = wgradTmp + batchId * wSize;
bool* input = inputs + batchId * inputSize2;
bool* output = outputs + batchId * outputSize2;
float* cDelta = curDelta + batchId * curDeltaSize;
for(int i = 0; i < outputSize; i += blockDim.x)
{
int o_idx = i + threadIdx.x;
if(o_idx < outputSize)
{
float delta_w = d_Spiking_gradient(output, input, cDelta[o_idx], o_idx, i_idx, outputSize, inputSize, endTime, T_REFRAC, TAU_M, TAU_S);
wgrad[i_idx + o_idx * inputSize] = delta_w;
}
}
}
/*
* dim3 block = dim3(batch, outputSize);
* dim3 thread= min(1024, inputSize);
*/
__global__ void g_Spiking_wgrad_sideEffect(
float* weights,
int* batchFireCount,
float* batchAccEffect,
float vth,
int inputSize,
int outputSize,
float * batchSideEffect)
{
int batchId = blockIdx.x;
int o_idx = blockIdx.y;
int tid = threadIdx.x;
extern __shared__ float _sum[];
_sum[tid] = 0;
__syncthreads();
int wSize = outputSize * inputSize;
int* fireCount = batchFireCount + batchId * outputSize;
float* acc_effect= batchAccEffect + batchId * wSize;
float* side_effect = batchSideEffect + batchId * outputSize;
int o_cnt = fireCount[o_idx];
for(int i = 0; i < inputSize; i += blockDim.x)
{
int idx = i + tid;
if(idx < inputSize)
{
float w = weights[idx + o_idx * inputSize];
float e = acc_effect[idx + o_idx * inputSize];
float ratio = o_cnt == 0 ? 0.5 : e/o_cnt;
_sum[tid] += w * ratio;
}
}
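	// tree-reduce the per-thread partial sums in shared memory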
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip) < len)
{
_sum[tid] += _sum[tid + skip];
}
len = skip;
}
if(tid == 0){
side_effect[o_idx] = _sum[0]/vth;
}
}
/*
* dim3 block = dim3(batch, outputSize);
* dim3 thread= min(1024, inputSize);
*/
__global__ void g_Spiking_wgrad_spiketime(
float* batchSideEffect,
float* batchAccEffect,
float* curDelta,
float* latFactor,
float* wgradTmp,
int inputSize,
int outputSize)
{
int batchId = blockIdx.x;
int o_idx = blockIdx.y;
int tid = threadIdx.x;
int wSize = outputSize * inputSize;
int curDeltaSize = outputSize;
float* wgrad = wgradTmp + batchId * wSize;
float* acc_effect = batchAccEffect + batchId * wSize;
float* side_effect = batchSideEffect + batchId * outputSize;
float* cDelta = curDelta + batchId * curDeltaSize;
float* lFactor = latFactor == NULL ? NULL : latFactor + batchId * curDeltaSize;
float s_effect = side_effect[o_idx];
float latFac = lFactor == NULL ? 1.0f : lFactor[o_idx];
float delta = cDelta[o_idx];
for(int i = 0; i < inputSize; i += blockDim.x)
{
int i_idx = i + tid;
if(i_idx < inputSize)
{
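			// compensate the accumulated synaptic effect with the neuron's side effect (computed in g_Spiking_wgrad_sideEffect)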
float compen_effect = acc_effect[i_idx + o_idx * inputSize] * (1 + s_effect);
float delta_w = delta * compen_effect * latFac;
wgrad[i_idx + o_idx * inputSize] = delta_w;
}
}
}
/*
* dim3 block = dim3(batch);
* dim3 thread= dim3(min(1024, outputSize));
*/
__global__ void g_Spiking_bgrad_spiketime(
int* outputs_time,
int* batchFireCount,
float* curDelta,
float* bgradTmp,
int outputSize,
int endTime,
int dummyFreq,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
int batchId = blockIdx.x;
int bSize = outputSize;
int outputSize2 = endTime * outputSize;
int curDeltaSize = outputSize;
float* bgrad = bgradTmp + batchId * bSize;
int* output_time = outputs_time + batchId * outputSize2;
int* output_fireCount = batchFireCount + batchId * outputSize;
float* cDelta = curDelta + batchId * curDeltaSize;
for(int i = 0; i < outputSize; i += blockDim.x)
{
int o_idx = i + threadIdx.x;
if(o_idx < outputSize)
{
float delta_b = d_Spiking_bias_gradient_spiketime(output_time, output_fireCount[o_idx], cDelta[o_idx], o_idx, dummyFreq, outputSize, endTime, T_REFRAC, TAU_M, TAU_S);
bgrad[o_idx] = delta_b;
}
}
}
/*
* dim3 block = dim3(batch, inputSize);
* dim3 thread= min(1024, outputSize);
*/
__global__ void g_Spiking_synaptic_effect(
int* inputs_time,
int* outputs_time,
int* batchPreFireCount,
int* batchFireCount,
float* w,
float* batchAccEffect,
float* effectRatio,
int inputSize,
int outputSize,
int endTime,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
int batchId = blockIdx.x;
int i_idx = blockIdx.y;
int wSize = outputSize * inputSize;
int inputSize2 = endTime * inputSize;
int outputSize2 = endTime * outputSize;
int* input_time = inputs_time + batchId * inputSize2;
int* output_time = outputs_time + batchId * outputSize2;
int* input_fireCount = batchPreFireCount + batchId * inputSize;
int* output_fireCount = batchFireCount + batchId * outputSize;
float* acc_effect = batchAccEffect + batchId * wSize;
for(int i = 0; i < outputSize; i += blockDim.x)
{
int o_idx = i + threadIdx.x;
if(o_idx < outputSize)
{
float e = d_Spiking_accumulate_effect(output_time, input_time, output_fireCount[o_idx], input_fireCount[i_idx], o_idx, i_idx, outputSize, inputSize, endTime, T_REFRAC, TAU_M, TAU_S);
acc_effect[i_idx + o_idx * inputSize] = e;
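			// cache w * (e / presynaptic fire count); backpropagation() multiplies curDelta by this matrix to form preDelta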
if(effectRatio != NULL){
int o_cnt = output_fireCount[o_idx];
int i_cnt = input_fireCount[i_idx];
float ratio = i_cnt == 0 || o_cnt == 0 ? 1 : e / float(i_cnt);
effectRatio[i_idx + o_idx * inputSize] = ratio * w[i_idx + o_idx * inputSize];
}
}
}
}
/*
* dim3 block = dim3(batch, inputSize);
* dim3 thread= min(1024, outputSize);
*/
__global__ void g_Spiking_debug_spiketime(
int* inputs_time,
int* outputs_time,
int* batchPreFireCount,
int* batchFireCount,
int inputSize,
int outputSize,
int endTime)
{
int batchId = blockIdx.x;
int i_idx = blockIdx.y;
int inputSize2 = endTime * inputSize;
int outputSize2 = endTime * outputSize;
int* input_time = inputs_time + batchId * inputSize2;
int* output_time = outputs_time + batchId * outputSize2;
	int* input_fireCount = batchPreFireCount + batchId * inputSize;
int* output_fireCount = batchFireCount + batchId * outputSize;
for(int i = 0; i < outputSize; i += blockDim.x)
{
int o_idx = i + threadIdx.x;
if(o_idx < outputSize)
{
if(i_idx == I_IDX && o_idx == O_IDX){
printf("Input %d fires: ", i_idx);
for(int i = 0; i < input_fireCount[i_idx]; i++) printf("%d\t", input_time[i_idx * endTime + i]);
printf("\n");
printf("Output %d fires: ", o_idx);
for(int j = 0; j < output_fireCount[o_idx]; j++) printf("%d\t", output_time[o_idx * endTime + j]);
printf("\n");
}
}
}
}
|
85611448ee0b1e8680e66503c6d671269803b283.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "im2col.h"
#include "THHTensor.hpp"
#include "TH/THHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "generic/SpatialFullDilatedConvolution.cu"
#include "THHGenerateFloatTypes.h"
| 85611448ee0b1e8680e66503c6d671269803b283.cu | #include "THCUNN.h"
#include "im2col.h"
#include "THCTensor.hpp"
#include "TH/THHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "generic/SpatialFullDilatedConvolution.cu"
#include "THCGenerateFloatTypes.h"
|
dfbd5e163dc2b90be5932f26d66b5161711db0fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaUtility.cuh"
__duel__ void check(const bool & isNotAssert, const char * errorInfo, const char * file, const int & line)
{
if (!isNotAssert)
{
printf("====================Error Occurred!====================\n");
printf("Error occurred in: %s\n", file);
printf("Line: %d\n", line);
printf("Error info:%s\n", errorInfo);
printf("====================Error Occurred!====================\n");
assert(false);
}
}
__duel__ void check(const bool & isAssert, const char * errorInfo, const Bool & val, const char * file, const int & line)
{
if (!isAssert)
{
printf("====================Error Occurred!====================\n");
printf("Error occurred in: %s\n", file);
printf("Line: %d\n", line);
printf("Error info: %s\n", errorInfo);
const char* trueStr = "true";
const char* falseStr = "false";
const char* boolVal = val ? trueStr : falseStr;
printf("Error Int val: %s\n", boolVal);
printf("====================Error Occurred!====================\n");
assert(false);
}
}
__duel__ void check(const bool & isAssert, const char * errorInfo, const Int & val, const char * file, const int & line)
{
if (!isAssert)
{
printf("====================Error Occurred!====================\n");
printf("Error occurred in: %s\n", file);
printf("Line: %d\n", line);
printf("Error info: %s\n", errorInfo);
printf("Error Int val: %d\n", val);
printf("====================Error Occurred!====================\n");
assert(false);
}
}
__duel__ void check(const bool & isAssert, const char * errorInfo, const Uint & val, const char * file, const int & line)
{
if (!isAssert)
{
printf("====================Error Occurred!====================\n");
printf("Error occurred in: %s\n", file);
printf("Line: %d\n", line);
printf("Error info: %s\n", errorInfo);
printf("Error Int val: %u\n", val);
printf("====================Error Occurred!====================\n");
assert(false);
}
}
__duel__ void check(const bool & isAssert, const char * errorInfo, const Float & val, const char * file, const int & line)
{
if (!isAssert)
{
printf("====================Error Occurred!====================\n");
printf("Error occurred in: %s\n", file);
printf("Line: %d\n", line);
printf("Error info: %s\n", errorInfo);
printf("Error Float val: %f\n", val);
printf("====================Error Occurred!====================\n");
assert(false);
}
}
__duel__ void check(const bool & isAssert, const char * errorInfo, const Int* valArry, const Uint & size, const char * file, const int & line)
{
if (!isAssert)
{
printf("====================Error Occurred!====================\n");
printf("Error occurred in: %s\n", file);
printf("Line: %d\n", line);
printf("Error info: %s\n", errorInfo);
for (Uint i = 0; i < size; i++)
{
printf("Error Int valArray[%u]: %d\n", i,valArry[i]);
}
if (valArry) delete[]valArry;
printf("====================Error Occurred!====================\n");
assert(false);
}
}
void error_check(hipError_t err, const char * file, int line)
{
if (err != hipSuccess) {
::fprintf(stderr, "CUDA ERROR at %s[%d] : %s\n", file, line, hipGetErrorString(err));
abort();
}
}
#ifdef RUN_ON_DEVICE
__global__ void SetupDeviceStates()
{
Int globalIdx = threadIdx.x + blockIdx.x * blockDim.x;
hiprand_init(10086, globalIdx, 0, &deviceStates[globalIdx]);
}
__host__ void InitDeviceStates(const Int& length)
{
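	// allocate one RNG state per thread, publish the device pointer through the deviceStates symbol, then seed each state from its global thread index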
hiprandState_t* deviceStatesH = nullptr;
if (deviceStatesH)
{
hipFree(deviceStatesH);
deviceStatesH = nullptr;
}
hipMalloc(&deviceStatesH, length * sizeof(hiprandState_t));
hipMemcpyToSymbol(deviceStates, &deviceStatesH, sizeof(hiprandState_t*));
hipError_t error = hipGetLastError();
if (error != hipError_t::hipSuccess)
{
printf("%s\n", hipGetErrorString(error));
}
const Int threadNum = 32;
Int blockNum = length / threadNum;
SetupDeviceStates << <blockNum, threadNum >> > ();
error = hipGetLastError();
if (error != hipError_t::hipSuccess)
{
printf("%s\n", hipGetErrorString(error));
}
}
__device__ Float GetUniformRand()
{
const Int& globalIdx = blockIdx.x*blockDim.x + threadIdx.x;
hiprandState_t& localState = deviceStates[globalIdx];
return hiprand_uniform(&localState);
}
#endif // RUN_ON_DEVICE
#ifdef RUN_ON_HOST
__host__ Float GetUniformRand()
{
static std::default_random_engine randEngine(rand());
static std::uniform_real_distribution<Float> randGenerator(0.0, 1.0);
return randGenerator(randEngine);
}
#endif // RUN_ON_HOST | dfbd5e163dc2b90be5932f26d66b5161711db0fd.cu | #include "CudaUtility.cuh"
__duel__ void check(const bool & isNotAssert, const char * errorInfo, const char * file, const int & line)
{
if (!isNotAssert)
{
printf("====================Error Occurred!====================\n");
printf("Error occurred in: %s\n", file);
printf("Line: %d\n", line);
printf("Error info:%s\n", errorInfo);
printf("====================Error Occurred!====================\n");
assert(false);
}
}
__duel__ void check(const bool & isAssert, const char * errorInfo, const Bool & val, const char * file, const int & line)
{
if (!isAssert)
{
printf("====================Error Occurred!====================\n");
printf("Error occurred in: %s\n", file);
printf("Line: %d\n", line);
printf("Error info: %s\n", errorInfo);
const char* trueStr = "true";
const char* falseStr = "false";
const char* boolVal = val ? trueStr : falseStr;
printf("Error Int val: %s\n", boolVal);
printf("====================Error Occurred!====================\n");
assert(false);
}
}
__duel__ void check(const bool & isAssert, const char * errorInfo, const Int & val, const char * file, const int & line)
{
if (!isAssert)
{
printf("====================Error Occurred!====================\n");
printf("Error occurred in: %s\n", file);
printf("Line: %d\n", line);
printf("Error info: %s\n", errorInfo);
printf("Error Int val: %d\n", val);
printf("====================Error Occurred!====================\n");
assert(false);
}
}
__duel__ void check(const bool & isAssert, const char * errorInfo, const Uint & val, const char * file, const int & line)
{
if (!isAssert)
{
printf("====================Error Occurred!====================\n");
printf("Error occurred in: %s\n", file);
printf("Line: %d\n", line);
printf("Error info: %s\n", errorInfo);
printf("Error Int val: %u\n", val);
printf("====================Error Occurred!====================\n");
assert(false);
}
}
__duel__ void check(const bool & isAssert, const char * errorInfo, const Float & val, const char * file, const int & line)
{
if (!isAssert)
{
printf("====================Error Occurred!====================\n");
printf("Error occurred in: %s\n", file);
printf("Line: %d\n", line);
printf("Error info: %s\n", errorInfo);
printf("Error Float val: %f\n", val);
printf("====================Error Occurred!====================\n");
assert(false);
}
}
__duel__ void check(const bool & isAssert, const char * errorInfo, const Int* valArry, const Uint & size, const char * file, const int & line)
{
if (!isAssert)
{
printf("====================Error Occurred!====================\n");
printf("Error occurred in: %s\n", file);
printf("Line: %d\n", line);
printf("Error info: %s\n", errorInfo);
for (Uint i = 0; i < size; i++)
{
printf("Error Int valArray[%u]: %d\n", i,valArry[i]);
}
if (valArry) delete[]valArry;
printf("====================Error Occurred!====================\n");
assert(false);
}
}
void error_check(cudaError_t err, const char * file, int line)
{
if (err != cudaSuccess) {
::fprintf(stderr, "CUDA ERROR at %s[%d] : %s\n", file, line, cudaGetErrorString(err));
abort();
}
}
#ifdef RUN_ON_DEVICE
__global__ void SetupDeviceStates()
{
Int globalIdx = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(10086, globalIdx, 0, &deviceStates[globalIdx]);
}
__host__ void InitDeviceStates(const Int& length)
{
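	// allocate one RNG state per thread, publish the device pointer through the deviceStates symbol, then seed each state from its global thread index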
curandState* deviceStatesH = nullptr;
if (deviceStatesH)
{
cudaFree(deviceStatesH);
deviceStatesH = nullptr;
}
cudaMalloc(&deviceStatesH, length * sizeof(curandState));
cudaMemcpyToSymbol(deviceStates, &deviceStatesH, sizeof(curandState*));
cudaError_t error = cudaGetLastError();
if (error != cudaError_t::cudaSuccess)
{
printf("%s\n", cudaGetErrorString(error));
}
const Int threadNum = 32;
Int blockNum = length / threadNum;
SetupDeviceStates << <blockNum, threadNum >> > ();
error = cudaGetLastError();
if (error != cudaError_t::cudaSuccess)
{
printf("%s\n", cudaGetErrorString(error));
}
}
__device__ Float GetUniformRand()
{
const Int& globalIdx = blockIdx.x*blockDim.x + threadIdx.x;
curandState& localState = deviceStates[globalIdx];
return curand_uniform(&localState);
}
#endif // RUN_ON_DEVICE
#ifdef RUN_ON_HOST
__host__ Float GetUniformRand()
{
static std::default_random_engine randEngine(rand());
static std::uniform_real_distribution<Float> randGenerator(0.0, 1.0);
return randGenerator(randEngine);
}
#endif // RUN_ON_HOST |
d878b6b5a86e4588c7ccfd3acc2fb1789e89a9ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define SIZE 12
#define nThreads 1
#define nBlocks 2
__global__ void splitBladeKernel(int * dev_a){
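	// each index idx maps to the inclusive range [inf_limit, sup_limit]: one SIZE/nBlocks chunk of dev_a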
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int inf_limit = SIZE/(nBlocks)*idx;
int sup_limit = inf_limit+SIZE/(nBlocks)-1;
if(idx<SIZE){
printf("\n%d-%d", inf_limit, sup_limit);
}
//printf("\n[DEVICE][BLOCK:%d; THREAD:%d] dev_a[%d] = %d; dev_a[%d] = %d",blockIdx.x, threadIdx.x, idx, dev_a[idx], idx+1, dev_a[idx+1]);
__syncthreads();
}
void fill_array(int *host_a);
void show_array(int *host_a);
int main (void){
printf("[HOST] InitBlade\n");
int * host_a = (int *) malloc(sizeof(int)*SIZE);
fill_array(host_a);
show_array(host_a);
printf("\n");
int * dev_a;
hipMalloc((void**)&dev_a, sizeof(int)*SIZE);
hipMemcpy(dev_a, host_a, sizeof(int)*SIZE, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( splitBladeKernel), dim3(nBlocks),dim3(nThreads), 0, 0, dev_a);
hipDeviceSynchronize();
hipFree(dev_a);
printf("\n");
return 0;
}
void fill_array(int *host_a){
int i;
for(i=0; i<SIZE; i++)
host_a[i] = i + 1;
}
void show_array(int *host_a){
int i;
for(i=0; i<SIZE; i++)
printf("\nhost_a[%d] = %d",i, host_a[i]);
} | d878b6b5a86e4588c7ccfd3acc2fb1789e89a9ef.cu | #include <stdio.h>
#define SIZE 12
#define nThreads 1
#define nBlocks 2
__global__ void splitBladeKernel(int * dev_a){
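	// each index idx maps to the inclusive range [inf_limit, sup_limit]: one SIZE/nBlocks chunk of dev_a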
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int inf_limit = SIZE/(nBlocks)*idx;
int sup_limit = inf_limit+SIZE/(nBlocks)-1;
if(idx<SIZE){
printf("\n%d-%d", inf_limit, sup_limit);
}
//printf("\n[DEVICE][BLOCK:%d; THREAD:%d] dev_a[%d] = %d; dev_a[%d] = %d",blockIdx.x, threadIdx.x, idx, dev_a[idx], idx+1, dev_a[idx+1]);
__syncthreads();
}
void fill_array(int *host_a);
void show_array(int *host_a);
int main (void){
printf("[HOST] InitBlade\n");
int * host_a = (int *) malloc(sizeof(int)*SIZE);
fill_array(host_a);
show_array(host_a);
printf("\n");
int * dev_a;
cudaMalloc((void**)&dev_a, sizeof(int)*SIZE);
cudaMemcpy(dev_a, host_a, sizeof(int)*SIZE, cudaMemcpyHostToDevice);
splitBladeKernel<<<nBlocks,nThreads>>>(dev_a);
cudaDeviceSynchronize();
cudaFree(dev_a);
printf("\n");
return 0;
}
void fill_array(int *host_a){
int i;
for(i=0; i<SIZE; i++)
host_a[i] = i + 1;
}
void show_array(int *host_a){
int i;
for(i=0; i<SIZE; i++)
printf("\nhost_a[%d] = %d",i, host_a[i]);
} |
4e8dd7448497d47d8e5831fc5b1b4223ff92ecdd.hip | // !!! This is a file automatically generated by hipify!!!
// originated from NVidia's sample for vectorAdd
#include <stdio.h>
#include <unistd.h>
//#define MYTHREADS 1920
#define MYTHREADS 1024
#define MYELEMENTS 70000000
// Makefile passes it in via -DDO_CUDA
//#define DO_CUDA
#ifdef DO_CUDA
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
//#include <helper_cuda.h>
#endif
__device__
void myfunc1(float a, float b, float* c) {
*(c) =
(a + b)*(a + b)*(a+b)
+(a + b*a + b)*(a+b)
+(a * b)*(a + b*(a+b))
+(a + b)*(a + b)*(a+b)
+(a + b*(a + b)*a+b)
+(a * b*(a - b))*(a+b)
;
}
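// Host-side macro twin of myfunc1: used by the CPU vectorAdd fallback and by the result check in main.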
#define myfunc(a, b, c) \
* c = \
(a + b)*(a + b)*(a+b) \
+(a + b*a + b)*(a+b) \
+(a * b)*(a + b*(a+b)) \
+(a + b)*(a + b)*(a+b) \
+(a + b*(a + b)*a+b) \
+(a * b*(a - b))*(a+b)
#ifdef DO_CUDA
/**
* CUDA Kernel Device code
*
 * Computes C[i] = myfunc1(A[i], B[i]), a polynomial in A[i] and B[i], for each of the
 * numElements elements. (The vectorAdd name is kept from the original NVIDIA sample.)
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
//int j = 0;
float *pC;
if ( // j++ < MYTHREADS &&
i < numElements) {
pC = &(C[i]);
myfunc1(A[i], B[i], pC);
}
}
#else
void vectorAdd(const float *A, const float *B, float *C, int numElements) {
int i = 0;
float *pC;
while( i < numElements) {
pC = &(C[i]);
myfunc(A[i], B[i], pC);
++i;
}
}
#endif
/**
* Host main routine
*/
int main(void) {
#ifdef DO_CUDA
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
#endif
// Print the vector length to be used, and compute its size
int numElements = MYELEMENTS;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL) {
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i) {
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
#ifdef DO_CUDA
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess) {
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = hipMalloc((void **)&d_B, size);
if (err != hipSuccess) {
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = hipMalloc((void **)&d_C, size);
if (err != hipSuccess) {
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
//int threadsPerBlock = 256;
//int threadsPerBlock = 1;
int threadsPerBlock = MYTHREADS;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
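// Ceiling division: round up so every element gets a thread; the kernel's bounds check discards
// the surplus threads in the last block.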
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
#else
vectorAdd(h_A, h_B, h_C, numElements);
#endif
// Verify that the result vector is correct
float myc;
float* pmyc = &myc;
for (int i = 0; i < numElements; ++i) {
myfunc(h_A[i],h_B[i],pmyc);
if (fabs(myc - h_C[i]) > 1e-5) {
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
#ifdef DO_CUDA
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess) {
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess) {
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess) {
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
#endif
// Free host memory
free(h_A);
free(h_B);
free(h_C);
printf("Done\n");
while(1)
sleep(5);
return 0;
}
| 4e8dd7448497d47d8e5831fc5b1b4223ff92ecdd.cu | // originated from NVidia's sample for vectorAdd
#include <stdio.h>
#include <unistd.h>
//#define MYTHREADS 1920
#define MYTHREADS 1024
#define MYELEMENTS 70000000
// Makefile passes it in via -DDO_CUDA
//#define DO_CUDA
#ifdef DO_CUDA
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
//#include <helper_cuda.h>
#endif
__device__
void myfunc1(float a, float b, float* c) {
*(c) =
(a + b)*(a + b)*(a+b)
+(a + b*a + b)*(a+b)
+(a * b)*(a + b*(a+b))
+(a + b)*(a + b)*(a+b)
+(a + b*(a + b)*a+b)
+(a * b*(a - b))*(a+b)
;
}
#define myfunc(a, b, c) \
* c = \
(a + b)*(a + b)*(a+b) \
+(a + b*a + b)*(a+b) \
+(a * b)*(a + b*(a+b)) \
+(a + b)*(a + b)*(a+b) \
+(a + b*(a + b)*a+b) \
+(a * b*(a - b))*(a+b)
#ifdef DO_CUDA
/**
* CUDA Kernel Device code
*
 * Computes C[i] = myfunc1(A[i], B[i]), a polynomial in A[i] and B[i], for each of the
 * numElements elements. (The vectorAdd name is kept from the original NVIDIA sample.)
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
//int j = 0;
float *pC;
if ( // j++ < MYTHREADS &&
i < numElements) {
pC = &(C[i]);
myfunc1(A[i], B[i], pC);
}
}
#else
void vectorAdd(const float *A, const float *B, float *C, int numElements) {
int i = 0;
float *pC;
while( i < numElements) {
pC = &(C[i]);
myfunc(A[i], B[i], pC);
++i;
}
}
#endif
/**
* Host main routine
*/
int main(void) {
#ifdef DO_CUDA
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
#endif
// Print the vector length to be used, and compute its size
int numElements = MYELEMENTS;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL) {
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i) {
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
#ifdef DO_CUDA
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
//int threadsPerBlock = 256;
//int threadsPerBlock = 1;
int threadsPerBlock = MYTHREADS;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
#else
vectorAdd(h_A, h_B, h_C, numElements);
#endif
// Verify that the result vector is correct
float myc;
float* pmyc = &myc;
for (int i = 0; i < numElements; ++i) {
myfunc(h_A[i],h_B[i],pmyc);
if (fabs(myc - h_C[i]) > 1e-5) {
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
#ifdef DO_CUDA
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
#endif
// Free host memory
free(h_A);
free(h_B);
free(h_C);
printf("Done\n");
while(1)
sleep(5);
return 0;
}
|
2c0ac6bb9f3b4ccd67d8defa37ca64a67aa62398.hip | // !!! This is a file automatically generated by hipify!!!
#include "image"
#ifndef IMAGECONVOLUTIONSERIAL
#define IMAGECONVOLUTIONSERIAL
#define KERNELDIMENSION 3
void applyKernelToImageSerial(float *image, int imageWidth, int imageHeight, float *kernel, int kernelDimension, char *imagePath);
void flipKernel(float *kernel, int kernelDimension);
void loadAllKernels(float **kernels, FILE *fp);
int getNumKernels(FILE *fp);
float applyKernelPerPixel(int y, int x, int kernelX, int kernelY, int imageWidth, int imageHeight, float *kernel, float *image);
void imageConvolutionSerial(const char *imageFilename, char **argv)
{
// load image from disk
float *hData = NULL;
unsigned int width, height;
char *imagePath = sdkFindFilePath(imageFilename, argv[0]);
if (imagePath == NULL)
{
printf("Unable to source image file: %s\n", imageFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(imagePath, &hData, &width, &height);
printf("Loaded '%s', %d x %d pixels\n", imageFilename, width, height);
//Get Kernels
FILE *fp = fopen("kernels.txt", "r");
if (fp == NULL)
{
perror("Error in opening file");
exit(EXIT_FAILURE);
}
int numKernels = getNumKernels(fp);
// int kernelDimension = 3;
float **kernels = (float **)malloc(sizeof(float *) * numKernels);
for (int i = 0; i < numKernels; i++)
{
kernels[i] = (float *)malloc(sizeof(float) * 100);
}
loadAllKernels(kernels, fp);
fclose(fp);
for (int i = 0; i < 10; i++)
{
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
for (int i = 0; i < numKernels; i++)
{
applyKernelToImageSerial(hData, width, height, kernels[i], KERNELDIMENSION, imagePath);
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("Time Serial Implementation: %f \n", milliseconds);
}
}
void applyKernelToImageSerial(float *image, int imageWidth, int imageHeight, float *kernel, int kernelDimension, char *imagePath)
{
//printImage(image,imageWidth,imageHeight,"originalImage.txt");
unsigned int size = imageWidth * imageHeight * sizeof(float);
float *newImage = (float *)malloc(size);
for (int y = 0; y < imageHeight; y++)
{
for (int x = 0; x < imageWidth; x++)
{
float sum = applyKernelPerPixel(y, x, kernelDimension, kernelDimension, imageWidth, imageHeight, kernel, image);
//Normalising output
if (sum < 0)
sum = 0;
else if (sum > 1)
sum = 1;
newImage[y * imageWidth + x] = sum;
}
}
char outputFilename[1024];
strcpy(outputFilename, imagePath);
strcpy(outputFilename + strlen(imagePath) - 4, "_serial_out.pgm");
sdkSavePGM(outputFilename, newImage, imageWidth, imageHeight);
}
float applyKernelPerPixel(int y, int x, int kernelX, int kernelY, int imageWidth, int imageHeight, float *kernel, float *image)
{
float sum = 0;
int offsetX = (kernelX - 1) / 2;
int offsetY = (kernelY - 1) / 2;
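// The offsets center the kernel window on the current pixel (both are 1 for the 3x3 kernels used here).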
for (int j = 0; j < kernelY; j++)
{
//Ignore out of bounds
if (y + j < offsetY || y + j - offsetY >= imageHeight)
continue;
for (int i = 0; i < kernelX; i++)
{
//Ignore out of bounds
if (x + i < offsetX || x + i - offsetX >= imageWidth)
continue;
float k = kernel[i + j * kernelY];
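// The row offset below uses (j - 1), which equals (j - offsetY) only because KERNELDIMENSION is 3
// (offsetY == 1); larger kernels would need (j - offsetY) here.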
float imageElement = image[y * imageWidth + x + i - offsetX + imageWidth * (j - 1)];
float value = k * imageElement;
sum = sum + value;
}
}
return sum;
}
#endif
| 2c0ac6bb9f3b4ccd67d8defa37ca64a67aa62398.cu | #include "image"
#ifndef IMAGECONVOLUTIONSERIAL
#define IMAGECONVOLUTIONSERIAL
#define KERNELDIMENSION 3
void applyKernelToImageSerial(float *image, int imageWidth, int imageHeight, float *kernel, int kernelDimension, char *imagePath);
void flipKernel(float *kernel, int kernelDimension);
void loadAllKernels(float **kernels, FILE *fp);
int getNumKernels(FILE *fp);
float applyKernelPerPixel(int y, int x, int kernelX, int kernelY, int imageWidth, int imageHeight, float *kernel, float *image);
void imageConvolutionSerial(const char *imageFilename, char **argv)
{
// load image from disk
float *hData = NULL;
unsigned int width, height;
char *imagePath = sdkFindFilePath(imageFilename, argv[0]);
if (imagePath == NULL)
{
printf("Unable to source image file: %s\n", imageFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(imagePath, &hData, &width, &height);
printf("Loaded '%s', %d x %d pixels\n", imageFilename, width, height);
//Get Kernels
FILE *fp = fopen("kernels.txt", "r");
if (fp == NULL)
{
perror("Error in opening file");
exit(EXIT_FAILURE);
}
int numKernels = getNumKernels(fp);
// int kernelDimension = 3;
float **kernels = (float **)malloc(sizeof(float *) * numKernels);
for (int i = 0; i < numKernels; i++)
{
kernels[i] = (float *)malloc(sizeof(float) * 100);
}
loadAllKernels(kernels, fp);
fclose(fp);
for (int i = 0; i < 10; i++)
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
for (int i = 0; i < numKernels; i++)
{
applyKernelToImageSerial(hData, width, height, kernels[i], KERNELDIMENSION, imagePath);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Time Serial Implementation: %f \n", milliseconds);
}
}
void applyKernelToImageSerial(float *image, int imageWidth, int imageHeight, float *kernel, int kernelDimension, char *imagePath)
{
//printImage(image,imageWidth,imageHeight,"originalImage.txt");
unsigned int size = imageWidth * imageHeight * sizeof(float);
float *newImage = (float *)malloc(size);
for (int y = 0; y < imageHeight; y++)
{
for (int x = 0; x < imageWidth; x++)
{
float sum = applyKernelPerPixel(y, x, kernelDimension, kernelDimension, imageWidth, imageHeight, kernel, image);
//Normalising output
if (sum < 0)
sum = 0;
else if (sum > 1)
sum = 1;
newImage[y * imageWidth + x] = sum;
}
}
char outputFilename[1024];
strcpy(outputFilename, imagePath);
strcpy(outputFilename + strlen(imagePath) - 4, "_serial_out.pgm");
sdkSavePGM(outputFilename, newImage, imageWidth, imageHeight);
}
float applyKernelPerPixel(int y, int x, int kernelX, int kernelY, int imageWidth, int imageHeight, float *kernel, float *image)
{
float sum = 0;
int offsetX = (kernelX - 1) / 2;
int offsetY = (kernelY - 1) / 2;
for (int j = 0; j < kernelY; j++)
{
//Ignore out of bounds
if (y + j < offsetY || y + j - offsetY >= imageHeight)
continue;
for (int i = 0; i < kernelX; i++)
{
//Ignore out of bounds
if (x + i < offsetX || x + i - offsetX >= imageWidth)
continue;
float k = kernel[i + j * kernelY];
float imageElement = image[y * imageWidth + x + i - offsetX + imageWidth * (j - 1)];
float value = k * imageElement;
sum = sum + value;
}
}
return sum;
}
#endif
|
05698ddd0327d15e3262a4cd153c3e8e0fa5d616.hip | // !!! This is a file automatically generated by hipify!!!
#include <drivers/adagrad_driver.h>
#include <solvers/adagrad.h>
#include <core/errors.h>
#include <device/cuda_utils.h>
#include <device/gen_random.h>
#include <device/device_defines.h>
#include <device/handles.h>
#include <functions/dev_initializations.h>
#include <utilities/print_utils.h>
#include <limits.h>
#include <stdlib.h>
#include <stdio.h>
#include <float.h>
void initAdagradParams( ADAGRAD_PARAMS *params, int n )
{
//sampled_tr_cg.m file.
params->step= 0.001; //learning rate
params->eps = 1e-8; //eps
params->lambda = 0;
params->maxProps = ULONG_MAX;
params->maxEpochs = 20;
params->sampleSize = floor( 256 );
}
void testAdagrad (NN_MODEL *model, DEVICE_DATASET *data,
SCRATCH_AREA *scratch ) {
ADAGRAD_PARAMS mParams;
//begin here
fprintf( stderr, "Initiating the Trust Region Test now..... \n\n\n");
initAdagradParams( &mParams, data->trainSizeX );
fprintf( stderr, "... Done parms initialization \n\n");
//init weights to ZEROS
cuda_memset( data->weights, 0, sizeof(real) * model->pSize, ERROR_MEMSET );
//init weights to Random Vector
/*
getRandomVector( model->pSize, NULL, scratch->nextDevPtr, RAND_NORMAL );
copy_device( data->weights, scratch->nextDevPtr, sizeof(real) * model->pSize,
ERROR_MEMCPY_DEVICE_DEVICE );
real scale = 0.25;
cublasCheckError( hipblasDscal( cublasHandle, model->pSize, &scale, data->weights, 1 ));
*/
adagrad ( model, data, scratch, &mParams );
fprintf( stderr, ".... Done testing of Adagrad \n\n\n" );
}
| 05698ddd0327d15e3262a4cd153c3e8e0fa5d616.cu |
#include <drivers/adagrad_driver.h>
#include <solvers/adagrad.h>
#include <core/errors.h>
#include <device/cuda_utils.h>
#include <device/gen_random.h>
#include <device/device_defines.h>
#include <device/handles.h>
#include <functions/dev_initializations.h>
#include <utilities/print_utils.h>
#include <limits.h>
#include <stdlib.h>
#include <stdio.h>
#include <float.h>
void initAdagradParams( ADAGRAD_PARAMS *params, int n )
{
//sampled_tr_cg.m file.
params->step= 0.001; //learning rate
params->eps = 1e-8; //eps
params->lambda = 0;
params->maxProps = ULONG_MAX;
params->maxEpochs = 20;
params->sampleSize = floor( 256 );
}
void testAdagrad (NN_MODEL *model, DEVICE_DATASET *data,
SCRATCH_AREA *scratch ) {
ADAGRAD_PARAMS mParams;
//begin here
fprintf( stderr, "Initiating the Trust Region Test now..... \n\n\n");
initAdagradParams( &mParams, data->trainSizeX );
fprintf( stderr, "... Done parms initialization \n\n");
//init weights to ZEROS
cuda_memset( data->weights, 0, sizeof(real) * model->pSize, ERROR_MEMSET );
//init weights to Random Vector
/*
getRandomVector( model->pSize, NULL, scratch->nextDevPtr, RAND_NORMAL );
copy_device( data->weights, scratch->nextDevPtr, sizeof(real) * model->pSize,
ERROR_MEMCPY_DEVICE_DEVICE );
real scale = 0.25;
cublasCheckError( cublasDscal( cublasHandle, model->pSize, &scale, data->weights, 1 ));
*/
adagrad ( model, data, scratch, &mParams );
fprintf( stderr, ".... Done testing of Adagrad \n\n\n" );
}
|
7adcd16e1be8322f891dab4e6c52332b11e99b05.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sum_channels.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *dest = NULL;
hipMalloc(&dest, XSIZE*YSIZE);
const float *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
uint channels = 1;
uint num_channel_elem = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
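// Round the launch extents up to the next multiple of the block shape so the grid covers the whole matrix.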
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(sum_channels, dim3(gridBlock), dim3(threadBlock), 0, 0, dest, src, channels, num_channel_elem);
hipDeviceSynchronize();
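// Untimed warm-up launches so one-time initialization costs do not distort the measurement below.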
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(sum_channels, dim3(gridBlock), dim3(threadBlock), 0, 0, dest, src, channels, num_channel_elem);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(sum_channels, dim3(gridBlock), dim3(threadBlock), 0, 0, dest, src, channels, num_channel_elem);
}
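// Note: there is no device synchronize before 'end', so the figure below mostly reflects
// launch/enqueue overhead for the 1000 launches rather than completed kernel execution time.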
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7adcd16e1be8322f891dab4e6c52332b11e99b05.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sum_channels.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *dest = NULL;
cudaMalloc(&dest, XSIZE*YSIZE);
const float *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
uint channels = 1;
uint num_channel_elem = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sum_channels<<<gridBlock,threadBlock>>>(dest,src,channels,num_channel_elem);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sum_channels<<<gridBlock,threadBlock>>>(dest,src,channels,num_channel_elem);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sum_channels<<<gridBlock,threadBlock>>>(dest,src,channels,num_channel_elem);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2ae944feccfbc332aed3d6ae78ae6bce03dc863e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Parallel ant algorithm for the travelling salesman problem using Cuda
//Made by: Arthur Henrique Guimarães
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <hiprand/hiprand_kernel.h>
//Problem Parameters
#define CITIES 1000
#define ANTS 180
#define MAX_DIST 100
#define MAX_TOTAL_DISTANCE (CITIES * MAX_DIST) // MAX possible distance that an ant can walk
#define ALPHA 1
#define BETA 5 //This parameter raises the weight of distance over pheromone
#define RHO 0.5 //Evapouration rate
#define QVAL 100 //
#define MAX_TOURS 50// The number of times an ant will walk trough all the cities
#define INIT_PHER (1.0/CITIES) //Initial hormone for each path
#define BLOCKS 3
#define THREADS ANTS/BLOCKS
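// 180 ants / 3 blocks = 60 threads per block; ANTS must be a multiple of BLOCKS for every ant to get a thread.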
//Global structures
struct ant{
// Current city, next city and total of traversed cities
int curCity, nextCity, pathIndex;
// List of visited cities
int visited[CITIES];
// Traversed path
int path[CITIES];
// Length of the traversed path
float tourLength;
};
//CPU
float distances[CITIES][CITIES]; // Distance between city i an j
double hormone[CITIES][CITIES]; //Hormone between city i and j
struct ant ants[ANTS];
float bestdistance[ANTS];
float finalbest = (float)MAX_TOTAL_DISTANCE;
hiprandState_t state[ANTS];
const size_t distances_size = sizeof(float) * size_t(CITIES*CITIES);
const size_t hormone_size = sizeof(double) * size_t(CITIES*CITIES);
//GPU
float *distances_d;
struct ant *ants_d;
double *hormone_d;
float *bestdistance_d;
hiprandState_t *state_d;
//Functions
void get_distances_matrix();
void deviceAlloc();
__global__ void initialize_ants(struct ant *ants_d, hiprandState_t *state_d, float *bestdistance_d);
__global__ void setup_curand_states(hiprandState_t *state_d, unsigned long t);
__global__ void restart_ants(struct ant *ants_d,hiprandState_t *state_d, float *bestdistance_d);
void move_ants();
__global__ void simulate_ants(struct ant *ants_d,hiprandState_t *state_d, float *distances_d, double *hormone_d);
__device__ double antProduct(int from, int to, double *hormone_d, float *distances_d);
__device__ int NextCity(struct ant *ants_d, int pos, float *distances_d, double *hormone_d, hiprandState_t *state_d );
void updateTrails();
int main(){
get_distances_matrix(); // Get the distances between cities from the input
deviceAlloc(); // Mallocs and memcpy of the device variables
//Set up an array of curand_states in order to build better random numbers
time_t t; time(&t);
hipLaunchKernelGGL(( setup_curand_states) , dim3(BLOCKS), dim3(THREADS) , 0, 0, state_d, (unsigned long) t);
hipDeviceSynchronize();
//initialize the ants array
hipLaunchKernelGGL(( initialize_ants) , dim3(BLOCKS), dim3(THREADS) , 0, 0, ants_d, state_d, bestdistance_d);
hipDeviceSynchronize();
// Start and control the ants tours
move_ants();
//Free Memory
hipFree(ants_d);
hipFree(bestdistance_d);
hipFree(distances_d);
hipFree(hormone_d);
hipFree(state_d);
hipFree(bestdistance_d);
return 0;
}
void get_distances_matrix(){
int i,j;
float k;
while(scanf("%i %i %f", &i,&j,&k) == 3){
distances[i][j] = k;
hormone[i][j] = INIT_PHER;
}
}
void deviceAlloc(){
hipMalloc( (void**) &ants_d, sizeof(ants));
hipMalloc( (void**) &state_d, sizeof(state));
hipMalloc( (void**) &distances_d, distances_size);
hipMemcpy(distances_d, distances, distances_size, hipMemcpyHostToDevice);
hipMalloc( (void**) &hormone_d, hormone_size);
hipMemcpy(hormone_d, hormone, hormone_size, hipMemcpyHostToDevice);
hipMalloc( (void**) &bestdistance_d, sizeof(bestdistance));
}
__global__ void setup_curand_states(hiprandState_t *state_d, unsigned long t){
int id = threadIdx.x + blockIdx.x*THREADS;
hiprand_init(t, id, 0, &state_d[id]);
}
__global__ void initialize_ants(struct ant *ants_d, hiprandState_t *state_d, float *bestdistance_d){
int position = threadIdx.x + blockIdx.x*THREADS;
int k;
// Mark all cities as not visited
// Mark all path as not traversed
for(k = 0; k < CITIES; k++){
ants_d[position].visited[k] = 0;
ants_d[position].path[k] = -1;
}
bestdistance_d[position] = (float)MAX_TOTAL_DISTANCE;
//Random City to begin
ants_d[position].curCity = hiprand(&state_d[position])% CITIES;
//
ants_d[position].pathIndex = 1;
ants_d[position].path[0] = ants_d[position].curCity;
ants_d[position].nextCity = -1;
ants_d[position].tourLength = 0;
ants_d[position].visited[ants_d[position].curCity] = 1;
}
__global__ void restart_ants(struct ant *ants_d,hiprandState_t *state_d, float *bestdistance_d){
int position = threadIdx.x + blockIdx.x*THREADS;
int i;
if(ants_d[position].tourLength < bestdistance_d[position]){
bestdistance_d[position] = ants_d[position].tourLength;
}
ants_d[position].nextCity = -1;
ants_d[position].tourLength = 0.0;
for(i = 0; i < CITIES; i++){
ants_d[position].visited[i] = 0;
ants_d[position].path[i] = -1;
}
ants_d[position].curCity = hiprand(&state_d[position])% CITIES;
ants_d[position].pathIndex = 1;
ants_d[position].path[0] = ants_d[position].curCity;
ants_d[position].visited[ants_d[position].curCity] = 1;
}
void move_ants(){
int curtour = 0;
while (curtour++ < MAX_TOURS){
hipLaunchKernelGGL(( simulate_ants) , dim3(BLOCKS), dim3(THREADS) , 0, 0, ants_d, state_d, distances_d, hormone_d);
hipDeviceSynchronize();
hipMemcpy(ants, ants_d, sizeof(ants), hipMemcpyDeviceToHost);
updateTrails();
hipMemcpy(hormone_d, hormone, hormone_size, hipMemcpyHostToDevice);
int i;
hipMemcpy(bestdistance, bestdistance_d, sizeof(bestdistance), hipMemcpyDeviceToHost);
for(i =0; i < ANTS; i++)
if(bestdistance[i] < finalbest){
finalbest = bestdistance[i];
}
printf("Best distance %f \n", finalbest);
hipLaunchKernelGGL(( restart_ants) , dim3(BLOCKS), dim3(THREADS) , 0, 0, ants_d, state_d, bestdistance_d);
hipDeviceSynchronize();
}
}
__global__ void simulate_ants(struct ant *ants_d,hiprandState_t *state_d, float *distances_d, double *hormone_d ){
int position = threadIdx.x + blockIdx.x*THREADS;
int curtime = 0;
while(curtime++ < CITIES){
if( ants_d[position].pathIndex < CITIES ){ //check if all cities were visited
// Choose the next city to visit
ants_d[position].nextCity = NextCity(ants_d, position, distances_d, hormone_d, state_d);
// Mark the city as visited
ants_d[position].visited[ants_d[position].nextCity] = 1;
// Mark when the city was visited
ants_d[position].path[ants_d[position].pathIndex++] = ants_d[position].nextCity;
// Add to the tour the distance between the curCity and the NextCity
ants_d[position].tourLength += distances_d[ants_d[position].curCity + (ants_d[position].nextCity * CITIES)];
// Handle the last case -> path from last to first city
if(ants_d[position].pathIndex == CITIES){
ants_d[position].tourLength += distances_d[ants_d[position].path[CITIES -1] + (ants_d[position].path[0]*CITIES)];
}
// Mark NextCity as the curCity
ants_d[position].curCity = ants_d[position].nextCity;
}
}
}
__device__ double antProduct(int from, int to, double *hormone_d, float *distances_d){
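// Transition desirability: hormone^ALPHA times (1/distance)^BETA, the standard ant-system weighting.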
return (double) (( pow( hormone_d[from + to*CITIES], ALPHA) * pow( (1.0/ distances_d[from + to*CITIES]), BETA)));
}
__device__ int NextCity(struct ant *ants_d, int pos, float *distances_d, double *hormone_d, hiprandState_t *state_d ){
int to, from;
double denom = 0.0;
from = ants_d[pos].curCity;
for(to = 0; to < CITIES; to++){
if(ants_d[pos].visited[to] == 0){
denom += antProduct(from, to, hormone_d, distances_d);
}
}
assert(denom != 0.0);
to++;
int count = CITIES - ants_d[pos].pathIndex;
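// Roulette-style draw: cycle over the unvisited cities, accepting each with probability
// proportional to its desirability; the count guard breaks out with the current candidate
// after CITIES - pathIndex rejections, so the loop cannot spin forever.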
do{
double p;
to++;
if(to >= CITIES)
to = 0;
if(ants_d[pos].visited[to] == 0){
p = (double) antProduct(from, to, hormone_d, distances_d)/denom;
double x = (double)(hiprand(&state_d[pos])% 1000000000000000000)/1000000000000000000;
if(x < p){
break;
}
count--;
if(count == 0){
break;
}
}
}while(1);
return to;
}
void updateTrails(){
int from,to,i,ant;
//hormone evaporation
for(from = 0; from < CITIES; from++)
for(to = 0;to < CITIES; to++){
if(from!=to){
hormone[from][to] *=( 1.0 - RHO);
if(hormone[from][to] < 0.0){
hormone[from][to] = INIT_PHER;
}
}
}
//add new pheromone to the trails
for(ant = 0; ant < ANTS; ant++)
for(i = 0; i < CITIES; i++){
if( i < CITIES - 1 ){
from = ants[ant].path[i];
to = ants[ant].path[i+1];
}
else{
from = ants[ant].path[i];
to = ants[ant].path[0];
}
hormone[from][to] += (QVAL/ ants[ant].tourLength);
hormone[to][from] = hormone[from][to];
}
for (from = 0; from < CITIES; from++)
for( to = 0; to < CITIES; to++){
hormone[from][to] *= RHO;
}
}
| 2ae944feccfbc332aed3d6ae78ae6bce03dc863e.cu | //Parallel ant algorithm for the travelling salesman problem using Cuda
//Made by: Arthur Henrique Guimarães
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <curand_kernel.h>
//Problem Parameters
#define CITIES 1000
#define ANTS 180
#define MAX_DIST 100
#define MAX_TOTAL_DISTANCE (CITIES * MAX_DIST) // MAX possible distance that an ant can walk
#define ALPHA 1
#define BETA 5 //This parameter raises the weight of distance over pheromone
#define RHO 0.5 //Evapouration rate
#define QVAL 100 //
#define MAX_TOURS 50// The number of times an ant will walk trough all the cities
#define INIT_PHER (1.0/CITIES) //Initial hormone for each path
#define BLOCKS 3
#define THREADS ANTS/BLOCKS
//Global structures
struct ant{
// Current city, next city and total of traversed cities
int curCity, nextCity, pathIndex;
// List of visited cities
int visited[CITIES];
// Traversed path
int path[CITIES];
// Length of the traversed path
float tourLength;
};
//CPU
float distances[CITIES][CITIES]; // Distance between city i an j
double hormone[CITIES][CITIES]; //Hormone between city i and j
struct ant ants[ANTS];
float bestdistance[ANTS];
float finalbest = (float)MAX_TOTAL_DISTANCE;
curandState state[ANTS];
const size_t distances_size = sizeof(float) * size_t(CITIES*CITIES);
const size_t hormone_size = sizeof(double) * size_t(CITIES*CITIES);
//GPU
float *distances_d;
struct ant *ants_d;
double *hormone_d;
float *bestdistance_d;
curandState *state_d;
//Functions
void get_distances_matrix();
void deviceAlloc();
__global__ void initialize_ants(struct ant *ants_d, curandState *state_d, float *bestdistance_d);
__global__ void setup_curand_states(curandState *state_d, unsigned long t);
__global__ void restart_ants(struct ant *ants_d,curandState *state_d, float *bestdistance_d);
void move_ants();
__global__ void simulate_ants(struct ant *ants_d,curandState *state_d, float *distances_d, double *hormone_d);
__device__ double antProduct(int from, int to, double *hormone_d, float *distances_d);
__device__ int NextCity(struct ant *ants_d, int pos, float *distances_d, double *hormone_d, curandState *state_d );
void updateTrails();
int main(){
get_distances_matrix(); // Get the distances between cities from the input
deviceAlloc(); // Mallocs and memcpy of the device variables
//Set up an array of curand_states in order to build better random numbers
time_t t; time(&t);
setup_curand_states <<< BLOCKS, THREADS >>> (state_d, (unsigned long) t);
cudaThreadSynchronize();
//initialize the ants array
initialize_ants <<< BLOCKS, THREADS >>> (ants_d, state_d, bestdistance_d);
cudaThreadSynchronize();
// Start and control the ants tours
move_ants();
//Free Memory
cudaFree(ants_d);
cudaFree(bestdistance_d);
cudaFree(distances_d);
cudaFree(hormone_d);
cudaFree(state_d);
cudaFree(bestdistance_d);
return 0;
}
void get_distances_matrix(){
int i,j;
float k;
while(scanf("%i %i %f", &i,&j,&k) == 3){
distances[i][j] = k;
hormone[i][j] = INIT_PHER;
}
}
void deviceAlloc(){
cudaMalloc( (void**) &ants_d, sizeof(ants));
cudaMalloc( (void**) &state_d, sizeof(state));
cudaMalloc( (void**) &distances_d, distances_size);
cudaMemcpy(distances_d, distances, distances_size, cudaMemcpyHostToDevice);
cudaMalloc( (void**) &hormone_d, hormone_size);
cudaMemcpy(hormone_d, hormone, hormone_size, cudaMemcpyHostToDevice);
cudaMalloc( (void**) &bestdistance_d, sizeof(bestdistance));
}
__global__ void setup_curand_states(curandState *state_d, unsigned long t){
int id = threadIdx.x + blockIdx.x*THREADS;
curand_init(t, id, 0, &state_d[id]);
}
__global__ void initialize_ants(struct ant *ants_d, curandState *state_d, float *bestdistance_d){
int position = threadIdx.x + blockIdx.x*THREADS;
int k;
// Mark all cities as not visited
// Mark all path as not traversed
for(k = 0; k < CITIES; k++){
ants_d[position].visited[k] = 0;
ants_d[position].path[k] = -1;
}
bestdistance_d[position] = (float)MAX_TOTAL_DISTANCE;
//Random City to begin
ants_d[position].curCity = curand(&state_d[position])% CITIES;
//
ants_d[position].pathIndex = 1;
ants_d[position].path[0] = ants_d[position].curCity;
ants_d[position].nextCity = -1;
ants_d[position].tourLength = 0;
ants_d[position].visited[ants_d[position].curCity] = 1;
}
__global__ void restart_ants(struct ant *ants_d,curandState *state_d, float *bestdistance_d){
int position = threadIdx.x + blockIdx.x*THREADS;
int i;
if(ants_d[position].tourLength < bestdistance_d[position]){
bestdistance_d[position] = ants_d[position].tourLength;
}
ants_d[position].nextCity = -1;
ants_d[position].tourLength = 0.0;
for(i = 0; i < CITIES; i++){
ants_d[position].visited[i] = 0;
ants_d[position].path[i] = -1;
}
ants_d[position].curCity = curand(&state_d[position])% CITIES;
ants_d[position].pathIndex = 1;
ants_d[position].path[0] = ants_d[position].curCity;
ants_d[position].visited[ants_d[position].curCity] = 1;
}
void move_ants(){
int curtour = 0;
while (curtour++ < MAX_TOURS){
simulate_ants <<< BLOCKS, THREADS >>> (ants_d, state_d, distances_d, hormone_d);
cudaThreadSynchronize();
cudaMemcpy(ants, ants_d, sizeof(ants), cudaMemcpyDeviceToHost);
updateTrails();
cudaMemcpy(hormone_d, hormone, hormone_size, cudaMemcpyHostToDevice);
int i;
cudaMemcpy(bestdistance, bestdistance_d, sizeof(bestdistance), cudaMemcpyDeviceToHost);
for(i =0; i < ANTS; i++)
if(bestdistance[i] < finalbest){
finalbest = bestdistance[i];
}
printf("Best distance %f \n", finalbest);
restart_ants <<< BLOCKS, THREADS >>> (ants_d, state_d, bestdistance_d);
cudaThreadSynchronize();
}
}
__global__ void simulate_ants(struct ant *ants_d,curandState *state_d, float *distances_d, double *hormone_d ){
int position = threadIdx.x + blockIdx.x*THREADS;
int curtime = 0;
while(curtime++ < CITIES){
if( ants_d[position].pathIndex < CITIES ){ //check if all cities were visited
// Choose the next city to visit
ants_d[position].nextCity = NextCity(ants_d, position, distances_d, hormone_d, state_d);
// Mark the city as visited
ants_d[position].visited[ants_d[position].nextCity] = 1;
// Mark when the city was visited
ants_d[position].path[ants_d[position].pathIndex++] = ants_d[position].nextCity;
// Add to the tour the distance between the curCity and the NextCity
ants_d[position].tourLength += distances_d[ants_d[position].curCity + (ants_d[position].nextCity * CITIES)];
// Handle the last case -> path from last to first city
if(ants_d[position].pathIndex == CITIES){
ants_d[position].tourLength += distances_d[ants_d[position].path[CITIES -1] + (ants_d[position].path[0]*CITIES)];
}
// Mark NextCity as the curCity
ants_d[position].curCity = ants_d[position].nextCity;
}
}
}
__device__ double antProduct(int from, int to, double *hormone_d, float *distances_d){
return (double) (( pow( hormone_d[from + to*CITIES], ALPHA) * pow( (1.0/ distances_d[from + to*CITIES]), BETA)));
}
__device__ int NextCity(struct ant *ants_d, int pos, float *distances_d, double *hormone_d, curandState *state_d ){
int to, from;
double denom = 0.0;
from = ants_d[pos].curCity;
for(to = 0; to < CITIES; to++){
if(ants_d[pos].visited[to] == 0){
denom += antProduct(from, to, hormone_d, distances_d);
}
}
assert(denom != 0.0);
to++;
int count = CITIES - ants_d[pos].pathIndex;
do{
double p;
to++;
if(to >= CITIES)
to = 0;
if(ants_d[pos].visited[to] == 0){
p = (double) antProduct(from, to, hormone_d, distances_d)/denom;
double x = (double)(curand(&state_d[pos])% 1000000000000000000)/1000000000000000000;
if(x < p){
break;
}
count--;
if(count == 0){
break;
}
}
}while(1);
return to;
}
void updateTrails(){
int from,to,i,ant;
//hormone evaporation
for(from = 0; from < CITIES; from++)
for(to = 0;to < CITIES; to++){
if(from!=to){
hormone[from][to] *=( 1.0 - RHO);
if(hormone[from][to] < 0.0){
hormone[from][to] = INIT_PHER;
}
}
}
//add new pheromone to the trails
for(ant = 0; ant < ANTS; ant++)
for(i = 0; i < CITIES; i++){
if( i < CITIES - 1 ){
from = ants[ant].path[i];
to = ants[ant].path[i+1];
}
else{
from = ants[ant].path[i];
to = ants[ant].path[0];
}
hormone[from][to] += (QVAL/ ants[ant].tourLength);
hormone[to][from] = hormone[from][to];
}
for (from = 0; from < CITIES; from++)
for( to = 0; to < CITIES; to++){
hormone[from][to] *= RHO;
}
}
|
7538d7725e02148b2648972579d7581e03bcbc06.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021 dePaul Miller ([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <iostream>
#include "gtest/gtest.h"
#include "testheader.cuh"
TEST(slabunified_test, MemoryLeakageTest) {
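// Repeatedly upserts the same key with fresh 1KB values; each insert hands back the value it
// evicted, which is freed on the host, so memory in use should stay bounded across repetitions.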
const int size = 1000;
std::hash<unsigned> hfn;
SlabUnified<unsigned, int *> s(size);
auto b = new BatchBuffer<unsigned, int *>();
s.setGPU();
for (unsigned i = 0; i < (unsigned) size; i += THREADS_PER_BLOCK * BLOCKS) {
unsigned j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS && i * THREADS_PER_BLOCK * BLOCKS + j < size; j++) {
unsigned key = 1;
int *value = new int[256]; // allocating 1KB
for (int w = 0; w < 256; w++) {
value[w] = 1;
}
b->getBatchKeys()[j] = key;
b->getHashValues()[j] = hfn(key);
b->getBatchRequests()[j] = REQUEST_INSERT;
b->getBatchValues()[j] = value;
}
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
b->getBatchRequests()[j] = REQUEST_EMPTY;
}
s.moveBufferToGPU(b, 0x0);
s.diy_batch(b, BLOCKS, THREADS_PER_BLOCK, 0x0);
s.moveBufferToCPU(b, 0x0);
gpuErrchk(hipStreamSynchronize(0x0));
j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
if (b->getBatchRequests()[j] == REQUEST_INSERT && b->getBatchValues()[j] != EMPTY<int *>::value) {
delete[] b->getBatchValues()[j];
}
}
}
for (int rep = 0; rep < 100; rep++) {
for (unsigned i = 0; i < (unsigned) size; i += THREADS_PER_BLOCK * BLOCKS) {
unsigned j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS && i * THREADS_PER_BLOCK * BLOCKS + j < size; j++) {
unsigned key = 1;
int *value = new int[256]; // allocating 1KB
for (int w = 0; w < 256; w++) {
value[w] = 1;
}
b->getBatchKeys()[j] = key;
b->getHashValues()[j] = hfn(key);
b->getBatchRequests()[j] = REQUEST_INSERT;
b->getBatchValues()[j] = value;
}
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
b->getBatchRequests()[j] = REQUEST_EMPTY;
}
s.moveBufferToGPU(b, 0x0);
s.diy_batch(b, BLOCKS, THREADS_PER_BLOCK, 0x0);
s.moveBufferToCPU(b, 0x0);
gpuErrchk(hipStreamSynchronize(0x0));
j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
if (b->getBatchRequests()[j] == REQUEST_INSERT && b->getBatchValues()[j] != EMPTY<int *>::value) {
delete[] b->getBatchValues()[j];
}
}
}
}
delete b;
}
TEST(slabunified_test, GetPutTest) {
static_assert(EMPTY<int*>::value == nullptr, "Need this to be true so GTEST works.");
const int size = 1000;
std::hash<unsigned> hfn;
SlabUnified<unsigned, int *> s(size);
auto b = new BatchBuffer<unsigned, int *>();
s.setGPU();
for (int rep = 0; rep < 100; rep++) {
for (unsigned i = 0; i < (unsigned) size; i += THREADS_PER_BLOCK * BLOCKS) {
unsigned j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS && i * THREADS_PER_BLOCK * BLOCKS + j < size; j++) {
unsigned key = j;
int *value = new int[256]; // allocating 1KB
for (int w = 0; w < 256; w++) {
value[w] = rep;
}
b->getBatchKeys()[j] = key;
b->getHashValues()[j] = hfn(key);
b->getBatchRequests()[j] = REQUEST_INSERT;
b->getBatchValues()[j] = value;
}
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
b->getBatchRequests()[j] = REQUEST_EMPTY;
}
s.moveBufferToGPU(b, 0x0);
s.diy_batch(b, BLOCKS, THREADS_PER_BLOCK, 0x0);
s.moveBufferToCPU(b, 0x0);
gpuErrchk(hipStreamSynchronize(0x0));
j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
if (b->getBatchRequests()[j] == REQUEST_INSERT && b->getBatchValues()[j] != EMPTY<int *>::value) {
GTEST_ASSERT_NE(b->getBatchValues()[j], nullptr);
for (int w = 0; w < 256; w++) {
GTEST_ASSERT_EQ(b->getBatchValues()[j][w], rep - 1) << " old insert was rep - 1";
}
delete[] b->getBatchValues()[j];
}
}
}
for (unsigned i = 0; i < (unsigned) size; i += THREADS_PER_BLOCK * BLOCKS) {
unsigned j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS && i * THREADS_PER_BLOCK * BLOCKS + j < size; j++) {
unsigned key = j;
b->getBatchKeys()[j] = key;
b->getHashValues()[j] = hfn(key);
b->getBatchRequests()[j] = REQUEST_GET;
}
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
b->getBatchRequests()[j] = REQUEST_EMPTY;
}
s.moveBufferToGPU(b, 0x0);
s.diy_batch(b, BLOCKS, THREADS_PER_BLOCK, 0x0);
s.moveBufferToCPU(b, 0x0);
gpuErrchk(hipStreamSynchronize(0x0));
j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
if (b->getBatchRequests()[j] == REQUEST_INSERT && b->getBatchValues()[j] != EMPTY<int *>::value) {
delete[] b->getBatchValues()[j];
}
if (b->getBatchRequests()[j] == REQUEST_GET) {
GTEST_ASSERT_NE(b->getBatchValues()[j], nullptr);
for (int w = 0; w < 256; w++) {
GTEST_ASSERT_EQ(b->getBatchValues()[j][w], rep) << " last insert was rep";
}
}
}
}
}
delete b;
}
TEST(slabunified_test, PutRemoveTest) {
static_assert(EMPTY<int*>::value == nullptr, "Need this to be true so GTEST works.");
const int size = 1000;
std::hash<unsigned> hfn;
SlabUnified<unsigned, int *> s(size);
auto b = new BatchBuffer<unsigned, int *>();
s.setGPU();
for (int rep = 0; rep < 100; rep++) {
for (unsigned i = 0; i < (unsigned) size; i += THREADS_PER_BLOCK * BLOCKS) {
unsigned j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS && i * THREADS_PER_BLOCK * BLOCKS + j < size; j++) {
unsigned key = j;
int *value = new int[256]; // allocating 1KB
for (int w = 0; w < 256; w++) {
value[w] = rep;
}
b->getBatchKeys()[j] = key;
b->getHashValues()[j] = hfn(key);
b->getBatchRequests()[j] = REQUEST_INSERT;
b->getBatchValues()[j] = value;
}
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
b->getBatchRequests()[j] = REQUEST_EMPTY;
}
s.moveBufferToGPU(b, 0x0);
s.diy_batch(b, BLOCKS, THREADS_PER_BLOCK, 0x0);
s.moveBufferToCPU(b, 0x0);
gpuErrchk(hipStreamSynchronize(0x0));
j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
if (b->getBatchRequests()[j] == REQUEST_INSERT) {
GTEST_ASSERT_EQ(b->getBatchValues()[j], nullptr) << " should always be reading nullptr last";
}
}
}
for (unsigned i = 0; i < (unsigned) size; i += THREADS_PER_BLOCK * BLOCKS) {
unsigned j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS && i * THREADS_PER_BLOCK * BLOCKS + j < size; j++) {
unsigned key = j;
b->getBatchKeys()[j] = key;
b->getHashValues()[j] = hfn(key);
b->getBatchRequests()[j] = REQUEST_REMOVE;
}
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
b->getBatchRequests()[j] = REQUEST_EMPTY;
}
s.moveBufferToGPU(b, 0x0);
s.diy_batch(b, BLOCKS, THREADS_PER_BLOCK, 0x0);
s.moveBufferToCPU(b, 0x0);
gpuErrchk(hipStreamSynchronize(0x0));
j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
if (b->getBatchRequests()[j] == REQUEST_REMOVE) {
GTEST_ASSERT_NE(b->getBatchValues()[j], nullptr) << " key value pair was inserted on key";
for (int w = 0; w < 256; w++) {
GTEST_ASSERT_EQ(b->getBatchValues()[j][w], rep) << " last insert was rep";
}
delete[] b->getBatchValues()[j];
}
}
}
}
delete b;
}
| 7538d7725e02148b2648972579d7581e03bcbc06.cu | /*
* Copyright (c) 2020-2021 dePaul Miller ([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <iostream>
#include "gtest/gtest.h"
#include "testheader.cuh"
TEST(slabunified_test, MemoryLeakageTest) {
const int size = 1000;
std::hash<unsigned> hfn;
SlabUnified<unsigned, int *> s(size);
auto b = new BatchBuffer<unsigned, int *>();
s.setGPU();
for (unsigned i = 0; i < (unsigned) size; i += THREADS_PER_BLOCK * BLOCKS) {
unsigned j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS && i * THREADS_PER_BLOCK * BLOCKS + j < size; j++) {
unsigned key = 1;
int *value = new int[256]; // allocating 1KB
for (int w = 0; w < 256; w++) {
value[w] = 1;
}
b->getBatchKeys()[j] = key;
b->getHashValues()[j] = hfn(key);
b->getBatchRequests()[j] = REQUEST_INSERT;
b->getBatchValues()[j] = value;
}
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
b->getBatchRequests()[j] = REQUEST_EMPTY;
}
s.moveBufferToGPU(b, 0x0);
s.diy_batch(b, BLOCKS, THREADS_PER_BLOCK, 0x0);
s.moveBufferToCPU(b, 0x0);
gpuErrchk(cudaStreamSynchronize(0x0));
j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
if (b->getBatchRequests()[j] == REQUEST_INSERT && b->getBatchValues()[j] != EMPTY<int *>::value) {
delete[] b->getBatchValues()[j];
}
}
}
for (int rep = 0; rep < 100; rep++) {
for (unsigned i = 0; i < (unsigned) size; i += THREADS_PER_BLOCK * BLOCKS) {
unsigned j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS && i * THREADS_PER_BLOCK * BLOCKS + j < size; j++) {
unsigned key = 1;
int *value = new int[256]; // allocating 1KB
for (int w = 0; w < 256; w++) {
value[w] = 1;
}
b->getBatchKeys()[j] = key;
b->getHashValues()[j] = hfn(key);
b->getBatchRequests()[j] = REQUEST_INSERT;
b->getBatchValues()[j] = value;
}
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
b->getBatchRequests()[j] = REQUEST_EMPTY;
}
s.moveBufferToGPU(b, 0x0);
s.diy_batch(b, BLOCKS, THREADS_PER_BLOCK, 0x0);
s.moveBufferToCPU(b, 0x0);
gpuErrchk(cudaStreamSynchronize(0x0));
j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
if (b->getBatchRequests()[j] == REQUEST_INSERT && b->getBatchValues()[j] != EMPTY<int *>::value) {
delete[] b->getBatchValues()[j];
}
}
}
}
delete b;
}
TEST(slabunified_test, GetPutTest) {
static_assert(EMPTY<int*>::value == nullptr, "Need this to be true so GTEST works.");
const int size = 1000;
std::hash<unsigned> hfn;
SlabUnified<unsigned, int *> s(size);
auto b = new BatchBuffer<unsigned, int *>();
s.setGPU();
for (int rep = 0; rep < 100; rep++) {
for (unsigned i = 0; i < (unsigned) size; i += THREADS_PER_BLOCK * BLOCKS) {
unsigned j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS && i * THREADS_PER_BLOCK * BLOCKS + j < size; j++) {
unsigned key = j;
int *value = new int[256]; // allocating 1KB
for (int w = 0; w < 256; w++) {
value[w] = rep;
}
b->getBatchKeys()[j] = key;
b->getHashValues()[j] = hfn(key);
b->getBatchRequests()[j] = REQUEST_INSERT;
b->getBatchValues()[j] = value;
}
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
b->getBatchRequests()[j] = REQUEST_EMPTY;
}
s.moveBufferToGPU(b, 0x0);
s.diy_batch(b, BLOCKS, THREADS_PER_BLOCK, 0x0);
s.moveBufferToCPU(b, 0x0);
gpuErrchk(cudaStreamSynchronize(0x0));
j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
if (b->getBatchRequests()[j] == REQUEST_INSERT && b->getBatchValues()[j] != EMPTY<int *>::value) {
GTEST_ASSERT_NE(b->getBatchValues()[j], nullptr);
for (int w = 0; w < 256; w++) {
GTEST_ASSERT_EQ(b->getBatchValues()[j][w], rep - 1) << " old insert was rep - 1";
}
delete[] b->getBatchValues()[j];
}
}
}
for (unsigned i = 0; i < (unsigned) size; i += THREADS_PER_BLOCK * BLOCKS) {
unsigned j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS && i * THREADS_PER_BLOCK * BLOCKS + j < size; j++) {
unsigned key = j;
b->getBatchKeys()[j] = key;
b->getHashValues()[j] = hfn(key);
b->getBatchRequests()[j] = REQUEST_GET;
}
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
b->getBatchRequests()[j] = REQUEST_EMPTY;
}
s.moveBufferToGPU(b, 0x0);
s.diy_batch(b, BLOCKS, THREADS_PER_BLOCK, 0x0);
s.moveBufferToCPU(b, 0x0);
gpuErrchk(cudaStreamSynchronize(0x0));
j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
if (b->getBatchRequests()[j] == REQUEST_INSERT && b->getBatchValues()[j] != EMPTY<int *>::value) {
delete[] b->getBatchValues()[j];
}
if (b->getBatchRequests()[j] == REQUEST_GET) {
GTEST_ASSERT_NE(b->getBatchValues()[j], nullptr);
for (int w = 0; w < 256; w++) {
GTEST_ASSERT_EQ(b->getBatchValues()[j][w], rep) << " last insert was rep";
}
}
}
}
}
delete b;
}
TEST(slabunified_test, PutRemoveTest) {
static_assert(EMPTY<int*>::value == nullptr, "Need this to be true so GTEST works.");
const int size = 1000;
std::hash<unsigned> hfn;
SlabUnified<unsigned, int *> s(size);
auto b = new BatchBuffer<unsigned, int *>();
s.setGPU();
for (int rep = 0; rep < 100; rep++) {
for (unsigned i = 0; i < (unsigned) size; i += THREADS_PER_BLOCK * BLOCKS) {
unsigned j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS && i * THREADS_PER_BLOCK * BLOCKS + j < size; j++) {
unsigned key = j;
int *value = new int[256]; // allocating 1KB
for (int w = 0; w < 256; w++) {
value[w] = rep;
}
b->getBatchKeys()[j] = key;
b->getHashValues()[j] = hfn(key);
b->getBatchRequests()[j] = REQUEST_INSERT;
b->getBatchValues()[j] = value;
}
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
b->getBatchRequests()[j] = REQUEST_EMPTY;
}
s.moveBufferToGPU(b, 0x0);
s.diy_batch(b, BLOCKS, THREADS_PER_BLOCK, 0x0);
s.moveBufferToCPU(b, 0x0);
gpuErrchk(cudaStreamSynchronize(0x0));
j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
if (b->getBatchRequests()[j] == REQUEST_INSERT) {
GTEST_ASSERT_EQ(b->getBatchValues()[j], nullptr) << " should always be reading nullptr last";
}
}
}
for (unsigned i = 0; i < (unsigned) size; i += THREADS_PER_BLOCK * BLOCKS) {
unsigned j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS && i * THREADS_PER_BLOCK * BLOCKS + j < size; j++) {
unsigned key = j;
b->getBatchKeys()[j] = key;
b->getHashValues()[j] = hfn(key);
b->getBatchRequests()[j] = REQUEST_REMOVE;
}
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
b->getBatchRequests()[j] = REQUEST_EMPTY;
}
s.moveBufferToGPU(b, 0x0);
s.diy_batch(b, BLOCKS, THREADS_PER_BLOCK, 0x0);
s.moveBufferToCPU(b, 0x0);
gpuErrchk(cudaStreamSynchronize(0x0));
j = 0;
for (; j < THREADS_PER_BLOCK * BLOCKS; j++) {
if (b->getBatchRequests()[j] == REQUEST_REMOVE) {
GTEST_ASSERT_NE(b->getBatchValues()[j], nullptr) << " key value pair was inserted on key";
for (int w = 0; w < 256; w++) {
GTEST_ASSERT_EQ(b->getBatchValues()[j][w], rep) << " last insert was rep";
}
delete[] b->getBatchValues()[j];
}
}
}
}
delete b;
}
|
3729ea5eef935a04b65e1e93e0a4d494d16b49e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <limits.h>
#define M_PI 3.14159265358979323846
#define VECTOR_COUNT 2
hipError_t computeElementsHelper(int* a, int* b, int* lengthNoSqrt, int* dotProduct, int N, int blockSize);
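// Kernel: one thread per element; atomically accumulates a[i]*b[i] into the dot product
// and a[i]*a[i], b[i]*b[i] into the two squared vector lengths.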
__global__ void computeElementsKernel(int* lengthNoSqrt, int* product, int* a, int* b, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
//printf("Doing something with thread %d\n", i);
//printf("Element: %d %d\n", a[i], b[i]);
//find the dot product.
atomicAdd(product, a[i] * b[i]);
//printf("Sumsquares one before: %d\n", lengthNoSqrt[0]);
//printf("Sumsquares two before: %d\n", lengthNoSqrt[1]);
atomicAdd(&(lengthNoSqrt[0]), a[i] * a[i]);
atomicAdd(&(lengthNoSqrt[1]), b[i] * b[i]);
//printf("Sumsquares one after: %d\n", lengthNoSqrt[0]);
//printf("Sumsquares two after: %d\n", lengthNoSqrt[1]);
}
}
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int* genVector(int N) {
int* vector = (int*)malloc(sizeof(int) * N);
for (int i = 0; i < N; i++) {
int randNum = rand() % 20 - 10;
vector[i] = randNum;
}
return vector;
}
int findDotProduct(int* a, int* b, int N) {
int sum = 0;
for (int i = 0; i < N; i++) {
sum = sum + (a[i] * b[i]);
}
return sum;
}
void printArray(int* x, int size) {
for (int i = 0; i < size; i++) {
printf("arr[%d] = %d\n", i, x[i]);
}
}
double findVectorLength(int* x, int N) {
int sumSquares = 0;
for (int i = 0; i < N; i++) {
sumSquares = sumSquares + pow(x[i], 2);
}
//printf("SumSquares serial: %d\n", sumSquares);
double distance = sqrt(sumSquares);
return distance;
}
double convertToDegrees(double rad) {
return rad * (180 / M_PI);
}
void printDeviceProperties() {
printf("--------------------DEVICE PROPERTIES----------------------\n\n");
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf("Device name: %s\n", prop.name);
printf("Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf("Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf("Peak Memory Bandwidth (GB/s): %f\n\n",
2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6);
}
}
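// CPU reference: computes the angle between the two vectors via
// cos(theta) = (a . b) / (|a| * |b|), returned in degrees.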
double doTheSerialThing(int* vectorOne, int* vectorTwo, int SIZE) {
//printf("-----------------SERIAL IMPLEMENTATION----------------------\n\n");
double dotProduct = (double)findDotProduct(vectorOne, vectorTwo, SIZE);
double vectorLengthOne = findVectorLength(vectorOne, SIZE);
double vectorLengthTwo = findVectorLength(vectorTwo, SIZE);
double cosTheta = dotProduct / (vectorLengthOne * vectorLengthTwo);
double angleInRadians = acos(cosTheta);
double angleInDegrees = convertToDegrees(angleInRadians);
//printf("length one: %f\n", vectorLengthOne);
//printf("length two: %f\n", vectorLengthTwo);
//printf("Angle in radians: %f\n", angleInRadians);
//printArray(vectorOne, SIZE);
//printArray(vectorTwo, SIZE);
//printf("DOT PRODUCT SERIAL: %f\n", dotProduct);
return angleInDegrees;
}
int main(int argc, char** argv)
{
//Before beginning, print device properties.
//printDeviceProperties();
srand(time(NULL));
clock_t start, end;
double cpu_time_used;
int SIZE = atoi(argv[1]);
int BLOCK_SIZE = atoi(argv[2]);
int* vectorOne = NULL;
int* vectorTwo = NULL;
int lengthsNoSqrt[VECTOR_COUNT] = { 0 };
int dotProduct[1] = { 0 };
double angleSerial = 0;
int numberBlocks = 0;
if (SIZE % BLOCK_SIZE == 0)
numberBlocks = SIZE / BLOCK_SIZE;
else
numberBlocks = (SIZE / BLOCK_SIZE) + 1;
printf("Info\n------------------\n");
printf("Number of elements: %d\n", SIZE);
printf("Number of threads per block: %d\n", BLOCK_SIZE);
printf("Number of blocks will be created: %d\n\n", numberBlocks);
//arrays will be generated
if (argc == 3) {
printf("Time\n------------------\n");
start = clock();
vectorOne = genVector(SIZE);
vectorTwo = genVector(SIZE);
end = clock();
cpu_time_used = ((double)(end - start) * 1000) / CLOCKS_PER_SEC;
printf("Time for the array generation : %f ms\n", cpu_time_used);
}
//arrays will be read from file.
else if (argc == 4) {
char const* const fileName = argv[3]; /* should check that argc > 1 */
FILE* file = fopen(fileName, "r"); /* should check the result */
char line[256];
fgets(line, sizeof(line), file);
int count = atoi(line);
int* allArray = (int*)malloc(sizeof(int) * count * 2);
vectorOne = (int*)malloc(sizeof(int) * count);
vectorTwo = (int*)malloc(sizeof(int) * count);
int i = 0;
//printf("COUNT: %d\n", count);
while (fgets(line, sizeof(line), file)) {
/* note that fgets doesn't strip the terminating \n; checking for its
presence would allow handling lines longer than sizeof(line) */
int number = atoi(line);
allArray[i] = number;
i++;
}
/* may check feof here to distinguish between EOF and an I/O failure -- a network
timeout, for instance */
/*
for (int i = 0; i < count; i++) {
printf("allArray[%d] = %d\n", i, allArray[i]);
}
*/
for (int i = 0; i < count; i++) {
vectorOne[i] = allArray[i];
}
for (int i = count; i < count * 2; i++) {
vectorTwo[i - count] = allArray[i];
}
fclose(file);
}
else {
printf("Invalid number of arguments. Usage: <num_elements> <threads_per_block> [input_file]\n");
return 0;
}
start = clock();
angleSerial = doTheSerialThing(vectorOne, vectorTwo, SIZE);
end = clock();
cpu_time_used = ((double)(end - start) * 1000) / CLOCKS_PER_SEC;
printf("Time for the CPU function: %f ms\n", cpu_time_used);
//printf("---------------------PARALLEL IMPLEMENTATION-----------------\n\n");
// Calculate angle with CUDA.
hipError_t cudaStatus = computeElementsHelper(vectorOne, vectorTwo, lengthsNoSqrt, dotProduct, SIZE, BLOCK_SIZE);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "computeElements failed!");
return 1;
}
// find the angle here.
double lenOne = sqrt( (double) lengthsNoSqrt[0]);
double lenTwo = sqrt( (double) lengthsNoSqrt[1]);
double cosTheta = ( ((double) (dotProduct[0])) / (lenOne * lenTwo));
double angleInRadians = acos(cosTheta);
double angle = convertToDegrees(angleInRadians);
printf("\n");
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
printf("Results\n-----------------\n");
printf("CPU Result: %0.3f\n", angleSerial);
printf("GPU Result: %0.3f\n", angle);
return 0;
}
hipError_t computeElementsHelper(int* a, int* b, int* lengthNoSqrt, int* dotProduct, int N, int blockSize)
{
int* dev_a = 0;
int* dev_b = 0;
int* dev_lengthNoSqrt = 0;
int* dev_product = 0;
hipError_t cudaStatus;
clock_t start, end;
double timeUsed;
double totalGpuTime = 0;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
}
cudaStatus = hipMalloc((void**)&dev_a, N * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc dev a failed!\n");
}
cudaStatus = hipMalloc((void**)&dev_b, N * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc dev b failed!\n");
}
cudaStatus = hipMalloc((void**)&dev_lengthNoSqrt, VECTOR_COUNT * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc dev length failed!\n");
}
cudaStatus = hipMalloc((void**)&dev_product, sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc dev product failed!\n");
}
// Copy input vectors from host memory to GPU buffers.
start = clock();
cudaStatus = hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!\n");
}
cudaStatus = hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!\n");
}
end = clock();
timeUsed = ((double)(end - start) * 1000) / CLOCKS_PER_SEC;
totalGpuTime += timeUsed;
printf("Time for the Host to Device transfer: %f ms\n", timeUsed);
// Launch a kernel on the GPU with one thread for each element.
int numberBlocks = 0;
if (N % blockSize == 0)
numberBlocks = N / blockSize;
else
numberBlocks = (N / blockSize) + 1;
start = clock();
hipLaunchKernelGGL(( computeElementsKernel) , dim3(numberBlocks), dim3(blockSize) , 0, 0, dev_lengthNoSqrt, dev_product, dev_a, dev_b, N);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "computeElementsKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
}
end = clock();
timeUsed = ((double)(end - start) * 1000) / CLOCKS_PER_SEC;
totalGpuTime += timeUsed;
printf("Time for the kernel execution: %f ms\n", timeUsed);
start = clock();
cudaStatus = hipMemcpy(lengthNoSqrt, dev_lengthNoSqrt, VECTOR_COUNT * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy for dev_lengths failed!\n");
}
cudaStatus = hipMemcpy(dotProduct, dev_product, sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy for dotProduct failed!\n");
}
end = clock();
timeUsed = ((double)(end - start) * 1000) / CLOCKS_PER_SEC;
printf("Time for the Device to Host transfer: %f ms\n", timeUsed);
totalGpuTime += timeUsed;
printf("Total execution time for GPU: %f ms\n", totalGpuTime);
hipFree(dev_product);
hipFree(dev_lengthNoSqrt);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
} | 3729ea5eef935a04b65e1e93e0a4d494d16b49e4.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <limits.h>
#define M_PI 3.14159265358979323846
#define VECTOR_COUNT 2
cudaError_t computeElementsHelper(int* a, int* b, int* lengthNoSqrt, int* dotProduct, int N, int blockSize);
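// Kernel: one thread per element; atomically accumulates a[i]*b[i] into the dot product
// and a[i]*a[i], b[i]*b[i] into the two squared vector lengths.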
__global__ void computeElementsKernel(int* lengthNoSqrt, int* product, int* a, int* b, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
//printf("Doing something with thread %d\n", i);
//printf("Element: %d %d\n", a[i], b[i]);
//find the dot product.
atomicAdd(product, a[i] * b[i]);
//printf("Sumsquares one before: %d\n", lengthNoSqrt[0]);
//printf("Sumsquares two before: %d\n", lengthNoSqrt[1]);
atomicAdd(&(lengthNoSqrt[0]), a[i] * a[i]);
atomicAdd(&(lengthNoSqrt[1]), b[i] * b[i]);
//printf("Sumsquares one after: %d\n", lengthNoSqrt[0]);
//printf("Sumsquares two after: %d\n", lengthNoSqrt[1]);
}
}
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int* genVector(int N) {
int* vector = (int*)malloc(sizeof(int) * N);
for (int i = 0; i < N; i++) {
int randNum = rand() % 20 - 10;
vector[i] = randNum;
}
return vector;
}
int findDotProduct(int* a, int* b, int N) {
int sum = 0;
for (int i = 0; i < N; i++) {
sum = sum + (a[i] * b[i]);
}
return sum;
}
void printArray(int* x, int size) {
for (int i = 0; i < size; i++) {
printf("arr[%d] = %d\n", i, x[i]);
}
}
double findVectorLength(int* x, int N) {
int sumSquares = 0;
for (int i = 0; i < N; i++) {
sumSquares = sumSquares + pow(x[i], 2);
}
//printf("SumSquares serial: %d\n", sumSquares);
double distance = sqrt(sumSquares);
return distance;
}
double convertToDegrees(double rad) {
return rad * (180 / M_PI);
}
void printDeviceProperties() {
printf("--------------------DEVICE PROPERTIES----------------------\n\n");
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf("Device name: %s\n", prop.name);
printf("Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf("Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf("Peak Memory Bandwidth (GB/s): %f\n\n",
2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6);
}
}
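// CPU reference: computes the angle between the two vectors via
// cos(theta) = (a . b) / (|a| * |b|), returned in degrees.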
double doTheSerialThing(int* vectorOne, int* vectorTwo, int SIZE) {
//printf("-----------------SERIAL IMPLEMENTATION----------------------\n\n");
double dotProduct = (double)findDotProduct(vectorOne, vectorTwo, SIZE);
double vectorLengthOne = findVectorLength(vectorOne, SIZE);
double vectorLengthTwo = findVectorLength(vectorTwo, SIZE);
double cosTheta = dotProduct / (vectorLengthOne * vectorLengthTwo);
double angleInRadians = acos(cosTheta);
double angleInDegrees = convertToDegrees(angleInRadians);
//printf("length one: %f\n", vectorLengthOne);
//printf("length two: %f\n", vectorLengthTwo);
//printf("Angle in radians: %f\n", angleInRadians);
//printArray(vectorOne, SIZE);
//printArray(vectorTwo, SIZE);
//printf("DOT PRODUCT SERIAL: %f\n", dotProduct);
return angleInDegrees;
}
int main(int argc, char** argv)
{
//Before beginning, print device properties.
//printDeviceProperties();
srand(time(NULL));
clock_t start, end;
double cpu_time_used;
int SIZE = atoi(argv[1]);
int BLOCK_SIZE = atoi(argv[2]);
int* vectorOne = NULL;
int* vectorTwo = NULL;
int lengthsNoSqrt[VECTOR_COUNT] = { 0 };
int dotProduct[1] = { 0 };
double angleSerial = 0;
int numberBlocks = 0;
if (SIZE % BLOCK_SIZE == 0)
numberBlocks = SIZE / BLOCK_SIZE;
else
numberBlocks = (SIZE / BLOCK_SIZE) + 1;
printf("Info\n------------------\n");
printf("Number of elements: %d\n", SIZE);
printf("Number of threads per block: %d\n", BLOCK_SIZE);
printf("Number of blocks will be created: %d\n\n", numberBlocks);
//arrays will be generated
if (argc == 3) {
printf("Time\n------------------\n");
start = clock();
vectorOne = genVector(SIZE);
vectorTwo = genVector(SIZE);
end = clock();
cpu_time_used = ((double)(end - start) * 1000) / CLOCKS_PER_SEC;
printf("Time for the array generation : %f ms\n", cpu_time_used);
}
//arrays will be read from file.
else if (argc == 4) {
char const* const fileName = argv[3]; /* should check that argc > 1 */
FILE* file = fopen(fileName, "r"); /* should check the result */
char line[256];
fgets(line, sizeof(line), file);
int count = atoi(line);
int* allArray = (int*)malloc(sizeof(int) * count * 2);
vectorOne = (int*)malloc(sizeof(int) * count);
vectorTwo = (int*)malloc(sizeof(int) * count);
int i = 0;
//printf("COUNT: %d\n", count);
while (fgets(line, sizeof(line), file)) {
/* note that fgets doesn't strip the terminating \n; checking for its
presence would allow handling lines longer than sizeof(line) */
int number = atoi(line);
allArray[i] = number;
i++;
}
/* may check feof here to distinguish between EOF and an I/O failure -- a network
timeout, for instance */
/*
for (int i = 0; i < count; i++) {
printf("allArray[%d] = %d\n", i, allArray[i]);
}
*/
for (int i = 0; i < count; i++) {
vectorOne[i] = allArray[i];
}
for (int i = count; i < count * 2; i++) {
vectorTwo[i - count] = allArray[i];
}
fclose(file);
}
else {
printf("Invalid number of arguments. Usage: <num_elements> <threads_per_block> [input_file]\n");
return 0;
}
start = clock();
angleSerial = doTheSerialThing(vectorOne, vectorTwo, SIZE);
end = clock();
cpu_time_used = ((double)(end - start) * 1000) / CLOCKS_PER_SEC;
printf("Time for the CPU function: %f ms\n", cpu_time_used);
//printf("---------------------PARALLEL IMPLEMENTATION-----------------\n\n");
// Calculate angle with CUDA.
cudaError_t cudaStatus = computeElementsHelper(vectorOne, vectorTwo, lengthsNoSqrt, dotProduct, SIZE, BLOCK_SIZE);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "computeElements failed!");
return 1;
}
// find the angle here.
double lenOne = sqrt( (double) lengthsNoSqrt[0]);
double lenTwo = sqrt( (double) lengthsNoSqrt[1]);
double cosTheta = ( ((double) (dotProduct[0])) / (lenOne * lenTwo));
double angleInRadians = acos(cosTheta);
double angle = convertToDegrees(angleInRadians);
printf("\n");
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
printf("Results\n-----------------\n");
printf("CPU Result: %0.3f\n", angleSerial);
printf("GPU Result: %0.3f\n", angle);
return 0;
}
cudaError_t computeElementsHelper(int* a, int* b, int* lengthNoSqrt, int* dotProduct, int N, int blockSize)
{
int* dev_a = 0;
int* dev_b = 0;
int* dev_lengthNoSqrt = 0;
int* dev_product = 0;
cudaError_t cudaStatus;
clock_t start, end;
double timeUsed;
double totalGpuTime = 0;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
}
cudaStatus = cudaMalloc((void**)&dev_a, N * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc dev a failed!\n");
}
cudaStatus = cudaMalloc((void**)&dev_b, N * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc dev b failed!\n");
}
cudaStatus = cudaMalloc((void**)&dev_lengthNoSqrt, VECTOR_COUNT * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc dev length failed!\n");
}
cudaStatus = cudaMalloc((void**)&dev_product, sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc dev product failed!\n");
}
// Copy input vectors from host memory to GPU buffers.
start = clock();
cudaStatus = cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!\n");
}
cudaStatus = cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!\n");
}
end = clock();
timeUsed = ((double)(end - start) * 1000) / CLOCKS_PER_SEC;
totalGpuTime += timeUsed;
printf("Time for the Host to Device transfer: %f ms\n", timeUsed);
// Launch a kernel on the GPU with one thread for each element.
int numberBlocks = 0;
if (N % blockSize == 0)
numberBlocks = N / blockSize;
else
numberBlocks = (N / blockSize) + 1;
start = clock();
computeElementsKernel <<< numberBlocks, blockSize >>> (dev_lengthNoSqrt, dev_product, dev_a, dev_b, N);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "computeElementsKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
}
end = clock();
timeUsed = ((double)(end - start) * 1000) / CLOCKS_PER_SEC;
totalGpuTime += timeUsed;
printf("Time for the kernel execution: %f ms\n", timeUsed);
start = clock();
cudaStatus = cudaMemcpy(lengthNoSqrt, dev_lengthNoSqrt, VECTOR_COUNT * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy for dev_lengths failed!\n");
}
cudaStatus = cudaMemcpy(dotProduct, dev_product, sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy for dotProduct failed!\n");
}
end = clock();
timeUsed = ((double)(end - start) * 1000) / CLOCKS_PER_SEC;
printf("Time for the Device to Host transfer: %f ms\n", timeUsed);
totalGpuTime += timeUsed;
printf("Total execution time for GPU: %f ms\n", totalGpuTime);
cudaFree(dev_product);
cudaFree(dev_lengthNoSqrt);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
} |
8ba8f65a7a402af942bba106c157116dca21c849.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_wtf.cu
*
* @brief Simple test driver program for computing Pagerank.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
#include <cstdlib>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// WTF includes
#include <gunrock/app/wtf/wtf_enactor.cuh>
#include <gunrock/app/wtf/wtf_problem.cuh>
#include <gunrock/app/wtf/wtf_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// boost includes
#include <boost/config.hpp>
#include <boost/utility.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/page_rank.hpp>
using namespace gunrock;
using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::wtf;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
//bool g_verbose;
//bool g_undirected;
//bool g_quick;
//bool g_stream_from_host;
template <typename VertexId, typename Value>
struct RankPair
{
VertexId vertex_id;
Value page_rank;
RankPair(VertexId vertex_id, Value page_rank) :
vertex_id(vertex_id), page_rank(page_rank) {}
};
template<typename RankPair>
bool PRCompare(
RankPair elem1,
RankPair elem2)
{
return elem1.page_rank > elem2.page_rank;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf(
"test <graph-type> [graph-type-arguments]\n"
"Graph type and graph type arguments:\n"
" market <matrix-market-file-name>\n"
" Reads a Matrix-Market coordinate-formatted graph of\n"
" directed/undirected edges from STDIN (or from the\n"
" optionally-specified file).\n"
" rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19)\n"
" Generate R-MAT graph as input\n"
" --rmat_scale=<vertex-scale>\n"
" --rmat_nodes=<number-nodes>\n"
" --rmat_edgefactor=<edge-factor>\n"
" --rmat_edges=<number-edges>\n"
" --rmat_a=<factor> --rmat_b=<factor> --rmat_c=<factor>\n"
" --rmat_seed=<seed>\n"
" rgg (default: rgg_scale = 10, rgg_thfactor = 0.55)\n"
" Generate Random Geometry Graph as input\n"
" --rgg_scale=<vertex-scale>\n"
" --rgg_nodes=<number-nodes>\n"
" --rgg_thfactor=<threshold-factor>\n"
" --rgg_threshold=<threshold>\n"
" --rgg_vmultipiler=<vmultipiler>\n"
" --rgg_seed=<seed>\n\n"
"Optional arguments:\n"
"[--device=<device_index>] Set GPU(s) for testing (Default: 0).\n"
"[--undirected] Treat the graph as undirected (symmetric).\n"
"[--instrumented] Keep kernels statics [Default: Disable].\n"
" total_queued, search_depth and barrier duty.\n"
" (a relative indicator of load imbalance.)\n"
"[--quick] Skip the CPU reference validation process.\n"
"[--mark-pred] Keep both label info and predecessor info.\n"
"[--disable-size-check] Disable frontier queue size check.\n"
"[--grid-size=<grid size>] Maximum allowed grid size setting.\n"
"[--queue-sizing=<factor>] Allocates a frontier queue sized at: \n"
" (graph-edges * <factor>). (Default: 1.0)\n"
"[--in-sizing=<in/out_queue_scale_factor>]\n"
" Allocates a frontier queue sized at: \n"
" (graph-edges * <factor>). (Default: 1.0)\n"
"[--v] Print verbose per iteration debug info.\n"
"[--iteration-num=<num>] Number of runs to perform the test.\n"
"[--quiet] No output (unless --json is specified).\n"
"[--json] Output JSON-format statistics to STDOUT.\n"
"[--jsonfile=<name>] Output JSON-format statistics to file <name>\n"
"[--jsondir=<dir>] Output JSON-format statistics to <dir>/name,\n"
" where name is auto-generated.\n"
);
}
/**
* @brief Displays the WTF result (i.e., top page ranks)
*
* @param[in] node_id Pointer to node ID array
* @param[in] rank Pointer to node rank score array
* @param[in] nodes Number of nodes in the graph.
*/
template<typename VertexId, typename SizeT, typename Value>
void DisplaySolution(VertexId *node_id, Value *rank, SizeT nodes)
{
// Print out at most top 10 largest components
SizeT top = (nodes < 10) ? nodes : 10;
printf("Top %lld Page Ranks:\n", (long long)top);
for (SizeT i = 0; i < top; ++i)
{
printf("Vertex ID: %lld, Page Rank: %5f\n",
(long long)node_id[i], rank[i]);
}
}
/******************************************************************************
* WTF Testing Routines
*****************************************************************************/
/**
* @brief A simple CPU-based reference WTF implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] src Source node ID for WTF algorithm
* @param[out] node_id Pointer to store computed output node ID
* @param[in] rank Host-side vector to store CPU computed labels for each node
* @param[in] delta Delta value for computing PageRank score
* @param[in] alpha Parameter to adjust iteration number
* @param[in] max_iter max iteration to go
*/
// TODO: Boost PageRank cannot handle personalized pagerank, so currently the CPU
// implementation gives incorrect answer. Need to find a CPU PPR implementation
template <
typename VertexId,
typename SizeT,
typename Value>
void ReferenceWTF(
const Csr<VertexId, SizeT, Value> &graph,
VertexId src,
VertexId *node_id,
Value *rank,
Value delta,
Value alpha,
SizeT max_iter)
{
using namespace boost;
//Preparation
typedef adjacency_list<vecS, vecS, bidirectionalS, no_property,
property<edge_index_t, int> > Graph;
Graph g;
for (int i = 0; i < graph.nodes; ++i)
{
for (SizeT j = graph.row_offsets[i]; j < graph.row_offsets[i + 1]; ++j)
{
Graph::edge_descriptor e =
add_edge(i, graph.column_indices[j], g).first;
put(edge_index, g, e, i);
}
}
//
//compute page rank
//
CpuTimer cpu_timer;
cpu_timer.Start();
//remove_dangling_links(g);
std::vector<Value> ranks(num_vertices(g));
page_rank(g, make_iterator_property_map(
ranks.begin(), get(boost::vertex_index, g)),
boost::graph::n_iterations(max_iter));
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
for (std::size_t i = 0; i < num_vertices(g); ++i)
{
rank[i] = ranks[i];
}
//sort the top page ranks
RankPair<SizeT, Value> *pr_list =
(RankPair<SizeT, Value>*)malloc(
sizeof(RankPair<SizeT, Value>) * num_vertices(g));
for (std::size_t i = 0; i < num_vertices(g); ++i)
{
pr_list[i].vertex_id = i;
pr_list[i].page_rank = rank[i];
}
std::stable_sort(
pr_list, pr_list + num_vertices(g), PRCompare<RankPair<SizeT, Value> >);
std::vector<SizeT> in_degree(num_vertices(g));
std::vector<Value> refscore(num_vertices(g));
for (std::size_t i = 0; i < num_vertices(g); ++i)
{
node_id[i] = pr_list[i].vertex_id;
rank[i] = (i == src) ? 1.0 : 0;
in_degree[i] = 0;
refscore[i] = 0;
}
free(pr_list);
SizeT cot_size = (graph.nodes > 1000) ? 1000 : graph.nodes;
for (SizeT i = 0; i < cot_size; ++i)
{
VertexId node = node_id[i];
for (SizeT j = graph.row_offsets[node];
j < graph.row_offsets[node + 1]; ++j)
{
VertexId edge = graph.column_indices[j];
++in_degree[edge];
}
}
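// SALSA-style iterations over the circle of trust (the top-ranked vertices):
// push each vertex's rank to its neighbors as refscore, then pull it back normalized
// by in-degree, restarting a fraction alpha of the mass at the source vertex.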
SizeT salsa_iter = 1.0 / alpha + 1;
for (SizeT iter = 0; iter < salsa_iter; ++iter)
{
for (SizeT i = 0; i < cot_size; ++i)
{
VertexId node = node_id[i];
SizeT out_degree = graph.row_offsets[node + 1] - graph.row_offsets[node];
for (SizeT j = graph.row_offsets[node];
j < graph.row_offsets[node + 1]; ++j)
{
VertexId edge = graph.column_indices[j];
Value val = rank[node] / (out_degree > 0 ? out_degree : 1.0);
refscore[edge] += val;
}
}
for (SizeT i = 0; i < cot_size; ++i)
{
rank[node_id[i]] = 0;
}
for (SizeT i = 0; i < cot_size; ++i)
{
VertexId node = node_id[i];
rank[node] += (node == src) ? alpha : 0;
for (SizeT j = graph.row_offsets[node];
j < graph.row_offsets[node + 1]; ++j)
{
VertexId edge = graph.column_indices[j];
Value val = (1 - alpha) * refscore[edge] / in_degree[edge];
rank[node] += val;
}
}
for (SizeT i = 0; i < cot_size; ++i)
{
if (iter + 1 < salsa_iter) refscore[node_id[i]] = 0;
}
}
//sort the top page ranks
RankPair<SizeT, Value> *final_list =
(RankPair<SizeT, Value>*)malloc(
sizeof(RankPair<SizeT, Value>) * num_vertices(g));
for (std::size_t i = 0; i < num_vertices(g); ++i)
{
final_list[i].vertex_id = node_id[i];
final_list[i].page_rank = refscore[i];
}
std::stable_sort(
final_list, final_list + num_vertices(g),
PRCompare<RankPair<SizeT, Value> >);
for (std::size_t i = 0; i < num_vertices(g); ++i)
{
node_id[i] = final_list[i].vertex_id;
rank[i] = final_list[i].page_rank;
}
free(final_list);
printf("CPU Who-To-Follow finished in %lf msec.\n", elapsed);
}
/**
* @brief Run WTF tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
* @tparam DEBUG
* @tparam SIZE_CHECK
*
* @param[in] info Pointer to info contains parameters and statistics.
*
*/
template <
typename VertexId,
typename SizeT,
typename Value>
//bool INSTRUMENT,
//bool DEBUG,
//bool SIZE_CHECK >
void RunTests(Info<VertexId, SizeT, Value> *info)
{
typedef WTFProblem <
VertexId,
SizeT,
Value >
Problem;
typedef WTFEnactor <Problem>
Enactor;
Csr<VertexId, SizeT, Value> *csr = info->csr_ptr;
VertexId src = info->info["source_vertex" ].get_int64();
int max_grid_size = info->info["max_grid_size" ].get_int ();
int num_gpus = info->info["num_gpus" ].get_int ();
double max_queue_sizing = info->info["max_queue_sizing" ].get_real ();
double max_queue_sizing1 = info->info["max_queue_sizing1" ].get_real ();
double max_in_sizing = info->info["max_in_sizing" ].get_real ();
std::string partition_method = info->info["partition_method" ].get_str ();
double partition_factor = info->info["partition_factor" ].get_real ();
int partition_seed = info->info["partition_seed" ].get_int ();
bool quick_mode = info->info["quick_mode" ].get_bool ();
bool quiet_mode = info->info["quiet_mode" ].get_bool ();
bool stream_from_host = info->info["stream_from_host" ].get_bool ();
bool instrument = info->info["instrument" ].get_bool ();
bool debug = info->info["debug_mode" ].get_bool ();
bool size_check = info->info["size_check" ].get_bool ();
Value alpha = info->info["alpha" ].get_real ();
Value delta = info->info["delta" ].get_real ();
Value error = info->info["error" ].get_real ();
SizeT max_iter = info->info["max_iteration" ].get_int ();
CpuTimer cpu_timer;
cpu_timer.Start();
json_spirit::mArray device_list = info->info["device_list"].get_array();
int* gpu_idx = new int[num_gpus];
for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int();
// TODO: remove after merge mgpu-cq
ContextPtr *context = (ContextPtr*) info->context;
hipStream_t *streams = (hipStream_t*)info->streams;
// Allocate host-side label array (for both reference and gpu-computed results)
Value *reference_rank = (Value*)malloc(sizeof(Value) * csr->nodes);
Value *h_rank = (Value*)malloc(sizeof(Value) * csr->nodes);
VertexId *h_node_id = (VertexId*)malloc(sizeof(VertexId) * csr->nodes);
VertexId *reference_node_id = (VertexId*)malloc(sizeof(VertexId) * csr->nodes);
Value *reference_check = (quick_mode) ? NULL : reference_rank;
// Allocate problem on GPU
Problem *problem = new Problem;
util::GRError(problem -> Init(
stream_from_host,
csr,
NULL,
num_gpus,
gpu_idx,
partition_method,
streams,
max_queue_sizing,
max_in_sizing,
partition_factor,
partition_seed),
"Problem WTF Initialization Failed", __FILE__, __LINE__);
// Allocate WTF enactor map
Enactor *enactor = new Enactor(
num_gpus, gpu_idx, instrument, debug, size_check);
util::GRError(enactor -> Init(
context, problem, max_grid_size),
"WTF Enactor Init failed", __FILE__, __LINE__);
cpu_timer.Stop();
info -> info["preprocess_time"] = cpu_timer.ElapsedMillis();
// Perform WTF
util::GRError(problem -> Reset(
src, delta, alpha, error, enactor -> GetFrontierType(),
max_queue_sizing, max_queue_sizing1),
"WTF Problem Data Reset failed", __FILE__, __LINE__);
util::GRError(enactor -> Reset(),
"WTF Enactor Reset failed", __FILE__, __LINE__);
cpu_timer.Start();
util::GRError(enactor -> Enact(
src, alpha, max_iter),
"WTF Problem Enact Failed", __FILE__, __LINE__);
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
cpu_timer.Start();
// Copy out results
util::GRError(problem -> Extract(h_rank, h_node_id),
"WTF Problem Data Extraction Failed", __FILE__, __LINE__);
double total_pr = 0;
for (SizeT i = 0; i < csr->nodes; ++i)
{
total_pr += h_rank[i];
}
//
// Compute reference CPU WTF solution
//
if (reference_check != NULL && total_pr > 0)
{
if (!quiet_mode) printf("compute ref value\n");
ReferenceWTF(
*csr,
src,
reference_node_id,
reference_check,
delta,
alpha,
max_iter);
if (!quiet_mode) printf("\n");
}
// Verify the result
if (reference_check != NULL && total_pr > 0)
{
if (!quiet_mode) printf("Validity: ");
CompareResults(h_rank, reference_check, csr->nodes, true);
}
if (!quiet_mode)
{
printf("\nGPU result.");
DisplaySolution(h_node_id, h_rank, csr->nodes);
}
info->ComputeCommonStats(enactor -> enactor_stats.GetPointer(), elapsed, (VertexId*)NULL);
// Cleanup
if (problem ) delete problem;
if (enactor ) delete enactor;
if (reference_check) free(reference_check);
if (h_rank ) free(h_rank);
//hipDeviceSynchronize();
cpu_timer.Stop();
info->info["postprocess_time"] = cpu_timer.ElapsedMillis();
}
/******************************************************************************
* Main
******************************************************************************/
template <
typename VertexId, // use int as the vertex identifier
typename SizeT , // use int as the graph size type
typename Value > // use int as the value type
int main_(CommandLineArgs *args)
{
CpuTimer cpu_timer, cpu_timer2;
cpu_timer.Start();
//
// Construct graph and perform search(es)
//
Csr <VertexId, SizeT, Value> csr(false); // default for stream_from_host
Info<VertexId, SizeT, Value> *info = new Info<VertexId, SizeT, Value>;
info->info["undirected"] = args -> CheckCmdLineFlag("undirected");
cpu_timer2.Start();
info->Init("WTF", *args, csr);
cpu_timer2.Stop();
info->info["load_time"] = cpu_timer2.ElapsedMillis();
RunTests<VertexId, SizeT, Value>(info);
cpu_timer.Stop();
info->info["total_time"] = cpu_timer.ElapsedMillis();
if (!(info->info["quiet_mode"].get_bool()))
{
info->DisplayStats(); // display collected statistics
}
info->CollectInfo(); // collected all the info and put into JSON mObject
return 0;
}
template <
typename VertexId, // the vertex identifier type, usually int or long long
typename SizeT > // the size type, usually int or long long
int main_Value(CommandLineArgs *args)
{
// disabled to reduce compile time
// if (args -> CheckCmdLineFlag("64bit-Value"))
// return main_<VertexId, SizeT, double>(args);
// else
return main_<VertexId, SizeT, float >(args);
}
template <
typename VertexId>
int main_SizeT(CommandLineArgs *args)
{
// disabled to reduce compile time
// if (args -> CheckCmdLineFlag("64bit-SizeT"))
// return main_Value<VertexId, long long>(args);
// else
return main_Value<VertexId, int >(args);
}
int main_VertexId(CommandLineArgs *args)
{
// disabled, because oprtr::filter::KernelPolicy::SmemStorage is too large for 64bit VertexId
//if (args -> CheckCmdLineFlag("64bit-VertexId"))
// return main_SizeT<long long>(args);
//else
return main_SizeT<int >(args);
}
int main(int argc, char** argv)
{
CommandLineArgs args(argc, argv);
int graph_args = argc - args.ParsedArgc() - 1;
if (argc < 2 || graph_args < 1 || args.CheckCmdLineFlag("help"))
{
Usage();
return 1;
}
return main_VertexId(&args);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| 8ba8f65a7a402af942bba106c157116dca21c849.cu | // ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_wtf.cu
*
* @brief Simple test driver program for computing Pagerank.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
#include <cstdlib>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// WTF includes
#include <gunrock/app/wtf/wtf_enactor.cuh>
#include <gunrock/app/wtf/wtf_problem.cuh>
#include <gunrock/app/wtf/wtf_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// boost includes
#include <boost/config.hpp>
#include <boost/utility.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/page_rank.hpp>
using namespace gunrock;
using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::wtf;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
//bool g_verbose;
//bool g_undirected;
//bool g_quick;
//bool g_stream_from_host;
template <typename VertexId, typename Value>
struct RankPair
{
VertexId vertex_id;
Value page_rank;
RankPair(VertexId vertex_id, Value page_rank) :
vertex_id(vertex_id), page_rank(page_rank) {}
};
template<typename RankPair>
bool PRCompare(
RankPair elem1,
RankPair elem2)
{
return elem1.page_rank > elem2.page_rank;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf(
"test <graph-type> [graph-type-arguments]\n"
"Graph type and graph type arguments:\n"
" market <matrix-market-file-name>\n"
" Reads a Matrix-Market coordinate-formatted graph of\n"
" directed/undirected edges from STDIN (or from the\n"
" optionally-specified file).\n"
" rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19)\n"
" Generate R-MAT graph as input\n"
" --rmat_scale=<vertex-scale>\n"
" --rmat_nodes=<number-nodes>\n"
" --rmat_edgefactor=<edge-factor>\n"
" --rmat_edges=<number-edges>\n"
" --rmat_a=<factor> --rmat_b=<factor> --rmat_c=<factor>\n"
" --rmat_seed=<seed>\n"
" rgg (default: rgg_scale = 10, rgg_thfactor = 0.55)\n"
" Generate Random Geometry Graph as input\n"
" --rgg_scale=<vertex-scale>\n"
" --rgg_nodes=<number-nodes>\n"
" --rgg_thfactor=<threshold-factor>\n"
" --rgg_threshold=<threshold>\n"
" --rgg_vmultipiler=<vmultipiler>\n"
" --rgg_seed=<seed>\n\n"
"Optional arguments:\n"
"[--device=<device_index>] Set GPU(s) for testing (Default: 0).\n"
"[--undirected] Treat the graph as undirected (symmetric).\n"
"[--instrumented] Keep kernels statics [Default: Disable].\n"
" total_queued, search_depth and barrier duty.\n"
" (a relative indicator of load imbalance.)\n"
"[--quick] Skip the CPU reference validation process.\n"
"[--mark-pred] Keep both label info and predecessor info.\n"
"[--disable-size-check] Disable frontier queue size check.\n"
"[--grid-size=<grid size>] Maximum allowed grid size setting.\n"
"[--queue-sizing=<factor>] Allocates a frontier queue sized at: \n"
" (graph-edges * <factor>). (Default: 1.0)\n"
"[--in-sizing=<in/out_queue_scale_factor>]\n"
" Allocates a frontier queue sized at: \n"
" (graph-edges * <factor>). (Default: 1.0)\n"
"[--v] Print verbose per iteration debug info.\n"
"[--iteration-num=<num>] Number of runs to perform the test.\n"
"[--quiet] No output (unless --json is specified).\n"
"[--json] Output JSON-format statistics to STDOUT.\n"
"[--jsonfile=<name>] Output JSON-format statistics to file <name>\n"
"[--jsondir=<dir>] Output JSON-format statistics to <dir>/name,\n"
" where name is auto-generated.\n"
);
}
/**
* @brief Displays the WTF result (i.e., top page ranks)
*
* @param[in] node_id Pointer to node ID array
* @param[in] rank Pointer to node rank score array
* @param[in] nodes Number of nodes in the graph.
*/
template<typename VertexId, typename SizeT, typename Value>
void DisplaySolution(VertexId *node_id, Value *rank, SizeT nodes)
{
// Print out at most top 10 largest components
SizeT top = (nodes < 10) ? nodes : 10;
printf("Top %lld Page Ranks:\n", (long long)top);
for (SizeT i = 0; i < top; ++i)
{
printf("Vertex ID: %lld, Page Rank: %5f\n",
(long long)node_id[i], rank[i]);
}
}
/******************************************************************************
* WTF Testing Routines
*****************************************************************************/
/**
* @brief A simple CPU-based reference WTF implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] src Source node ID for WTF algorithm
* @param[out] node_id Pointer to store computed output node ID
* @param[in] rank Host-side vector to store CPU computed labels for each node
* @param[in] delta Delta value for computing PageRank score
* @param[in] alpha Parameter to adjust iteration number
* @param[in] max_iter max iteration to go
*/
// TODO: Boost PageRank cannot handle personalized pagerank, so currently the CPU
// implementation gives incorrect answer. Need to find a CPU PPR implementation
template <
typename VertexId,
typename SizeT,
typename Value>
void ReferenceWTF(
const Csr<VertexId, SizeT, Value> &graph,
VertexId src,
VertexId *node_id,
Value *rank,
Value delta,
Value alpha,
SizeT max_iter)
{
using namespace boost;
//Preparation
typedef adjacency_list<vecS, vecS, bidirectionalS, no_property,
property<edge_index_t, int> > Graph;
Graph g;
for (int i = 0; i < graph.nodes; ++i)
{
for (SizeT j = graph.row_offsets[i]; j < graph.row_offsets[i + 1]; ++j)
{
Graph::edge_descriptor e =
add_edge(i, graph.column_indices[j], g).first;
put(edge_index, g, e, i);
}
}
//
//compute page rank
//
CpuTimer cpu_timer;
cpu_timer.Start();
//remove_dangling_links(g);
std::vector<Value> ranks(num_vertices(g));
page_rank(g, make_iterator_property_map(
ranks.begin(), get(boost::vertex_index, g)),
boost::graph::n_iterations(max_iter));
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
for (std::size_t i = 0; i < num_vertices(g); ++i)
{
rank[i] = ranks[i];
}
//sort the top page ranks
RankPair<SizeT, Value> *pr_list =
(RankPair<SizeT, Value>*)malloc(
sizeof(RankPair<SizeT, Value>) * num_vertices(g));
for (std::size_t i = 0; i < num_vertices(g); ++i)
{
pr_list[i].vertex_id = i;
pr_list[i].page_rank = rank[i];
}
std::stable_sort(
pr_list, pr_list + num_vertices(g), PRCompare<RankPair<SizeT, Value> >);
std::vector<SizeT> in_degree(num_vertices(g));
std::vector<Value> refscore(num_vertices(g));
for (std::size_t i = 0; i < num_vertices(g); ++i)
{
node_id[i] = pr_list[i].vertex_id;
rank[i] = (i == src) ? 1.0 : 0;
in_degree[i] = 0;
refscore[i] = 0;
}
free(pr_list);
SizeT cot_size = (graph.nodes > 1000) ? 1000 : graph.nodes;
for (SizeT i = 0; i < cot_size; ++i)
{
VertexId node = node_id[i];
for (SizeT j = graph.row_offsets[node];
j < graph.row_offsets[node + 1]; ++j)
{
VertexId edge = graph.column_indices[j];
++in_degree[edge];
}
}
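// SALSA-style iterations over the circle of trust (the top-ranked vertices):
// push each vertex's rank to its neighbors as refscore, then pull it back normalized
// by in-degree, restarting a fraction alpha of the mass at the source vertex.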
SizeT salsa_iter = 1.0 / alpha + 1;
for (SizeT iter = 0; iter < salsa_iter; ++iter)
{
for (SizeT i = 0; i < cot_size; ++i)
{
VertexId node = node_id[i];
SizeT out_degree = graph.row_offsets[node + 1] - graph.row_offsets[node];
for (SizeT j = graph.row_offsets[node];
j < graph.row_offsets[node + 1]; ++j)
{
VertexId edge = graph.column_indices[j];
Value val = rank[node] / (out_degree > 0 ? out_degree : 1.0);
refscore[edge] += val;
}
}
for (SizeT i = 0; i < cot_size; ++i)
{
rank[node_id[i]] = 0;
}
for (SizeT i = 0; i < cot_size; ++i)
{
VertexId node = node_id[i];
rank[node] += (node == src) ? alpha : 0;
for (SizeT j = graph.row_offsets[node];
j < graph.row_offsets[node + 1]; ++j)
{
VertexId edge = graph.column_indices[j];
Value val = (1 - alpha) * refscore[edge] / in_degree[edge];
rank[node] += val;
}
}
for (SizeT i = 0; i < cot_size; ++i)
{
if (iter + 1 < salsa_iter) refscore[node_id[i]] = 0;
}
}
//sort the top page ranks
RankPair<SizeT, Value> *final_list =
(RankPair<SizeT, Value>*)malloc(
sizeof(RankPair<SizeT, Value>) * num_vertices(g));
for (std::size_t i = 0; i < num_vertices(g); ++i)
{
final_list[i].vertex_id = node_id[i];
final_list[i].page_rank = refscore[i];
}
std::stable_sort(
final_list, final_list + num_vertices(g),
PRCompare<RankPair<SizeT, Value> >);
for (std::size_t i = 0; i < num_vertices(g); ++i)
{
node_id[i] = final_list[i].vertex_id;
rank[i] = final_list[i].page_rank;
}
free(final_list);
printf("CPU Who-To-Follow finished in %lf msec.\n", elapsed);
}
/**
* @brief Run WTF tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
* @tparam DEBUG
* @tparam SIZE_CHECK
*
* @param[in] info Pointer to info contains parameters and statistics.
*
*/
template <
typename VertexId,
typename SizeT,
typename Value>
//bool INSTRUMENT,
//bool DEBUG,
//bool SIZE_CHECK >
void RunTests(Info<VertexId, SizeT, Value> *info)
{
typedef WTFProblem <
VertexId,
SizeT,
Value >
Problem;
typedef WTFEnactor <Problem>
Enactor;
Csr<VertexId, SizeT, Value> *csr = info->csr_ptr;
VertexId src = info->info["source_vertex" ].get_int64();
int max_grid_size = info->info["max_grid_size" ].get_int ();
int num_gpus = info->info["num_gpus" ].get_int ();
double max_queue_sizing = info->info["max_queue_sizing" ].get_real ();
double max_queue_sizing1 = info->info["max_queue_sizing1" ].get_real ();
double max_in_sizing = info->info["max_in_sizing" ].get_real ();
std::string partition_method = info->info["partition_method" ].get_str ();
double partition_factor = info->info["partition_factor" ].get_real ();
int partition_seed = info->info["partition_seed" ].get_int ();
bool quick_mode = info->info["quick_mode" ].get_bool ();
bool quiet_mode = info->info["quiet_mode" ].get_bool ();
bool stream_from_host = info->info["stream_from_host" ].get_bool ();
bool instrument = info->info["instrument" ].get_bool ();
bool debug = info->info["debug_mode" ].get_bool ();
bool size_check = info->info["size_check" ].get_bool ();
Value alpha = info->info["alpha" ].get_real ();
Value delta = info->info["delta" ].get_real ();
Value error = info->info["error" ].get_real ();
SizeT max_iter = info->info["max_iteration" ].get_int ();
CpuTimer cpu_timer;
cpu_timer.Start();
json_spirit::mArray device_list = info->info["device_list"].get_array();
int* gpu_idx = new int[num_gpus];
for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int();
// TODO: remove after merge mgpu-cq
ContextPtr *context = (ContextPtr*) info->context;
cudaStream_t *streams = (cudaStream_t*)info->streams;
// Allocate host-side label array (for both reference and gpu-computed results)
Value *reference_rank = (Value*)malloc(sizeof(Value) * csr->nodes);
Value *h_rank = (Value*)malloc(sizeof(Value) * csr->nodes);
VertexId *h_node_id = (VertexId*)malloc(sizeof(VertexId) * csr->nodes);
VertexId *reference_node_id = (VertexId*)malloc(sizeof(VertexId) * csr->nodes);
Value *reference_check = (quick_mode) ? NULL : reference_rank;
// Allocate problem on GPU
Problem *problem = new Problem;
util::GRError(problem -> Init(
stream_from_host,
csr,
NULL,
num_gpus,
gpu_idx,
partition_method,
streams,
max_queue_sizing,
max_in_sizing,
partition_factor,
partition_seed),
"Problem WTF Initialization Failed", __FILE__, __LINE__);
// Allocate WTF enactor map
Enactor *enactor = new Enactor(
num_gpus, gpu_idx, instrument, debug, size_check);
util::GRError(enactor -> Init(
context, problem, max_grid_size),
"WTF Enactor Init failed", __FILE__, __LINE__);
cpu_timer.Stop();
info -> info["preprocess_time"] = cpu_timer.ElapsedMillis();
// Perform WTF
util::GRError(problem -> Reset(
src, delta, alpha, error, enactor -> GetFrontierType(),
max_queue_sizing, max_queue_sizing1),
"WTF Problem Data Reset failed", __FILE__, __LINE__);
util::GRError(enactor -> Reset(),
"WTF Enactor Reset failed", __FILE__, __LINE__);
cpu_timer.Start();
util::GRError(enactor -> Enact(
src, alpha, max_iter),
"WTF Problem Enact Failed", __FILE__, __LINE__);
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
cpu_timer.Start();
// Copy out results
util::GRError(problem -> Extract(h_rank, h_node_id),
"WTF Problem Data Extraction Failed", __FILE__, __LINE__);
double total_pr = 0;
for (SizeT i = 0; i < csr->nodes; ++i)
{
total_pr += h_rank[i];
}
//
// Compute reference CPU WTF solution
//
if (reference_check != NULL && total_pr > 0)
{
if (!quiet_mode) printf("compute ref value\n");
ReferenceWTF(
*csr,
src,
reference_node_id,
reference_check,
delta,
alpha,
max_iter);
if (!quiet_mode) printf("\n");
}
// Verify the result
if (reference_check != NULL && total_pr > 0)
{
if (!quiet_mode) printf("Validity: ");
CompareResults(h_rank, reference_check, csr->nodes, true);
}
if (!quiet_mode)
{
printf("\nGPU result.");
DisplaySolution(h_node_id, h_rank, csr->nodes);
}
info->ComputeCommonStats(enactor -> enactor_stats.GetPointer(), elapsed, (VertexId*)NULL);
// Cleanup
if (problem ) delete problem;
if (enactor ) delete enactor;
if (reference_check) free(reference_check);
if (h_rank ) free(h_rank);
//cudaDeviceSynchronize();
cpu_timer.Stop();
info->info["postprocess_time"] = cpu_timer.ElapsedMillis();
}
/******************************************************************************
* Main
******************************************************************************/
template <
typename VertexId, // use int as the vertex identifier
typename SizeT , // use int as the graph size type
typename Value > // use int as the value type
int main_(CommandLineArgs *args)
{
CpuTimer cpu_timer, cpu_timer2;
cpu_timer.Start();
//
// Construct graph and perform search(es)
//
Csr <VertexId, SizeT, Value> csr(false); // default for stream_from_host
Info<VertexId, SizeT, Value> *info = new Info<VertexId, SizeT, Value>;
info->info["undirected"] = args -> CheckCmdLineFlag("undirected");
cpu_timer2.Start();
info->Init("WTF", *args, csr);
cpu_timer2.Stop();
info->info["load_time"] = cpu_timer2.ElapsedMillis();
RunTests<VertexId, SizeT, Value>(info);
cpu_timer.Stop();
info->info["total_time"] = cpu_timer.ElapsedMillis();
if (!(info->info["quiet_mode"].get_bool()))
{
info->DisplayStats(); // display collected statistics
}
info->CollectInfo(); // collected all the info and put into JSON mObject
return 0;
}
template <
typename VertexId, // the vertex identifier type, usually int or long long
typename SizeT > // the size type, usually int or long long
int main_Value(CommandLineArgs *args)
{
// disabled to reduce compile time
// if (args -> CheckCmdLineFlag("64bit-Value"))
// return main_<VertexId, SizeT, double>(args);
// else
return main_<VertexId, SizeT, float >(args);
}
template <
typename VertexId>
int main_SizeT(CommandLineArgs *args)
{
// disabled to reduce compile time
// if (args -> CheckCmdLineFlag("64bit-SizeT"))
// return main_Value<VertexId, long long>(args);
// else
return main_Value<VertexId, int >(args);
}
int main_VertexId(CommandLineArgs *args)
{
// disabled, because oprtr::filter::KernelPolicy::SmemStorage is too large for 64bit VertexId
//if (args -> CheckCmdLineFlag("64bit-VertexId"))
// return main_SizeT<long long>(args);
//else
return main_SizeT<int >(args);
}
int main(int argc, char** argv)
{
CommandLineArgs args(argc, argv);
int graph_args = argc - args.ParsedArgc() - 1;
if (argc < 2 || graph_args < 1 || args.CheckCmdLineFlag("help"))
{
Usage();
return 1;
}
return main_VertexId(&args);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
21e13272c43712d83151e26ff860bd3a87e9740e.hip | // !!! This is a file automatically generated by hipify!!!
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
#include "helper.h"
#include <iostream>
#include <math.h>
#include "rocblas.h"
#include <hip/hip_runtime.h>
using namespace std;
// uncomment to use the camera
//#define CAMERA
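// Global-atomics version: each thread averages its pixel's channels, scales to a 0..255 bin,
// and increments that bin directly in the global histogram with atomicAdd.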
__global__ void histogram256_global(float* img, int* histo, int w, int h, int nc){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = threadIdx.z + blockIdx.z*blockDim.z;
float value = 0;
if (t_numx < w && t_numy < h){
for (int j = 0; j < nc; j++){
value += img[t_numx + w*t_numy + j*w*h];
}
value = value*255.f/float(nc);
atomicAdd((int*)&(histo[(int)value]), 1);
}
}
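// Shared-atomics version: each block builds a private 256-bin histogram in shared memory,
// then a single thread per block merges it into the global histogram.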
__global__ void histogram256_shared(float* img, int* histo, int w, int h, int nc){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = threadIdx.z + blockIdx.z*blockDim.z;
float value = 0;
__shared__ int block_histo[256];
if ( threadIdx.x == 0 && threadIdx.y == 0 ){
for(int i=0 ; i<256 ; i++)
block_histo[i] = 0;
}
__syncthreads();
if (t_numx < w && t_numy < h){
for (int j = 0; j < nc; j++){
value += img[t_numx + w*t_numy + j*w*h];
}
value = value*255.f/float(nc);
atomicAdd((int*)&(block_histo[(int)value]), 1);
}
__syncthreads();
if ( threadIdx.x == 0 && threadIdx.y == 0 ){
for(int i=0 ; i<256 ; i++)
atomicAdd((int*)&(histo[i]), block_histo[i]);
}
__syncthreads();
}
int main(int argc, char **argv)
{
hipDeviceSynchronize(); CUDA_CHECK;
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
mIn.convertTo(mIn,CV_32F);
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << " x " << nc <<endl;
float *imgIn = new float[(size_t)w*h*nc];
convert_mat_to_layered (imgIn, mIn);
float* cuda_imgIn;
int* cuda_histo256;
hipMalloc((void**) &cuda_imgIn , w*h*nc*sizeof(float));
hipMalloc((void**) &cuda_histo256 , 256*sizeof(int));
hipMemset(cuda_histo256 , 0, 256*sizeof(int));
hipMemcpy(cuda_imgIn, imgIn , w*h*nc*sizeof(float) , hipMemcpyHostToDevice);
dim3 block = dim3(32,32,1);
int grid_x = ((w + block.x + 1)/block.x);
int grid_y = ((h + block.y + 1)/block.y);
int grid_z = 1;
dim3 grid = dim3(grid_x, grid_y, grid_z );
Timer timer;
timer.start();
hipLaunchKernelGGL(( histogram256_global) , dim3(grid), dim3(block), 0, 0, cuda_imgIn, cuda_histo256 , w, h, nc);
timer.end();
float t_global = timer.get(); // elapsed time in seconds
cout << " " << endl;
cout << "time when using global atomics: " << t_global*1000 << " ms" << endl;
cout << " " << endl;
int* histo_global = new int[256];
hipMemcpy(histo_global, cuda_histo256, 256*sizeof(int), hipMemcpyDeviceToHost);
showHistogram256("HISTOGRAM_GLOBAL" , histo_global, 100 + w, 100);
hipMemset(cuda_histo256 , 0, 256*sizeof(int));
timer.start();
hipLaunchKernelGGL(( histogram256_shared) , dim3(grid), dim3(block), 0, 0, cuda_imgIn, cuda_histo256 , w, h, nc);
timer.end();
float t_shared = timer.get(); // elapsed time in seconds
cout << "time when using shared atomics: " << t_shared*1000 << " ms" << endl;
int* histo_shared = new int[256];
hipMemcpy(histo_shared, cuda_histo256, 256*sizeof(int), hipMemcpyDeviceToHost);
showHistogram256("HISTOGRAM_SHARED" , histo_shared, 100 + 2*w, 100);
cout << " " << endl;
cout << "Percentage improvement with shared atomics: "<< 100*((t_global - t_shared)/t_shared) << endl;
cout << " " << endl;
// show input image
showImage("Input", mIn, 100, 100);
cv::waitKey(0);
// free allocated arrays
delete[] imgIn;
delete[] histo_shared;
delete[] histo_global;
hipFree(cuda_imgIn);
hipFree(cuda_histo256);
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
| 21e13272c43712d83151e26ff860bd3a87e9740e.cu | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
#include "helper.h"
#include <iostream>
#include <math.h>
#include "cublas.h"
#include <cuda_runtime.h>
using namespace std;
// uncomment to use the camera
//#define CAMERA
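// Each thread averages the channels of one pixel into a gray value in [0,255]
// and increments the matching bin of the global histogram with a global atomicAdd.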
__global__ void histogram256_global(float* img, int* histo, int w, int h, int nc){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = threadIdx.z + blockIdx.z*blockDim.z;
float value = 0;
if (t_numx < w && t_numy < h){
for (int j =0; j < nc; j++){
value += img[t_numx + w*t_numy + j*w*h];
}
value = value*255.f/float(nc);
atomicAdd((int*)&(histo[(int)value]), 1);
}
}
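// Same histogram, but each block first accumulates into a 256-bin shared-memory
// histogram and then merges those bins into the global histogram, which reduces
// contention on the global atomics.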
__global__ void histogram256_shared(float* img, int* histo, int w, int h, int nc){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = threadIdx.z + blockIdx.z*blockDim.z;
float value = 0;
__shared__ int block_histo[256];
if ( threadIdx.x == 0 && threadIdx.y == 0 ){
for(int i=0 ; i<256 ; i++)
block_histo[i] = 0;
}
__syncthreads();
if (t_numx < w && t_numy < h){
for (int j =0; j < nc; j++){
value += img[t_numx + w*t_numy + j*w*h];
}
value = value*255.f/float(nc);
atomicAdd((int*)&(block_histo[(int)value]), 1);
}
__syncthreads();
if ( threadIdx.x == 0 && threadIdx.y == 0 ){
for(int i=0 ; i<256 ; i++)
atomicAdd((int*)&(histo[i]), block_histo[i]);
}
__syncthreads();
}
int main(int argc, char **argv)
{
cudaDeviceSynchronize(); CUDA_CHECK;
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
mIn.convertTo(mIn,CV_32F);
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << " x " << nc <<endl;
float *imgIn = new float[(size_t)w*h*nc];
convert_mat_to_layered (imgIn, mIn);
float* cuda_imgIn;
int* cuda_histo256;
cudaMalloc((void**) &cuda_imgIn , w*h*nc*sizeof(float));
cudaMalloc((void**) &cuda_histo256 , 256*sizeof(int));
cudaMemset(cuda_histo256 , 0, 256*sizeof(int));
cudaMemcpy(cuda_imgIn, imgIn , w*h*nc*sizeof(float) , cudaMemcpyHostToDevice);
dim3 block = dim3(32,32,1);
int grid_x = ((w + block.x - 1)/block.x);
int grid_y = ((h + block.y - 1)/block.y);
int grid_z = 1;
dim3 grid = dim3(grid_x, grid_y, grid_z );
Timer timer;
timer.start();
histogram256_global <<<grid, block>>>(cuda_imgIn, cuda_histo256 , w, h, nc);
cudaDeviceSynchronize(); CUDA_CHECK; // wait for the kernel to finish so the timing is meaningful
timer.end();
float t_global = timer.get(); // elapsed time in seconds
cout << " " << endl;
cout << "time when using global atomics: " << t_global*1000 << " ms" << endl;
cout << " " << endl;
int* histo_global = new int[256];
cudaMemcpy(histo_global, cuda_histo256, 256*sizeof(int), cudaMemcpyDeviceToHost);
showHistogram256("HISTOGRAM_GLOBAL" , histo_global, 100 + w, 100);
cudaMemset(cuda_histo256 , 0, 256*sizeof(int));
timer.start();
histogram256_shared <<<grid, block>>>(cuda_imgIn, cuda_histo256 , w, h, nc);
cudaDeviceSynchronize(); CUDA_CHECK; // wait for the kernel to finish so the timing is meaningful
timer.end();
float t_shared = timer.get(); // elapsed time in seconds
cout << "time when using shared atomics: " << t_shared*1000 << " ms" << endl;
int* histo_shared = new int[256];
cudaMemcpy(histo_shared, cuda_histo256, 256*sizeof(int), cudaMemcpyDeviceToHost);
showHistogram256("HISTOGRAM_SHARED" , histo_shared, 100 + 2*w, 100);
cout << " " << endl;
cout << "Percentage improvement with shared atomics: "<< 100*((t_global - t_shared)/t_shared) << endl;
cout << " " << endl;
// show input image
showImage("Input", mIn, 100, 100);
cv::waitKey(0);
// free allocated arrays
delete[] imgIn;
delete[] histo_shared;
delete[] histo_global;
cudaFree(cuda_imgIn);
cudaFree(cuda_histo256);
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
|
5732811ccec34a782ac79094f6da4ef4287ac094.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
// Start from here...
//openGL may not be installed properly
__device__
unsigned char clip(int n) {return( n >= 255 ? 255 : (n < 0 ? 0 : n)); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int r = blockIdx.y * blockDim.y + threadIdx.y;
if ((c >= w) || (r >= h)) return;
const int i = r * w + c;
// important formula
const int dist = sqrtf((pos.x - c) * (pos.x - c) + (pos.y - r) * (pos.y - r));
const unsigned char d = clip(255 - dist);
d_out[i].x = d; // RED
d_out[i].y = d; // GREEN
d_out[i].z = 0; // blue
d_out[i].w = 0; // opaque
}
void kernelLauncher(uchar4 * d_out, int w, int h, int2 pos)
{
dim3 blockSize(TX, TY);
dim3 gridSize((w + TX - 1) / TX, (h + TY - 1) / TY); // how many blocks to allocate
distanceKernel << <gridSize, blockSize >> > (d_out, w, h, pos);
}
| 5732811ccec34a782ac79094f6da4ef4287ac094.cu | #include "kernel.h"
#define TX 32
#define TY 32
// Start from here...
//openGL may not be installed properly
__device__
unsigned char clip(int n) {return( n >= 255 ? 255 : (n < 0 ? 0 : n)); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int r = blockIdx.y * blockDim.y + threadIdx.y;
if ((c >= w) || (r >= h)) return;
const int i = r * w + c;
// important formula
const int dist = sqrtf((pos.x - c) * (pos.x - c) + (pos.y - r) * (pos.y - r));
const unsigned char d = clip(255 - dist);
d_out[i].x = d; // RED
d_out[i].y = d; // GREEN
d_out[i].z = 0; // blue
d_out[i].w = 0; // opaque
}
void kernelLauncher(uchar4 * d_out, int w, int h, int2 pos)
{
dim3 blockSize(TX, TY);
dim3 gridSize((w + TX - 1) / TX, (h + TY - 1) / TY); // how many blocks to allocate
distanceKernel << <gridSize, blockSize >> > (d_out, w, h, pos);
}
|
dd0d9548276ac95e70d274d91d649bd36b506ea6.hip | // !!! This is a file automatically generated by hipify!!!
#include "kernel/pt_ex.h"
#include "kernel/context.cuh"
#include "kernel/light.cuh"
#include "kernel/material.cuh"
#include "kernel/intersect.cuh"
#include "kernel/accelerator.cuh"
#include "kernel/compaction.h"
#include "kernel/pt_common.h"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "cuda/helper_math.h"
#include "cuda/cudautil.h"
#include "cuda/cudamemory.h"
#include "aten4idaten.h"
__global__ void renderAOV(
idaten::PathTracingGeometryRendering::AOV* aovs,
int width, int height,
int sample, int maxSamples,
aten::mat4 mtxW2C,
const aten::ray* __restrict__ rays,
const aten::GeomParameter* __restrict__ shapes, int geomnum,
hipTextureObject_t* nodes,
const aten::PrimitiveParamter* __restrict__ prims,
hipTextureObject_t vtxPos,
hipTextureObject_t vtxNml,
const aten::mat4* __restrict__ matrices,
const unsigned int* sobolmatrices)
{
const auto ix = blockIdx.x * blockDim.x + threadIdx.x;
const auto iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= width || iy >= height) {
return;
}
const auto idx = getIdx(ix, iy, width);
Context ctxt;
{
ctxt.geomnum = geomnum;
ctxt.shapes = shapes;
ctxt.nodes = nodes;
ctxt.prims = prims;
ctxt.vtxPos = vtxPos;
ctxt.vtxNml = vtxNml;
ctxt.matrices = matrices;
}
auto ray = rays[idx];
aten::hitrecord rec;
aten::Intersection isect;
bool isHit = intersectClosest(&ctxt, ray, &isect);
if (isHit) {
auto obj = &ctxt.shapes[isect.objid];
evalHitResult(&ctxt, obj, ray, &rec, &isect);
aten::vec4 pos = aten::vec4(rec.p, 1);
pos = mtxW2C.apply(pos);
aovs[idx].mtrlid = isect.mtrlid; // material id.
aovs[idx].meshid = isect.meshid;
aovs[idx].depth = pos.w; // depth.
aovs[idx].normal = make_float3(rec.normal.x, rec.normal.y, rec.normal.z);
}
else {
aovs[idx].mtrlid = -1; // material id.
aovs[idx].meshid = -1;
aovs[idx].depth = AT_MATH_INF; // depth.
}
}
enum ReferPos {
UpperLeft,
LowerLeft,
UpperRight,
LowerRight,
};
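// geometryRender upsamples the low-resolution path-traced result by "ratio",
// blending the four surrounding low-res samples with bilinear-style weights but
// only across samples whose AOV material id matches the current pixel, and
// accumulates the result progressively into the output surface.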
__global__ void geometryRender(
const idaten::PathTracing::Path* __restrict__ paths,
const idaten::PathTracingGeometryRendering::AOV* __restrict__ aovs,
hipSurfaceObject_t outSurface,
int width, int height,
int mwidth, int mheight)
{
const auto ix = blockIdx.x * blockDim.x + threadIdx.x;
const auto iy = blockIdx.y * blockDim.y + threadIdx.y;
static const int ratio = 2;
if (ix >= width || iy >= height) {
return;
}
int mx = ix / (float)ratio;
int my = iy / (float)ratio;
// NOTE
// +y
// |
// |
// 0 ---> +x
// NOTE
// ul
// +y ------- ur
// | |
// | |
// ll ---- +x lr
int2 pos[4] = {
make_int2(mx, min(my + 1, mheight - 1)), // upper-left.
make_int2(mx, my), // lower-left.
make_int2(min(mx + 1, mwidth - 1), min(my + 1, mheight - 1)), // upper-right.
make_int2(min(mx + 1, mwidth - 1), my), // lower-right.
};
// Compute the ratio from the reference point (lower-left).
real u = aten::abs<int>(ix - pos[ReferPos::LowerLeft].x * ratio) / (real)ratio;
real v = aten::abs<int>(iy - pos[ReferPos::LowerLeft].y * ratio) / (real)ratio;
u = aten::clamp(u, AT_MATH_EPSILON, real(1));
v = aten::clamp(v, AT_MATH_EPSILON, real(1));
int refmidx = getIdx(ix, iy, width);
const int mtrlIdx = aovs[refmidx].mtrlid;
real norms[4] = {
1 / (u * (1 - v)),
1 / (u * v),
1 / ((1 - u) * (1 - v)),
1 / ((1 - u) * v),
};
real sumWeight = 0;
aten::vec3 denom;
for (int i = 0; i < 4; i++) {
auto midx = getIdx(pos[i].x * ratio, pos[i].y * ratio, width);
int refMtrlIdx = aovs[midx].mtrlid;
int coeff = (mtrlIdx == refMtrlIdx ? 1 : 0);
auto weight = norms[i] * coeff;
auto cidx = getIdx(pos[i].x, pos[i].y, mwidth);
sumWeight += weight;
denom += paths[cidx].contrib / (real)paths[cidx].samples * weight;
}
denom = denom / (sumWeight + AT_MATH_EPSILON);
float4 data;
#if 1
surf2Dread(&data, outSurface, ix * sizeof(float4), iy);
// First data.w value is 0.
int n = data.w;
data = n * data + make_float4(denom.x, denom.y, denom.z, 0);
data /= (n + 1);
data.w = n + 1;
#else
data = make_float4(denom.x, denom.y, denom.z, 1);
#endif
surf2Dwrite(
data,
outSurface,
ix * sizeof(float4), iy,
hipBoundaryModeTrap);
}
namespace idaten
{
void PathTracingGeometryRendering::update(
GLuint gltex,
int width, int height,
const aten::CameraParameter& camera,
const std::vector<aten::GeomParameter>& shapes,
const std::vector<aten::MaterialParameter>& mtrls,
const std::vector<aten::LightParameter>& lights,
const std::vector<std::vector<aten::GPUBvhNode>>& nodes,
const std::vector<aten::PrimitiveParamter>& prims,
const std::vector<aten::vertex>& vtxs,
const std::vector<aten::mat4>& mtxs,
const std::vector<TextureResource>& texs,
const EnvmapResource& envmapRsc)
{
idaten::PathTracing::update(
gltex,
width, height,
camera,
shapes,
mtrls,
lights,
nodes,
prims,
vtxs,
mtxs,
texs, envmapRsc);
// TODO
m_aovs[0].init((width << 1) * (height << 1));
m_aovs[1].init((width << 1) * (height << 1));
}
void PathTracingGeometryRendering::onGenPath(
int width, int height,
int sample, int maxSamples,
hipTextureObject_t texVtxPos,
hipTextureObject_t texVtxNml)
{
idaten::PathTracing::onGenPath(
width, height,
sample, maxSamples,
texVtxPos,
texVtxNml);
if (sample == 0) {
renderAOVs(
width, height,
sample, maxSamples,
texVtxPos,
texVtxNml);
}
}
void PathTracingGeometryRendering::renderAOVs(
int width, int height,
int sample, int maxSamples,
hipTextureObject_t texVtxPos,
hipTextureObject_t texVtxNml)
{
int W = width;
int H = height;
aten::mat4 mtxW2V;
mtxW2V.lookat(
m_camParam.origin,
m_camParam.center,
m_camParam.up);
aten::mat4 mtxV2C;
mtxV2C.perspective(
m_camParam.znear,
m_camParam.zfar,
m_camParam.vfov,
m_camParam.aspect);
aten::mat4 mtxW2C = mtxV2C * mtxW2V;
getRenderAOVSize(W, H);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(W + block.x - 1) / block.x,
(H + block.y - 1) / block.y);
auto& aovs = getCurAOVs();
renderAOV << <grid, block >> > (
//renderAOV << <1, 1 >> > (
aovs.ptr(),
W, H,
sample, maxSamples,
mtxW2C,
m_rays.ptr(),
m_shapeparam.ptr(), m_shapeparam.num(),
m_nodetex.ptr(),
m_primparams.ptr(),
texVtxPos,
texVtxNml,
m_mtxparams.ptr(),
m_sobolMatrices.ptr());
checkCudaKernel(renderAOV);
}
void PathTracingGeometryRendering::onGather(
hipSurfaceObject_t outputSurf,
int width, int height,
int maxSamples)
{
int mwidth = width;
int mheight = height;
width <<= 1;
height <<= 1;
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(width + block.x - 1) / block.x,
(height + block.y - 1) / block.y);
auto& aovs = getCurAOVs();
geometryRender << <grid, block >> > (
//geometryRender << <1, 1 >> > (
m_paths.ptr(),
aovs.ptr(),
outputSurf,
width, height,
mwidth, mheight);
checkCudaKernel(geometryRender);
}
}
| dd0d9548276ac95e70d274d91d649bd36b506ea6.cu | #include "kernel/pt_ex.h"
#include "kernel/context.cuh"
#include "kernel/light.cuh"
#include "kernel/material.cuh"
#include "kernel/intersect.cuh"
#include "kernel/accelerator.cuh"
#include "kernel/compaction.h"
#include "kernel/pt_common.h"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "cuda/helper_math.h"
#include "cuda/cudautil.h"
#include "cuda/cudamemory.h"
#include "aten4idaten.h"
__global__ void renderAOV(
idaten::PathTracingGeometryRendering::AOV* aovs,
int width, int height,
int sample, int maxSamples,
aten::mat4 mtxW2C,
const aten::ray* __restrict__ rays,
const aten::GeomParameter* __restrict__ shapes, int geomnum,
cudaTextureObject_t* nodes,
const aten::PrimitiveParamter* __restrict__ prims,
cudaTextureObject_t vtxPos,
cudaTextureObject_t vtxNml,
const aten::mat4* __restrict__ matrices,
const unsigned int* sobolmatrices)
{
const auto ix = blockIdx.x * blockDim.x + threadIdx.x;
const auto iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= width || iy >= height) {
return;
}
const auto idx = getIdx(ix, iy, width);
Context ctxt;
{
ctxt.geomnum = geomnum;
ctxt.shapes = shapes;
ctxt.nodes = nodes;
ctxt.prims = prims;
ctxt.vtxPos = vtxPos;
ctxt.vtxNml = vtxNml;
ctxt.matrices = matrices;
}
auto ray = rays[idx];
aten::hitrecord rec;
aten::Intersection isect;
bool isHit = intersectClosest(&ctxt, ray, &isect);
if (isHit) {
auto obj = &ctxt.shapes[isect.objid];
evalHitResult(&ctxt, obj, ray, &rec, &isect);
aten::vec4 pos = aten::vec4(rec.p, 1);
pos = mtxW2C.apply(pos);
aovs[idx].mtrlid = isect.mtrlid; // material id.
aovs[idx].meshid = isect.meshid;
aovs[idx].depth = pos.w; // depth.
aovs[idx].normal = make_float3(rec.normal.x, rec.normal.y, rec.normal.z);
}
else {
aovs[idx].mtrlid = -1; // material id.
aovs[idx].meshid = -1;
aovs[idx].depth = AT_MATH_INF; // depth.
}
}
enum ReferPos {
UpperLeft,
LowerLeft,
UpperRight,
LowerRight,
};
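// geometryRender upsamples the low-resolution path-traced result by "ratio",
// blending the four surrounding low-res samples with bilinear-style weights but
// only across samples whose AOV material id matches the current pixel, and
// accumulates the result progressively into the output surface.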
__global__ void geometryRender(
const idaten::PathTracing::Path* __restrict__ paths,
const idaten::PathTracingGeometryRendering::AOV* __restrict__ aovs,
cudaSurfaceObject_t outSurface,
int width, int height,
int mwidth, int mheight)
{
const auto ix = blockIdx.x * blockDim.x + threadIdx.x;
const auto iy = blockIdx.y * blockDim.y + threadIdx.y;
static const int ratio = 2;
if (ix >= width || iy >= height) {
return;
}
int mx = ix / (float)ratio;
int my = iy / (float)ratio;
// NOTE
// +y
// |
// |
// 0 ---> +x
// NOTE
// ul
// +y ------- ur
// | |
// | |
// ll ---- +x lr
int2 pos[4] = {
make_int2(mx, min(my + 1, mheight - 1)), // upper-left.
make_int2(mx, my), // lower-left.
make_int2(min(mx + 1, mwidth - 1), min(my + 1, mheight - 1)), // upper-right.
make_int2(min(mx + 1, mwidth - 1), my), // lower-right.
};
// Compute the ratio from the reference point (lower-left).
real u = aten::abs<int>(ix - pos[ReferPos::LowerLeft].x * ratio) / (real)ratio;
real v = aten::abs<int>(iy - pos[ReferPos::LowerLeft].y * ratio) / (real)ratio;
u = aten::clamp(u, AT_MATH_EPSILON, real(1));
v = aten::clamp(v, AT_MATH_EPSILON, real(1));
int refmidx = getIdx(ix, iy, width);
const int mtrlIdx = aovs[refmidx].mtrlid;
real norms[4] = {
1 / (u * (1 - v)),
1 / (u * v),
1 / ((1 - u) * (1 - v)),
1 / ((1 - u) * v),
};
real sumWeight = 0;
aten::vec3 denom;
for (int i = 0; i < 4; i++) {
auto midx = getIdx(pos[i].x * ratio, pos[i].y * ratio, width);
int refMtrlIdx = aovs[midx].mtrlid;
int coeff = (mtrlIdx == refMtrlIdx ? 1 : 0);
auto weight = norms[i] * coeff;
auto cidx = getIdx(pos[i].x, pos[i].y, mwidth);
sumWeight += weight;
denom += paths[cidx].contrib / (real)paths[cidx].samples * weight;
}
denom = denom / (sumWeight + AT_MATH_EPSILON);
float4 data;
#if 1
surf2Dread(&data, outSurface, ix * sizeof(float4), iy);
// First data.w value is 0.
int n = data.w;
data = n * data + make_float4(denom.x, denom.y, denom.z, 0);
data /= (n + 1);
data.w = n + 1;
#else
data = make_float4(denom.x, denom.y, denom.z, 1);
#endif
surf2Dwrite(
data,
outSurface,
ix * sizeof(float4), iy,
cudaBoundaryModeTrap);
}
namespace idaten
{
void PathTracingGeometryRendering::update(
GLuint gltex,
int width, int height,
const aten::CameraParameter& camera,
const std::vector<aten::GeomParameter>& shapes,
const std::vector<aten::MaterialParameter>& mtrls,
const std::vector<aten::LightParameter>& lights,
const std::vector<std::vector<aten::GPUBvhNode>>& nodes,
const std::vector<aten::PrimitiveParamter>& prims,
const std::vector<aten::vertex>& vtxs,
const std::vector<aten::mat4>& mtxs,
const std::vector<TextureResource>& texs,
const EnvmapResource& envmapRsc)
{
idaten::PathTracing::update(
gltex,
width, height,
camera,
shapes,
mtrls,
lights,
nodes,
prims,
vtxs,
mtxs,
texs, envmapRsc);
// TODO
m_aovs[0].init((width << 1) * (height << 1));
m_aovs[1].init((width << 1) * (height << 1));
}
void PathTracingGeometryRendering::onGenPath(
int width, int height,
int sample, int maxSamples,
cudaTextureObject_t texVtxPos,
cudaTextureObject_t texVtxNml)
{
idaten::PathTracing::onGenPath(
width, height,
sample, maxSamples,
texVtxPos,
texVtxNml);
if (sample == 0) {
renderAOVs(
width, height,
sample, maxSamples,
texVtxPos,
texVtxNml);
}
}
void PathTracingGeometryRendering::renderAOVs(
int width, int height,
int sample, int maxSamples,
cudaTextureObject_t texVtxPos,
cudaTextureObject_t texVtxNml)
{
int W = width;
int H = height;
aten::mat4 mtxW2V;
mtxW2V.lookat(
m_camParam.origin,
m_camParam.center,
m_camParam.up);
aten::mat4 mtxV2C;
mtxV2C.perspective(
m_camParam.znear,
m_camParam.zfar,
m_camParam.vfov,
m_camParam.aspect);
aten::mat4 mtxW2C = mtxV2C * mtxW2V;
getRenderAOVSize(W, H);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(W + block.x - 1) / block.x,
(H + block.y - 1) / block.y);
auto& aovs = getCurAOVs();
renderAOV << <grid, block >> > (
//renderAOV << <1, 1 >> > (
aovs.ptr(),
W, H,
sample, maxSamples,
mtxW2C,
m_rays.ptr(),
m_shapeparam.ptr(), m_shapeparam.num(),
m_nodetex.ptr(),
m_primparams.ptr(),
texVtxPos,
texVtxNml,
m_mtxparams.ptr(),
m_sobolMatrices.ptr());
checkCudaKernel(renderAOV);
}
void PathTracingGeometryRendering::onGather(
cudaSurfaceObject_t outputSurf,
int width, int height,
int maxSamples)
{
int mwidth = width;
int mheight = height;
width <<= 1;
height <<= 1;
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(width + block.x - 1) / block.x,
(height + block.y - 1) / block.y);
auto& aovs = getCurAOVs();
geometryRender << <grid, block >> > (
//geometryRender << <1, 1 >> > (
m_paths.ptr(),
aovs.ptr(),
outputSurf,
width, height,
mwidth, mheight);
checkCudaKernel(geometryRender);
}
}
|
e60efa98f66323904dae4f5c944fc67f660c753c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file example_unity.cu
* \author Jackson Parker
* \date Feb 8 2020
* \brief Example on the usage of the Unity structure.
* \details This can also act as an executable for unit testing.
* \example
* \see Unity
*/
#include "../../include/common_includes.h"
/*
NOTE: This example kernel is VERY simple compared to most
and never has numElements hit grid.x limits
It is meant as a Unity example
*/
__global__ void add_100(int numElements, int* data){
if(blockIdx.x < numElements){
data[blockIdx.x] += 100;
}
}
__device__ bool i_greater(const int& a, const int& b){
return a > b;
}
__device__ jax::Unity<int>::comp_ptr greater_device = i_greater;
__device__ bool less70(const int& a){
return a < 70;
}
__device__ jax::Unity<int>::pred_ptr less70_device = less70;
__device__ bool greq70(const int& a){
return a >= 70;
}
__device__ jax::Unity<int>::pred_ptr greq70_device = greq70;
void add_100(jax::Unity<int>* i_nums){
//check where the data is
jax::MemoryState origin = i_nums->getMemoryState();
//make sure i_nums.device has the most up to date memory
if(origin == jax::cpu || i_nums->getFore() == jax::cpu){
i_nums->transferMemoryTo(jax::gpu);//this is making i_nums.state = i_nums.fore = jax::both
}
hipLaunchKernelGGL(( add_100), dim3(i_nums->size()),dim3(1), 0, 0, i_nums->size(),i_nums->device);
hipDeviceSynchronize();//global threadfence
CudaCheckError();//cuda error checker
i_nums->setFore(jax::gpu);//tell Unity where the updated memory is
if(origin == jax::cpu){
i_nums->setMemoryState(jax::cpu);//returning i_nums with state = cpu
}
else if(origin == jax::both){
i_nums->transferMemoryTo(jax::cpu);//just make sure i_nums.fore = both
}
//else origin was on the gpu so no need to do anything
}
template<typename T>
bool printTest(unsigned long numElements, T* data, T* truth){
for(int i = 0; i < numElements; ++i){
if(data[i] != truth[i]){
std::cout<<"test failed"<<std::endl;
return false;
}
}
std::cout<<"test passed"<<std::endl;
return true;
}
int main(int argc, char *argv[]){
try{
int fullpass = 0;
/*
Instantiate a Unity of a certain length from a nullptr.
*/
std::cout<<"test proper fore usage, Unity<T>::transferMemoryTo and Unity<T>::setMemoryState\n";
jax::Unity<int> i_nums = jax::Unity<int>(nullptr,100,jax::cpu);
std::vector<int> truth;
/*
Fill host with information.
*/
for(int i = 0; i < 100; ++i){
i_nums.host[i] = i-100;
truth.push_back((i-100)+100);
}
/*
Transfer to gpu.
*/
i_nums.transferMemoryTo(jax::gpu);
std::cout<<"\tafter i_nums.transferMemoryTo(jax::gpu) ";
i_nums.printInfo();
hipLaunchKernelGGL(( add_100), dim3(i_nums.size()),dim3(1), 0, 0, i_nums.size(),i_nums.device);
/*
Now make sure Unity knows that I have changed values on
the device because memory is also on the CPU.
*/
i_nums.setFore(jax::gpu);
std::cout<<"\tafter i_nums.setFore(jax::gpu) ";
i_nums.printInfo();
/*
Now I want the memory on the cpu but not on the gpu anymore.
NOTE: due to not calling hipDeviceSynchronize()
this is the threadfence as it uses hipMemcpy.
*/
i_nums.setMemoryState(jax::cpu);
CudaCheckError();//cuda error checker
std::cout<<"\tafter i_nums.setMemoryState(jax::cpu) ";
i_nums.printInfo();
std::cout<<"\t";
/*
Because we setFore Unity knew that I recently updated
i_nums.device so it transfered that update to i_nums.host
before deleting i_nums.device.
*/
fullpass += printTest<int>(i_nums.size(),i_nums.host,&truth[0]);
/*
Now let's delete and replace the i_nums data. Unity<T>::setData
does not copy the passed data; it uses the pointer directly and sets
device or host to it.
*/
std::cout<<"testing Unity<T>::setData"<<std::endl;
int* replacement = new int[1000]();
for(int i = 0; i < 1000; ++i){
replacement[i] = -i-100;
if(i < 100) truth[i] = -i;
else truth.push_back(-i);
}
i_nums.setData(replacement,1000,jax::cpu);
std::cout<<"\t";
fullpass += printTest<int>(i_nums.size(),i_nums.host,replacement);
/*
Now let's use a user function that assumes i_nums should return in
the same state it was in when passed to it.
*/
std::cout<<"testing user function handling state properly\n\t";
add_100(&i_nums);
fullpass += printTest<int>(i_nums.size(),i_nums.host,&truth[0]);
/*
Now let's resize the Unity so that only the first
10 elements are kept.
*/
std::cout<<"testing Unity<T>::resize\n\t";
i_nums.transferMemoryTo(jax::gpu);//Transfer to gpu, setting i_nums.state & fore to both
std::cout<<"after i_nums.transferMemoryTo(jax::gpu) ";
i_nums.printInfo();
std::cout<<"\t";
i_nums.resize(10);
std::cout<<"after i_nums.resize(10) ";
i_nums.printInfo();
std::cout<<"\ttest "<<((i_nums.size() == 10) ? "passed" : "failed")<<std::endl;
/*
Now let's test the zeroOut feature, which is essentially clear()
without deleting device or host.
*/
std::cout<<"testing Unity<T>::zeroOut\n\t";
i_nums.zeroOut(jax::cpu);
std::cout<<"after i_nums.zeroOut(jax::cpu) ";
i_nums.printInfo();
truth.clear();
for(int i = 0; i < 10; ++i){
truth.push_back(0);
}
std::cout<<"\t";
fullpass += printTest<int>(i_nums.size(),i_nums.host,&truth[0]);
/*
As the zeroOut function is setting i_nums.fore = cpu, we
can setFore(gpu) and any transfer other than clear will
give host back the original data. This also shows
how improperly tracking and setting fore can lead to
changes being overwritten. Unity<T>::fore is used to inform
Unity about changes to a particular memory address and is
vital in utilizing Unity.
NOTE: Unity will also not allow you to set
fore to both manually, this is done within unity by using transferMemoryState()
or setMemoryStateTo(both).
*/
std::cout<<"testing result of improper fore tracking with Unity<T>::transferMemoryTo\n\t";
i_nums.setFore(jax::gpu);
std::cout<<"after i_nums.setFore(jax::gpu) ";
i_nums.printInfo();
i_nums.transferMemoryTo(jax::cpu);
std::cout<<"\tafter i_nums.transferMemoryTo(jax::cpu) ";
i_nums.printInfo();
std::cout<<"\t";
for(int i = 0; i < 10; ++i){
truth[i] = -i;
}
fullpass += printTest<int>(i_nums.size(),i_nums.host,&truth[0]);
/*
Another example where fore messes things up
*/
std::cout<<"testing result of improper fore tracking with Unity<T>::setMemoryState\n\t";
i_nums.zeroOut(jax::gpu);//now gpu = fore as Unity sets it when zeroOut is called
std::cout<<"after i_nums.zeroOut(jax::gpu) ";
i_nums.printInfo();
for(int i = 0; i < i_nums.size(); ++i){
i_nums.host[i] = i;
truth[i] = 0;
}
/*
So now i_nums.host = {0,1,2,3,4,5,6,7,8,9}, butttttt
NOTE: if you try and transfer memory to the same state as fore
nothing will happen and a warning will be logged.
*/
i_nums.transferMemoryTo(jax::gpu);//example of transfer doing nothing due to fore being set to gpu
std::cout<<"\tafter i_nums.transferMemoryTo(jax::gpu) ";
i_nums.printInfo();
i_nums.setMemoryState(jax::cpu);//as gpu is fore this will transfer gpu to cpu before deleting i_nums.device
std::cout<<"\tafter i_nums.setMemoryState(jax::cpu) ";
i_nums.printInfo();
std::cout<<"\t";
fullpass += printTest<int>(i_nums.size(),i_nums.host,&truth[0]);
i_nums.resize(100);
truth.clear();
for(int i = 0; i < 100; ++i){
i_nums.host[i] = i;
truth.push_back(99-i);
}
std::cout<<"testing result sorting with custom greater than\n\t";
jax::Unity<int>::comp_ptr greater_host;
hipMemcpyFromSymbol(&greater_host, greater_device, sizeof(jax::Unity<int>::comp_ptr));
i_nums.sort(greater_host); // T must have an overloaded > operator
fullpass += printTest<int>(i_nums.size(),i_nums.host,&truth[0]);
truth.clear();
for(int i = 0; i < 70; ++i){
truth.push_back(i);
}
std::cout<<"testing copy if constructor\n\t";
jax::Unity<int>::pred_ptr less70_host;
hipMemcpyFromSymbol(&less70_host, less70_device, sizeof(jax::Unity<int>::pred_ptr));
jax::Unity<int> i_nums_keep = jax::Unity<int>(&i_nums,less70_host);
i_nums_keep.sort();
fullpass += printTest<int>(i_nums_keep.size(),i_nums_keep.host,&truth[0]);
std::cout<<"testing remove if\n\t"<<std::endl;
jax::Unity<int>::pred_ptr greq70_host;
hipMemcpyFromSymbol(&greq70_host, greq70_device, sizeof(jax::Unity<int>::pred_ptr));
i_nums.remove(greq70_host);
i_nums.sort();
fullpass += printTest<int>(i_nums.size(),i_nums.host,i_nums_keep.host);
if(fullpass == 9){
std::cout<<"ALL TESTS PASSED"<<std::endl;
}
/*
That's the basic concept of Unity!
And it can hold ANY type, just
replace <int> with <T> T=type of your data.
*/
return 0;
}
catch (const std::exception &e){
std::cerr << "Caught exception: " << e.what() << '\n';
std::exit(1);
}
catch (...){
std::cerr << "Caught unknown exception\n";
std::exit(1);
}
}
| e60efa98f66323904dae4f5c944fc67f660c753c.cu | /**
* \file example_unity.cu
* \author Jackson Parker
* \date Feb 8 2020
* \brief Example on the usage of the Unity structure.
* \details This can also act as an executable for unit testing.
* \example
* \see Unity
*/
#include "../../include/common_includes.h"
/*
NOTE: This example kernel is VERY simple compared to most
and never has numElements hit grid.x limits
It is meant as a Unity example
*/
__global__ void add_100(int numElements, int* data){
if(blockIdx.x < numElements){
data[blockIdx.x] += 100;
}
}
__device__ bool i_greater(const int& a, const int& b){
return a > b;
}
__device__ jax::Unity<int>::comp_ptr greater_device = i_greater;
__device__ bool less70(const int& a){
return a < 70;
}
__device__ jax::Unity<int>::pred_ptr less70_device = less70;
__device__ bool greq70(const int& a){
return a >= 70;
}
__device__ jax::Unity<int>::pred_ptr greq70_device = greq70;
void add_100(jax::Unity<int>* i_nums){
//check where the data is
jax::MemoryState origin = i_nums->getMemoryState();
//make sure i_nums.device has the most up to date memory
if(origin == jax::cpu || i_nums->getFore() == jax::cpu){
i_nums->transferMemoryTo(jax::gpu);//this is making i_nums.state = i_nums.fore = jax::both
}
add_100<<<i_nums->size(),1>>>(i_nums->size(),i_nums->device);
cudaDeviceSynchronize();//global threadfence
CudaCheckError();//cuda error checker
i_nums->setFore(jax::gpu);//tell Unity where the updated memory is
if(origin == jax::cpu){
i_nums->setMemoryState(jax::cpu);//returning i_nums with state = cpu
}
else if(origin == jax::both){
i_nums->transferMemoryTo(jax::cpu);//just make sure i_nums.fore = both
}
//else origin was on the gpu so no need to do anything
}
template<typename T>
bool printTest(unsigned long numElements, T* data, T* truth){
for(int i = 0; i < numElements; ++i){
if(data[i] != truth[i]){
std::cout<<"test failed"<<std::endl;
return false;
}
}
std::cout<<"test passed"<<std::endl;
return true;
}
int main(int argc, char *argv[]){
try{
int fullpass = 0;
/*
Instantiate a Unity of a certain length from a nullptr.
*/
std::cout<<"test proper fore usage, Unity<T>::transferMemoryTo and Unity<T>::setMemoryState\n";
jax::Unity<int> i_nums = jax::Unity<int>(nullptr,100,jax::cpu);
std::vector<int> truth;
/*
Fill host with information.
*/
for(int i = 0; i < 100; ++i){
i_nums.host[i] = i-100;
truth.push_back((i-100)+100);
}
/*
Transfer to gpu.
*/
i_nums.transferMemoryTo(jax::gpu);
std::cout<<"\tafter i_nums.transferMemoryTo(jax::gpu) ";
i_nums.printInfo();
add_100<<<i_nums.size(),1>>>(i_nums.size(),i_nums.device);
/*
Now make sure Unity knows that I have changed values on
the device because memory is also on the CPU.
*/
i_nums.setFore(jax::gpu);
std::cout<<"\tafter i_nums.setFore(jax::gpu) ";
i_nums.printInfo();
/*
Now I want the memory on the cpu but not on the gpu anymore.
NOTE: due to not calling cudaDeviceSynchronize()
this is the threadfence as it uses cudaMemcpy.
*/
i_nums.setMemoryState(jax::cpu);
CudaCheckError();//cuda error checker
std::cout<<"\tafter i_nums.setMemoryState(jax::cpu) ";
i_nums.printInfo();
std::cout<<"\t";
/*
Because we setFore Unity knew that I recently updated
i_nums.device so it transfered that update to i_nums.host
before deleting i_nums.device.
*/
fullpass += printTest<int>(i_nums.size(),i_nums.host,&truth[0]);
/*
Now let's delete and replace the i_nums data. Unity<T>::setData
does not copy the passed data; it uses the pointer directly and sets
device or host to it.
*/
std::cout<<"testing Unity<T>::setData"<<std::endl;
int* replacement = new int[1000]();
for(int i = 0; i < 1000; ++i){
replacement[i] = -i-100;
if(i < 100) truth[i] = -i;
else truth.push_back(-i);
}
i_nums.setData(replacement,1000,jax::cpu);
std::cout<<"\t";
fullpass += printTest<int>(i_nums.size(),i_nums.host,replacement);
/*
Now let's use a user function that assumes i_nums should return in
the same state it was in when passed to it.
*/
std::cout<<"testing user function handling state properly\n\t";
add_100(&i_nums);
fullpass += printTest<int>(i_nums.size(),i_nums.host,&truth[0]);
/*
Now let's resize the Unity so that only the first
10 elements are kept.
*/
std::cout<<"testing Unity<T>::resize\n\t";
i_nums.transferMemoryTo(jax::gpu);//Transfer to gpu, setting i_nums.state & fore to both
std::cout<<"after i_nums.transferMemoryTo(jax::gpu) ";
i_nums.printInfo();
std::cout<<"\t";
i_nums.resize(10);
std::cout<<"after i_nums.resize(10) ";
i_nums.printInfo();
std::cout<<"\ttest "<<((i_nums.size() == 10) ? "passed" : "failed")<<std::endl;
/*
Now let's test the zeroOut feature, which is essentially clear()
without deleting device or host.
*/
std::cout<<"testing Unity<T>::zeroOut\n\t";
i_nums.zeroOut(jax::cpu);
std::cout<<"after i_nums.zeroOut(jax::cpu) ";
i_nums.printInfo();
truth.clear();
for(int i = 0; i < 10; ++i){
truth.push_back(0);
}
std::cout<<"\t";
fullpass += printTest<int>(i_nums.size(),i_nums.host,&truth[0]);
/*
As the zeroOut function is setting i_nums.fore = cpu, we
can setFore(gpu) and any transfer other than clear will
give host back the original data. This also shows
how improperly tracking and setting fore can lead to
changes being overwritten. Unity<T>::fore is used to inform
Unity about changes to a particular memory address and is
vital in utilizing Unity.
NOTE: Unity will also not allow you to set
fore to both manually, this is done within unity by using transferMemoryState()
or setMemoryStateTo(both).
*/
std::cout<<"testing result of improper fore tracking with Unity<T>::transferMemoryTo\n\t";
i_nums.setFore(jax::gpu);
std::cout<<"after i_nums.setFore(jax::gpu) ";
i_nums.printInfo();
i_nums.transferMemoryTo(jax::cpu);
std::cout<<"\tafter i_nums.transferMemoryTo(jax::cpu) ";
i_nums.printInfo();
std::cout<<"\t";
for(int i = 0; i < 10; ++i){
truth[i] = -i;
}
fullpass += printTest<int>(i_nums.size(),i_nums.host,&truth[0]);
/*
Another example where fore messes things up
*/
std::cout<<"testing result of improper fore tracking with Unity<T>::setMemoryState\n\t";
i_nums.zeroOut(jax::gpu);//now gpu = fore as Unity sets it when zeroOut is called
std::cout<<"after i_nums.zeroOut(jax::gpu) ";
i_nums.printInfo();
for(int i = 0; i < i_nums.size(); ++i){
i_nums.host[i] = i;
truth[i] = 0;
}
/*
So now i_nums.host = {0,1,2,3,4,5,6,7,8,9}, butttttt
NOTE: if you try and transfer memory to the same state as fore
nothing will happen and a warning will be logged.
*/
i_nums.transferMemoryTo(jax::gpu);//example of transfer doing nothing due to fore being set to gpu
std::cout<<"\tafter i_nums.transferMemoryTo(jax::gpu) ";
i_nums.printInfo();
i_nums.setMemoryState(jax::cpu);//as gpu is fore this will transfer gpu to cpu before deleting i_nums.device
std::cout<<"\tafter i_nums.setMemoryState(jax::cpu) ";
i_nums.printInfo();
std::cout<<"\t";
fullpass += printTest<int>(i_nums.size(),i_nums.host,&truth[0]);
i_nums.resize(100);
truth.clear();
for(int i = 0; i < 100; ++i){
i_nums.host[i] = i;
truth.push_back(99-i);
}
std::cout<<"testing result sorting with custom greater than\n\t";
jax::Unity<int>::comp_ptr greater_host;
cudaMemcpyFromSymbol(&greater_host, greater_device, sizeof(jax::Unity<int>::comp_ptr));
i_nums.sort(greater_host); // T must have an overloaded > operator
fullpass += printTest<int>(i_nums.size(),i_nums.host,&truth[0]);
truth.clear();
for(int i = 0; i < 70; ++i){
truth.push_back(i);
}
std::cout<<"testing copy if constructor\n\t";
jax::Unity<int>::pred_ptr less70_host;
cudaMemcpyFromSymbol(&less70_host, less70_device, sizeof(jax::Unity<int>::pred_ptr));
jax::Unity<int> i_nums_keep = jax::Unity<int>(&i_nums,less70_host);
i_nums_keep.sort();
fullpass += printTest<int>(i_nums_keep.size(),i_nums_keep.host,&truth[0]);
std::cout<<"testing remove if\n\t"<<std::endl;
jax::Unity<int>::pred_ptr greq70_host;
cudaMemcpyFromSymbol(&greq70_host, greq70_device, sizeof(jax::Unity<int>::pred_ptr));
i_nums.remove(greq70_host);
i_nums.sort();
fullpass += printTest<int>(i_nums.size(),i_nums.host,i_nums_keep.host);
if(fullpass == 9){
std::cout<<"ALL TESTS PASSED"<<std::endl;
}
/*
That's the basic concept of Unity!
And it can hold ANY type, just
replace <int> with <T> T=type of your data.
*/
return 0;
}
catch (const std::exception &e){
std::cerr << "Caught exception: " << e.what() << '\n';
std::exit(1);
}
catch (...){
std::cerr << "Caught unknown exception\n";
std::exit(1);
}
}
|
2824fd8b1a1d05dd5d955543c51c7dda2f1d4153.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void vecAdd_kernel(const int *A, const int *B, int *C)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
C[idx] = A[idx] + B[idx];
}
void vecAdd(const int *A, const int *B, int *C, int N)
{
hipLaunchKernelGGL(( vecAdd_kernel) , dim3(1), dim3(N) , 0, 0, A, B, C);
} | 2824fd8b1a1d05dd5d955543c51c7dda2f1d4153.cu | __global__ void vecAdd_kernel(const int *A, const int *B, int *C)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
C[idx] = A[idx] + B[idx];
}
void vecAdd(const int *A, const int *B, int *C, int N)
{
vecAdd_kernel <<< 1, N >>> (A, B, C);
} |
e64ddc36eebe0432e72da9063afc5abbcd7b54fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 64
void vectorAdd(int A_h [][N], int B_h [][N], int C_h [][N]);
__global__ void vectorAddKernel(int *A_d, int *B_d, int *C_d);
int main()
{
int A_h[N][N];
int B_h[N][N];
int C_h[N][N];
for(int i = 0; i < N; i++)
{
for(int j = 0; j < N; j++)
{
A_h[i][j] = 2;
B_h[i][j] = 1;
}
}
vectorAdd(A_h, B_h, C_h);
for(int r = 0; r < N; r++)
{
for(int c = 0; c < N; c++)
{
printf("C[%d][%d] is %d\n", r, c, C_h[r][c]);
}
}
return 0;
}
void vectorAdd(int A_h[][N], int B_h[][N], int C_h[][N])
{
int size = N * N * sizeof(int);
int *A_d, *B_d, *C_d;
hipMalloc((void**)&A_d, size);
hipMalloc((void**)&B_d, size);
hipMalloc((void**)&C_d, size);
hipMemcpy(A_d, A_h, size, hipMemcpyHostToDevice);
hipMemcpy(B_d, B_h, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vectorAddKernel), dim3(1), dim3(1024) , 0, 0, A_d, B_d, C_d);
hipMemcpy(C_h, C_d, size, hipMemcpyDeviceToHost);
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
}
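// Each of the 1024 launched threads adds four consecutive elements
// (id = threadIdx.x * 4 + i), covering all N*N = 4096 entries.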
__global__ void vectorAddKernel(int *A_d, int *B_d, int *C_d)
{
int id;
for(int i = 0; i < 4; i++)
{
id = threadIdx.x * 4 + i;
C_d[id]= A_d[id] + B_d[id];
}
}
| e64ddc36eebe0432e72da9063afc5abbcd7b54fc.cu | #include <stdio.h>
#define N 64
void vectorAdd(int A_h [][N], int B_h [][N], int C_h [][N]);
__global__ void vectorAddKernel(int *A_d, int *B_d, int *C_d);
int main()
{
int A_h[N][N];
int B_h[N][N];
int C_h[N][N];
for(int i = 0; i < N; i++)
{
for(int j = 0; j < N; j++)
{
A_h[i][j] = 2;
B_h[i][j] = 1;
}
}
vectorAdd(A_h, B_h, C_h);
for(int r = 0; r < N; r++)
{
for(int c = 0; c < N; c++)
{
printf("C[%d][%d] is %d\n", r, c, C_h[r][c]);
}
}
return 0;
}
void vectorAdd(int A_h[][N], int B_h[][N], int C_h[][N])
{
int size = N * N * sizeof(int);
int *A_d, *B_d, *C_d;
cudaMalloc((void**)&A_d, size);
cudaMalloc((void**)&B_d, size);
cudaMalloc((void**)&C_d, size);
cudaMemcpy(A_d, A_h, size, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B_h, size, cudaMemcpyHostToDevice);
vectorAddKernel<<< 1, 1024 >>>(A_d, B_d, C_d);
cudaMemcpy(C_h, C_d, size, cudaMemcpyDeviceToHost);
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
}
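// Each of the 1024 launched threads adds four consecutive elements
// (id = threadIdx.x * 4 + i), covering all N*N = 4096 entries.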
__global__ void vectorAddKernel(int *A_d, int *B_d, int *C_d)
{
int id;
for(int i = 0; i < 4; i++)
{
id = threadIdx.x * 4 + i;
C_d[id]= A_d[id] + B_d[id];
}
}
|
7b0501debae93979a58594cebbccdb4a7ab754e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "complex.h"
#include "cumath.h"
#include "myreduction.h"
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template<class T> struct SharedMemory {
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template<> struct SharedMemory<double>
{
__device__ inline operator double *()
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
bool isPow2(unsigned int x) { return cumath::is_pow_2(x); }
/*
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
template <class T, unsigned int blockSize, bool nIsPow2>
__global__ void reduce6(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n) {
mySum += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
mySum += g_idata[i+blockSize];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256)) {
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) &&(tid < 128)) {
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64)) {
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += __shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
template <class T>
void reduce(int size, int threads, int blocks,
int whichKernel, T *d_idata, T *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
if (isPow2(size)) {
switch (threads) {
case 512:
hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 256:
hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 128:
hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 64:
hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 32:
hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 16:
hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 8:
hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 4:
hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 2:
hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 1:
hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
}
} else {
switch (threads) {
case 512:
hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 256:
hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 128:
hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 64:
hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 32:
hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 16:
hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 8:
hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 4:
hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 2:
hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 1:
hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
}
}
}
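// Possible explicit instantiations so that reduce<T>() links when it is called
// from another translation unit. The exact set of types depends on myreduction.h,
// which is not shown here, so treat these as an assumption.
template void reduce<int>(int size, int threads, int blocks, int whichKernel, int *d_idata, int *d_odata);
template void reduce<float>(int size, int threads, int blocks, int whichKernel, float *d_idata, float *d_odata);
template void reduce<double>(int size, int threads, int blocks, int whichKernel, double *d_idata, double *d_odata);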
| 7b0501debae93979a58594cebbccdb4a7ab754e3.cu |
#include "complex.h"
#include "cumath.h"
#include "myreduction.h"
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template<class T> struct SharedMemory {
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template<> struct SharedMemory<double>
{
__device__ inline operator double *()
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
bool isPow2(unsigned int x) { return cumath::is_pow_2(x); }
/*
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
template <class T, unsigned int blockSize, bool nIsPow2>
__global__ void reduce6(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n) {
mySum += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
mySum += g_idata[i+blockSize];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256)) {
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) &&(tid < 128)) {
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64)) {
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += __shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
template <class T>
void reduce(int size, int threads, int blocks,
int whichKernel, T *d_idata, T *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
if (isPow2(size)) {
switch (threads) {
case 512:
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
} else {
switch (threads) {
case 512:
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
}
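// Possible explicit instantiations so that reduce<T>() links when it is called
// from another translation unit. The exact set of types depends on myreduction.h,
// which is not shown here, so treat these as an assumption.
template void reduce<int>(int size, int threads, int blocks, int whichKernel, int *d_idata, int *d_odata);
template void reduce<float>(int size, int threads, int blocks, int whichKernel, float *d_idata, float *d_odata);
template void reduce<double>(int size, int threads, int blocks, int whichKernel, double *d_idata, double *d_odata);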
|
876f42afcbb8cdf0242ca884670c18e4e3551573.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
// Array access macros
#define im(i,j) A[(i) + (j)*(m)]
#define f(i,j) f[(i) + (j)*(m)]
#define Z(i,j) Z[(i) + (j)*m]
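// Zev: for every pixel, accumulate the non-local-means normalization constant
// Z(x,y) = sum over all patch centers (p,l) of exp(-||weighted patch difference||^2 / filtsigma).
// H is assumed to hold the per-coefficient patch weights (e.g. a Gaussian window
// built from patchSigma on the host).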
__global__ void Zev(float const * const A, float *Z,float const * const H, int m, int n,int patch,float patchSigma,float filtsigma){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x<m-(patch-1)/2 && y<n-(patch-1)/2){
int i,j,p,l,count=0;
patch=(patch-1) /2;
float temp=0.0,sum=0;
for(p=patch;p<m-patch;p++){
for(l=patch;l<n-patch;l++){
for(i=-patch;i<=patch;i++){
for(j=-patch;j<=patch;j++){
temp=(im(x+patch+i,y+patch+j)-im(p+i,l+j))*H[count];
sum=sum+temp*temp;
count++;
}
}
Z(x+patch,y+patch)=Z(x+patch,y+patch)+exp(-(sum/(filtsigma)));
sum=0;
count=0;
}
}
}
}
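// fev: second pass that computes the filtered value f(x,y) as the Z-normalized,
// exponentially weighted average of all pixel values im(p,l), using the same
// patch distances as Zev.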
__global__ void fev(float const * const A,float const * const Z, float *f,float const * const H, int m, int n,int patch,float patchSigma,float filtsigma){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x<m-(patch-1)/2 && y<n-(patch-1)/2){
patch=(patch-1) /2;
int i,j;
float temp,sum=0.0;
int p,l,count=0;
for(p=patch;p<m-patch;p++){
for(l=patch;l<n-patch;l++){
for(i=-patch;i<=patch;i++){
for(j=-patch;j<=patch;j++){
temp=(im(x+patch+i,y+patch+j)-im(p+i,l+j))*H[count];
sum=sum+temp*temp;
count++;
}
}
count=0;
f(x+patch,y+patch)=f(x+patch,y+patch)+((1/Z(x+patch,y+patch))*exp(-(sum/filtsigma)))*im(p,l);
sum=0;
}
}
}
}
| 876f42afcbb8cdf0242ca884670c18e4e3551573.cu | #include <math.h>
#include <stdio.h>
#include <cuda_runtime.h>
// Array access macros
#define im(i,j) A[(i) + (j)*(m)]
#define f(i,j) f[(i) + (j)*(m)]
#define Z(i,j) Z[(i) + (j)*m]
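// Zev: for every pixel, accumulate the non-local-means normalization constant
// Z(x,y) = sum over all patch centers (p,l) of exp(-||weighted patch difference||^2 / filtsigma).
// H is assumed to hold the per-coefficient patch weights (e.g. a Gaussian window
// built from patchSigma on the host).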
__global__ void Zev(float const * const A, float *Z,float const * const H, int m, int n,int patch,float patchSigma,float filtsigma){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x<m-(patch-1)/2 && y<n-(patch-1)/2){
int i,j,p,l,count=0;
patch=(patch-1) /2;
float temp=0.0,sum=0;
for(p=patch;p<m-patch;p++){
for(l=patch;l<n-patch;l++){
for(i=-patch;i<=patch;i++){
for(j=-patch;j<=patch;j++){
temp=(im(x+patch+i,y+patch+j)-im(p+i,l+j))*H[count];
sum=sum+temp*temp;
count++;
}
}
Z(x+patch,y+patch)=Z(x+patch,y+patch)+exp(-(sum/(filtsigma)));
sum=0;
count=0;
}
}
}
}
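// fev: second pass that computes the filtered value f(x,y) as the Z-normalized,
// exponentially weighted average of all pixel values im(p,l), using the same
// patch distances as Zev.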
__global__ void fev(float const * const A,float const * const Z, float *f,float const * const H, int m, int n,int patch,float patchSigma,float filtsigma){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x<m-(patch-1)/2 && y<n-(patch-1)/2){
patch=(patch-1) /2;
int i,j;
float temp,sum=0.0;
int p,l,count=0;
for(p=patch;p<m-patch;p++){
for(l=patch;l<n-patch;l++){
for(i=-patch;i<=patch;i++){
for(j=-patch;j<=patch;j++){
temp=(im(x+patch+i,y+patch+j)-im(p+i,l+j))*H[count];
sum=sum+temp*temp;
count++;
}
}
count=0;
f(x+patch,y+patch)=f(x+patch,y+patch)+((1/Z(x+patch,y+patch))*exp(-(sum/filtsigma)))*im(p,l);
sum=0;
}
}
}
}
|
c8f4c714ef487f084f92f48133b2ff119ea3ba9e.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <unistd.h>
#include <cstdlib>
#include <string>
#include <cmath>
#include <algorithm>
#include <list>
#include "CycleTimer.h"
#include <hip/hip_runtime.h>
#include "utils.h"
#include "edge_bc.h"
#define THRESHOLD 256
using namespace std;
__global__ void forward_edge (int *d_v, int *d_e, int *d_d, int *d_sigma, bool *done, int *d_dist, int num_edges) {
int tid = blockIdx.x * blockDim.x * gridDim.y + blockIdx.y * blockDim.x + threadIdx.x;
if(tid < num_edges) {
/* for each edge (u, w) */
int u = d_v[tid];
if(d_d[u] == *d_dist) {
int w = d_e[tid];
if(d_d[w] == -1) {
d_d[w] = *d_dist + 1;
*done = false;
}
if(d_d[w] == *d_dist + 1) {
atomicAdd(&d_sigma[w], d_sigma[u]);
}
}
}
}
__global__ void backward_edge (int *d_v, int *d_e, int *d_d, int *d_sigma, float *d_delta, int *d_dist, int num_edges) {
int tid = blockIdx.x * blockDim.x * gridDim.y + blockIdx.y * blockDim.x + threadIdx.x;
if(tid < num_edges) {
int u = d_v[tid];
if(d_d[u] == *d_dist - 1) {
int w = d_e[tid];
if(d_d[w] == *d_dist) {
atomicAdd(&d_delta[u], 1.0f*d_sigma[u]/d_sigma[w]*(1.0f+d_delta[w]));
//printf("updated node %d 's delta value to %f\n", u, d_delta[u]);
}
}
}
}
__global__ void backsum_edge (int s, int *d_d, float *d_delta, float *d_bc, int num_nodes) {
int tid = blockIdx.x * blockDim.x * gridDim.y + blockIdx.y * blockDim.x + threadIdx.x;
if(tid < num_nodes && tid != s && d_d[tid] != -1) {
d_bc[tid] += d_delta[tid];
}
}
__global__ void init_edge (int s, int *d_d, int *d_sigma, int num_nodes, int* d_dist) {
int i = blockIdx.x * blockDim.x * gridDim.y + blockIdx.y * blockDim.x + threadIdx.x;
if(i < num_nodes) {
d_d[i] = -1;
d_sigma[i] = 0;
if(s == i) {
d_d[i] = 0;
d_sigma[i] = 1;
*d_dist = 0;
}
}
}
__global__ void set_edge (int* dest, int val) {
*dest = val;
}
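// bc_edge runs edge-parallel Brandes-style traversals for betweenness centrality:
// v[i] and e[i] hold the source and target of edge i in COO form (an undirected
// graph needs each edge listed in both directions), nb is the number of source
// vertices to process (nb < num_nodes yields an approximation), and bc must
// point to num_nodes floats for the result.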
int bc_edge (int* v, int* e, int num_nodes, int num_edges, int nb, float* bc) {
int *d_v, *d_e, *d_d, *d_sigma, *d_dist, h_dist;
float *d_delta, *d_bc;
bool h_done, *done;
checkCudaErrors(hipMalloc((void**)&d_v, sizeof(int) * num_edges));
checkCudaErrors(hipMalloc((void**)&d_e, sizeof(int) * num_edges));
checkCudaErrors(hipMemcpy(d_v, v, sizeof(int) * num_edges, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_e, e, sizeof(int) * num_edges, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void**)&d_d, sizeof(int) * num_nodes));
checkCudaErrors(hipMalloc((void**)&d_sigma, sizeof(int) * num_nodes));
checkCudaErrors(hipMalloc((void**)&d_delta, sizeof(float) * num_nodes));
checkCudaErrors(hipMalloc((void**)&d_dist, sizeof(int)));
checkCudaErrors(hipMalloc((void**)&d_bc, sizeof(float) * num_nodes));
checkCudaErrors(hipMemset(d_bc, 0, sizeof(int) * num_nodes));
checkCudaErrors(hipMalloc((void **)&done, sizeof(bool)));
int threads_per_block = num_edges;
int blocks = 1;
if (num_edges > THRESHOLD) {
blocks = (int)ceil(num_edges / (float) THRESHOLD);
blocks = (int)ceil(sqrt((float)blocks));
threads_per_block = THRESHOLD;
}
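// The edge range (and, below, the node range) is mapped onto a square 2D grid of
// THRESHOLD-thread blocks so neither grid dimension exceeds the hardware limit.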
dim3 num_blocks;
num_blocks.x = blocks;
num_blocks.y = blocks;
dim3 threadsPerBlock(threads_per_block);
int threads_per_block2 = num_nodes;
int blocks2 = 1;
if (num_nodes > THRESHOLD) {
blocks2 = (int)ceil(num_nodes / (double)THRESHOLD);
blocks2 = (int)ceil(sqrt((float)blocks2));
threads_per_block2 = THRESHOLD;
}
dim3 num_blocks2;
num_blocks2.x = blocks2;
num_blocks2.y = blocks2;
dim3 threadsPerBlock2(threads_per_block2);
double begin = CycleTimer::currentSeconds();
for (int i = 0; i < min(nb, num_nodes); ++i) {
h_dist = 0;
hipLaunchKernelGGL(( init_edge), dim3(num_blocks), dim3(threadsPerBlock), 0, 0, i, d_d, d_sigma, num_nodes, d_dist);
// forward propagation
do {
checkCudaErrors(hipMemset(done, 1, sizeof(bool)));
hipLaunchKernelGGL(( forward_edge) , dim3(num_blocks), dim3(threadsPerBlock), 0, 0, d_v, d_e, d_d, d_sigma, done, d_dist, num_edges);
hipLaunchKernelGGL(( set_edge), dim3(1), dim3(1), 0, 0, d_dist, ++h_dist);
checkCudaErrors(hipMemcpy(&h_done, done, sizeof(bool), hipMemcpyDeviceToHost));
} while (!h_done);
// backward propagation
checkCudaErrors(hipMemset(d_delta, 0, sizeof(int) * num_nodes));
//printf("forward done successfully\n");
hipLaunchKernelGGL(( set_edge), dim3(1), dim3(1), 0, 0, d_dist, --h_dist);
while (h_dist > 1) {
hipLaunchKernelGGL(( backward_edge) , dim3(num_blocks), dim3(threadsPerBlock), 0, 0, d_v, d_e, d_d, d_sigma, d_delta, d_dist, num_edges);
hipDeviceSynchronize();
hipLaunchKernelGGL(( set_edge), dim3(1), dim3(1), 0, 0, d_dist, --h_dist);
}
hipLaunchKernelGGL(( backsum_edge) , dim3(num_blocks2), dim3(threadsPerBlock2), 0, 0, i, d_d, d_delta, d_bc, num_nodes);
hipDeviceSynchronize();
}
double end = CycleTimer::currentSeconds();
printf("Edge parallel computation takes %f s\n", end - begin);
checkCudaErrors(hipMemcpy(bc, d_bc, sizeof(float)*num_nodes, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_v));
checkCudaErrors(hipFree(d_e));
checkCudaErrors(hipFree(d_d));
checkCudaErrors(hipFree(d_sigma));
checkCudaErrors(hipFree(d_delta));
checkCudaErrors(hipFree(d_dist));
checkCudaErrors(hipFree(d_bc));
checkCudaErrors(hipFree(done));
return 0;
}
| c8f4c714ef487f084f92f48133b2ff119ea3ba9e.cu | #include <iostream>
#include <unistd.h>
#include <cstdlib>
#include <string>
#include <cmath>
#include <algorithm>
#include <list>
#include "CycleTimer.h"
#include <cuda.h>
#include "utils.h"
#include "edge_bc.h"
#define THRESHOLD 256
using namespace std;
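// Edge-parallel betweenness centrality: one thread per edge drives both the level-synchronous
// forward BFS and the backward dependency accumulation; per-node kernels initialise state and
// fold delta into bc.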
__global__ void forward_edge (int *d_v, int *d_e, int *d_d, int *d_sigma, bool *done, int *d_dist, int num_edges) {
int tid = blockIdx.x * blockDim.x * gridDim.y + blockIdx.y * blockDim.x + threadIdx.x;
if(tid < num_edges) {
/* for each edge (u, w) */
int u = d_v[tid];
if(d_d[u] == *d_dist) {
int w = d_e[tid];
if(d_d[w] == -1) {
d_d[w] = *d_dist + 1;
*done = false;
}
if(d_d[w] == *d_dist + 1) {
atomicAdd(&d_sigma[w], d_sigma[u]);
}
}
}
}
__global__ void backward_edge (int *d_v, int *d_e, int *d_d, int *d_sigma, float *d_delta, int *d_dist, int num_edges) {
int tid = blockIdx.x * blockDim.x * gridDim.y + blockIdx.y * blockDim.x + threadIdx.x;
if(tid < num_edges) {
int u = d_v[tid];
if(d_d[u] == *d_dist - 1) {
int w = d_e[tid];
if(d_d[w] == *d_dist) {
atomicAdd(&d_delta[u], 1.0f*d_sigma[u]/d_sigma[w]*(1.0f+d_delta[w]));
//printf("updated node %d 's delta value to %f\n", u, d_delta[u]);
}
}
}
}
__global__ void backsum_edge (int s, int *d_d, float *d_delta, float *d_bc, int num_nodes) {
int tid = blockIdx.x * blockDim.x * gridDim.y + blockIdx.y * blockDim.x + threadIdx.x;
if(tid < num_nodes && tid != s && d_d[tid] != -1) {
d_bc[tid] += d_delta[tid];
}
}
__global__ void init_edge (int s, int *d_d, int *d_sigma, int num_nodes, int* d_dist) {
int i = blockIdx.x * blockDim.x * gridDim.y + blockIdx.y * blockDim.x + threadIdx.x;
if(i < num_nodes) {
d_d[i] = -1;
d_sigma[i] = 0;
if(s == i) {
d_d[i] = 0;
d_sigma[i] = 1;
*d_dist = 0;
}
}
}
__global__ void set_edge (int* dest, int val) {
*dest = val;
}
int bc_edge (int* v, int* e, int num_nodes, int num_edges, int nb, float* bc) {
int *d_v, *d_e, *d_d, *d_sigma, *d_dist, h_dist;
float *d_delta, *d_bc;
bool h_done, *done;
checkCudaErrors(cudaMalloc((void**)&d_v, sizeof(int) * num_edges));
checkCudaErrors(cudaMalloc((void**)&d_e, sizeof(int) * num_edges));
checkCudaErrors(cudaMemcpy(d_v, v, sizeof(int) * num_edges, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_e, e, sizeof(int) * num_edges, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void**)&d_d, sizeof(int) * num_nodes));
checkCudaErrors(cudaMalloc((void**)&d_sigma, sizeof(int) * num_nodes));
checkCudaErrors(cudaMalloc((void**)&d_delta, sizeof(float) * num_nodes));
checkCudaErrors(cudaMalloc((void**)&d_dist, sizeof(int)));
checkCudaErrors(cudaMalloc((void**)&d_bc, sizeof(float) * num_nodes));
checkCudaErrors(cudaMemset(d_bc, 0, sizeof(int) * num_nodes));
checkCudaErrors(cudaMalloc((void **)&done, sizeof(bool)));
int threads_per_block = num_edges;
int blocks = 1;
if (num_edges > THRESHOLD) {
blocks = (int)ceil(num_edges / (float) THRESHOLD);
blocks = (int)ceil(sqrt((float)blocks));
threads_per_block = THRESHOLD;
}
dim3 num_blocks;
num_blocks.x = blocks;
num_blocks.y = blocks;
dim3 threadsPerBlock(threads_per_block);
int threads_per_block2 = num_nodes;
int blocks2 = 1;
if (num_nodes > THRESHOLD) {
blocks2 = (int)ceil(num_nodes / (double)THRESHOLD);
blocks2 = (int)ceil(sqrt((float)blocks2));
threads_per_block2 = THRESHOLD;
}
dim3 num_blocks2;
num_blocks2.x = blocks2;
num_blocks2.y = blocks2;
dim3 threadsPerBlock2(threads_per_block2);
double begin = CycleTimer::currentSeconds();
for (int i = 0; i < min(nb, num_nodes); ++i) {
h_dist = 0;
init_edge<<<num_blocks, threadsPerBlock>>>(i, d_d, d_sigma, num_nodes, d_dist);
// forward propagation
do {
checkCudaErrors(cudaMemset(done, 1, sizeof(bool)));
forward_edge <<<num_blocks, threadsPerBlock>>>(d_v, d_e, d_d, d_sigma, done, d_dist, num_edges);
set_edge<<<1, 1>>>(d_dist, ++h_dist);
checkCudaErrors(cudaMemcpy(&h_done, done, sizeof(bool), cudaMemcpyDeviceToHost));
} while (!h_done);
// backward propagation
checkCudaErrors(cudaMemset(d_delta, 0, sizeof(int) * num_nodes));
//printf("forward done successfully\n");
set_edge<<<1, 1>>>(d_dist, --h_dist);
while (h_dist > 1) {
backward_edge <<<num_blocks, threadsPerBlock>>>(d_v, d_e, d_d, d_sigma, d_delta, d_dist, num_edges);
cudaThreadSynchronize();
set_edge<<<1, 1>>>(d_dist, --h_dist);
}
backsum_edge <<<num_blocks2, threadsPerBlock2>>>(i, d_d, d_delta, d_bc, num_nodes);
cudaThreadSynchronize();
}
double end = CycleTimer::currentSeconds();
printf("Edge parallel computation takes %f s\n", end - begin);
checkCudaErrors(cudaMemcpy(bc, d_bc, sizeof(float)*num_nodes, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_v));
checkCudaErrors(cudaFree(d_e));
checkCudaErrors(cudaFree(d_d));
checkCudaErrors(cudaFree(d_sigma));
checkCudaErrors(cudaFree(d_delta));
checkCudaErrors(cudaFree(d_dist));
checkCudaErrors(cudaFree(d_bc));
checkCudaErrors(cudaFree(done));
return 0;
}
|
36a4b8cf32902e1969f0a5508f993b064104aced.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/****************************************************************************\
* --- Practical Course: GPU Programming in Computer Vision ---
*
* time: winter term 2012/13 / March 11-18, 2013
*
* project: gradient
* file: gradient.cu
*
*
\******* PLEASE ENTER YOUR CORRECT STUDENT LOGIN, NAME AND ID BELOW *********/
const char* studentLogin = "p107";
const char* studentName = "Marco Servalli";
const int studentID = 3626387;
/****************************************************************************\
*
* In this file the following methods have to be edited or completed:
*
* derivativeY_sm_d(const float *inputImage, ... )
* derivativeY_sm_d(const float3 *inputImage, ... )
* gradient_magnitude_d(const float *inputImage, ... )
* gradient_magnitude_d(const float3 *inputImage, ... )
*
\****************************************************************************/
#include "gradient.cuh"
#define BW 16
#define BH 16
const char* getStudentLogin() { return studentLogin; };
const char* getStudentName() { return studentName; };
int getStudentID() { return studentID; };
bool checkStudentData() { return strcmp(studentLogin, "p010") != 0 && strcmp(studentName, "John Doe") != 0 && studentID != 1234567; };
bool checkStudentNameAndID() { return strcmp(studentName, "John Doe") != 0 && studentID != 1234567; };
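// The derivative kernels stage a BW x BH image tile plus a one-pixel halo in shared memory,
// replicating pixels at the image border, and output central differences
// 0.5*(next - previous) along x or y.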
__global__ void derivativeX_sm_d(const float *inputImage, float *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float u[BW+2][BH];
if (x < iWidth && y < iHeight) {
u[threadIdx.x+1][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x);
if (x == 0)
u[threadIdx.x][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else if (threadIdx.x == 0)
u[threadIdx.x][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x-1);
if (x == (iWidth-1))
u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else if (threadIdx.x == blockDim.x-1)
u[threadIdx.x+2][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x+1);
}
__syncthreads();
if (x < iWidth && y < iHeight)
*((float*)(((char*)outputImage) + y*iPitchBytes)+ x) = 0.5f*(u[threadIdx.x+2][threadIdx.y]-u[threadIdx.x][threadIdx.y]);
}
__global__ void derivativeX_sm_d(const float3 *inputImage, float3 *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
float3 imgValue;
__shared__ float3 u[BW+2][BH];
if (x < iWidth && y < iHeight) {
u[threadIdx.x+1][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x);
if (x == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x-1);
if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x+1);
}
__syncthreads();
if (x < iWidth && y < iHeight) {
imgValue.x = 0.5f*(u[threadIdx.x+2][threadIdx.y].x - u[threadIdx.x][threadIdx.y].x);
imgValue.y = 0.5f*(u[threadIdx.x+2][threadIdx.y].y - u[threadIdx.x][threadIdx.y].y);
imgValue.z = 0.5f*(u[threadIdx.x+2][threadIdx.y].z - u[threadIdx.x][threadIdx.y].z);
*((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = imgValue;
}
}
__global__ void derivativeY_sm_d(const float *inputImage, float *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float u[BW][BH+2];
if (x < iWidth && y < iHeight) {
u[threadIdx.x][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes)+x);
if (y == 0)
u[threadIdx.x][threadIdx.y] = u[threadIdx.x][threadIdx.y+1];
else if (threadIdx.y == 0)
u[threadIdx.x][threadIdx.y] = *((float*)((char*)inputImage + (y-1)*iPitchBytes)+x);
if (y == (iHeight-1))
u[threadIdx.x][threadIdx.y+2] = u[threadIdx.x][threadIdx.y+1];
else if (threadIdx.y == blockDim.y-1)
u[threadIdx.x][threadIdx.y+2] = *((float*)((char*)inputImage + (y+1)*iPitchBytes)+x);
}
__syncthreads();
if (x < iWidth && y < iHeight)
*((float*)(((char*)outputImage) + y*iPitchBytes)+ x) = 0.5f*(u[threadIdx.x][threadIdx.y+2]-u[threadIdx.x][threadIdx.y]);
}
__global__ void derivativeY_sm_d(const float3 *inputImage, float3 *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
float3 imgValue;
__shared__ float3 u[BW][BH+2];
if (x < iWidth && y < iHeight) {
u[threadIdx.x][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x);
if (y == 0)
u[threadIdx.x][threadIdx.y] = u[threadIdx.x][threadIdx.y+1];
else if (threadIdx.y == 0)
u[threadIdx.x][threadIdx.y] = *((float3*)((char*)inputImage + (y-1)*iPitchBytes)+x);
if (y == (iHeight-1))
u[threadIdx.x][threadIdx.y+2] = u[threadIdx.x][threadIdx.y+1];
else if (threadIdx.y == blockDim.y-1)
u[threadIdx.x][threadIdx.y+2] = *((float3*)((char*)inputImage + (y+1)*iPitchBytes)+x);
}
__syncthreads();
if (x < iWidth && y < iHeight) {
imgValue.x = 0.5f*(u[threadIdx.x][threadIdx.y+2].x - u[threadIdx.x][threadIdx.y].x);
imgValue.y = 0.5f*(u[threadIdx.x][threadIdx.y+2].y - u[threadIdx.x][threadIdx.y].y);
imgValue.z = 0.5f*(u[threadIdx.x][threadIdx.y+2].z - u[threadIdx.x][threadIdx.y].z);
*((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = imgValue;
}
}
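// gradient_magnitude_d loads a tile with a halo on all four sides and writes
// sqrt((dI/dx)^2 + (dI/dy)^2) per pixel (computed per channel in the float3 overload).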
__global__ void gradient_magnitude_d(const float *inputImage, float *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float u[BW+2][BH+2];
if (x < iWidth && y < iHeight) {
u[threadIdx.x+1][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes)+x);
if (x == 0)
u[threadIdx.x][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1];
else if (threadIdx.x == 0)
u[threadIdx.x][threadIdx.y+1] = *((float*)((char*)inputImage + (y)*iPitchBytes)+x-1);
if (x == (iWidth-1))
u[threadIdx.x+2][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1];
else if (threadIdx.x == blockDim.x-1)
u[threadIdx.x+2][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes)+x+1);
if (y == 0)
u[threadIdx.x+1][threadIdx.y] = u[threadIdx.x+1][threadIdx.y+1];
else if (threadIdx.y == 0)
u[threadIdx.x+1][threadIdx.y] = *((float*)((char*)inputImage + (y-1)*iPitchBytes)+x);
if (y == (iHeight-1))
u[threadIdx.x+1][threadIdx.y+2] = u[threadIdx.x+1][threadIdx.y+1];
else if (threadIdx.y == blockDim.y-1)
u[threadIdx.x+1][threadIdx.y+2] = *((float*)((char*)inputImage + (y+1)*iPitchBytes)+x);
}
__syncthreads();
//compute the magnitude
float dIdx, dIdy, magn;
if (x < iWidth && y < iHeight) {
dIdx = 0.5f*(u[threadIdx.x+2][threadIdx.y+1]-u[threadIdx.x][threadIdx.y+1]);
dIdy = 0.5f*(u[threadIdx.x+1][threadIdx.y+2]-u[threadIdx.x+1][threadIdx.y]);
magn = sqrt(dIdx*dIdx + dIdy*dIdy);
*((float*)(((char*)outputImage) + y*iPitchBytes)+ x) = magn;
}
}
__global__ void gradient_magnitude_d(const float3 *inputImage, float3 *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float3 u[BW+2][BH+2];
if (x < iWidth && y < iHeight) {
u[threadIdx.x+1][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x);
if (x == 0)
u[threadIdx.x][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1];
else if (threadIdx.x == 0)
u[threadIdx.x][threadIdx.y+1] = *((float3*)((char*)inputImage + (y)*iPitchBytes)+x-1);
if (x == (iWidth-1))
u[threadIdx.x+2][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1];
else if (threadIdx.x == blockDim.x-1)
u[threadIdx.x+2][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x+1);
if (y == 0)
u[threadIdx.x+1][threadIdx.y] = u[threadIdx.x+1][threadIdx.y+1];
else if (threadIdx.y == 0)
u[threadIdx.x+1][threadIdx.y] = *((float3*)((char*)inputImage + (y-1)*iPitchBytes)+x);
if (y == (iHeight-1))
u[threadIdx.x+1][threadIdx.y+2] = u[threadIdx.x+1][threadIdx.y+1];
else if (threadIdx.y == blockDim.y-1)
u[threadIdx.x+1][threadIdx.y+2] = *((float3*)((char*)inputImage + (y+1)*iPitchBytes)+x);
}
__syncthreads();
//compute the magnitude
float3 dIdx, dIdy, magn;
if (x < iWidth && y < iHeight) {
dIdx.x = 0.5f*(u[threadIdx.x+2][threadIdx.y+1].x - u[threadIdx.x][threadIdx.y+1].x);
dIdx.y = 0.5f*(u[threadIdx.x+2][threadIdx.y+1].y - u[threadIdx.x][threadIdx.y+1].y);
dIdx.z = 0.5f*(u[threadIdx.x+2][threadIdx.y+1].z - u[threadIdx.x][threadIdx.y+1].z);
dIdy.x = 0.5f*(u[threadIdx.x+1][threadIdx.y+2].x - u[threadIdx.x+1][threadIdx.y].x);
dIdy.y = 0.5f*(u[threadIdx.x+1][threadIdx.y+2].y - u[threadIdx.x+1][threadIdx.y].y);
dIdy.z = 0.5f*(u[threadIdx.x+1][threadIdx.y+2].z - u[threadIdx.x+1][threadIdx.y].z);
magn.x = sqrt(dIdx.x*dIdx.x + dIdy.x*dIdy.x);
magn.y = sqrt(dIdx.y*dIdx.y + dIdy.y*dIdy.y);
magn.z = sqrt(dIdx.z*dIdx.z + dIdy.z*dIdy.z);
*((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = magn;
}
}
void gpu_derivative_sm_d(const float *inputImage, float *outputImage,
int iWidth, int iHeight, int iSpectrum, int mode)
{
size_t iPitchBytes;
float *inputImage_d = 0, *outputImage_d = 0;
dim3 blockSize(BW, BH);
dim3 gridSize( (int)ceil(iWidth/(float)BW), (int)ceil(iHeight/(float)BH) );
//dim3 smSize(BW+2,BH);
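// mode selects the operator (0: d/dx, 1: d/dy, 2: gradient magnitude); iSpectrum picks the
// grayscale (float) or RGB (float3) path. Images are staged through pitched device memory.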
if(iSpectrum == 1) {
cutilSafeCall( hipMallocPitch( (void**)&(inputImage_d), &iPitchBytes, iWidth*sizeof(float), iHeight ) );
cutilSafeCall( hipMallocPitch( (void**)&(outputImage_d), &iPitchBytes, iWidth*sizeof(float), iHeight ) );
cutilSafeCall( hipMemcpy2D(inputImage_d, iPitchBytes, inputImage, iWidth*sizeof(float), iWidth*sizeof(float), iHeight, hipMemcpyHostToDevice) );
if (mode == 0)
hipLaunchKernelGGL(( derivativeX_sm_d), dim3(gridSize), dim3(blockSize), 0, 0, inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes);
else if (mode == 1)
hipLaunchKernelGGL(( derivativeY_sm_d), dim3(gridSize), dim3(blockSize), 0, 0, inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes);
else if (mode == 2)
hipLaunchKernelGGL(( gradient_magnitude_d), dim3(gridSize), dim3(blockSize), 0, 0, inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes);
cutilSafeCall( hipDeviceSynchronize() );
cutilSafeCall( hipMemcpy2D(outputImage, iWidth*sizeof(float), outputImage_d, iPitchBytes, iWidth*sizeof(float), iHeight, hipMemcpyDeviceToHost) );
}
else if(iSpectrum == 3) {
cutilSafeCall( hipMallocPitch( (void**)&(inputImage_d), &iPitchBytes, iWidth*sizeof(float3), iHeight ) );
cutilSafeCall( hipMallocPitch( (void**)&(outputImage_d), &iPitchBytes, iWidth*sizeof(float3), iHeight ) );
cutilSafeCall( hipMemcpy2D(inputImage_d, iPitchBytes, inputImage, iWidth*sizeof(float3), iWidth*sizeof(float3), iHeight, hipMemcpyHostToDevice) );
if (mode == 0)
hipLaunchKernelGGL(( derivativeX_sm_d), dim3(gridSize), dim3(blockSize), 0, 0, (float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes);
else if (mode == 1)
hipLaunchKernelGGL(( derivativeY_sm_d), dim3(gridSize), dim3(blockSize), 0, 0, (float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes);
else if (mode == 2)
hipLaunchKernelGGL(( gradient_magnitude_d), dim3(gridSize), dim3(blockSize), 0, 0, (float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes);
cutilSafeCall( hipDeviceSynchronize() );
cutilSafeCall( hipMemcpy2D(outputImage, iWidth*sizeof(float3), outputImage_d, iPitchBytes, iWidth*sizeof(float3), iHeight, hipMemcpyDeviceToHost) );
}
cutilSafeCall( hipFree(inputImage_d) );
cutilSafeCall( hipFree(outputImage_d) );
}
| 36a4b8cf32902e1969f0a5508f993b064104aced.cu | /****************************************************************************\
* --- Practical Course: GPU Programming in Computer Vision ---
*
* time: winter term 2012/13 / March 11-18, 2013
*
* project: gradient
* file: gradient.cu
*
*
\******* PLEASE ENTER YOUR CORRECT STUDENT LOGIN, NAME AND ID BELOW *********/
const char* studentLogin = "p107";
const char* studentName = "Marco Servalli";
const int studentID = 3626387;
/****************************************************************************\
*
* In this file the following methods have to be edited or completed:
*
* derivativeY_sm_d(const float *inputImage, ... )
* derivativeY_sm_d(const float3 *inputImage, ... )
* gradient_magnitude_d(const float *inputImage, ... )
* gradient_magnitude_d(const float3 *inputImage, ... )
*
\****************************************************************************/
#include "gradient.cuh"
#define BW 16
#define BH 16
const char* getStudentLogin() { return studentLogin; };
const char* getStudentName() { return studentName; };
int getStudentID() { return studentID; };
bool checkStudentData() { return strcmp(studentLogin, "p010") != 0 && strcmp(studentName, "John Doe") != 0 && studentID != 1234567; };
bool checkStudentNameAndID() { return strcmp(studentName, "John Doe") != 0 && studentID != 1234567; };
__global__ void derivativeX_sm_d(const float *inputImage, float *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float u[BW+2][BH];
if (x < iWidth && y < iHeight) {
u[threadIdx.x+1][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x);
if (x == 0)
u[threadIdx.x][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else if (threadIdx.x == 0)
u[threadIdx.x][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x-1);
if (x == (iWidth-1))
u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else if (threadIdx.x == blockDim.x-1)
u[threadIdx.x+2][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x+1);
}
__syncthreads();
if (x < iWidth && y < iHeight)
*((float*)(((char*)outputImage) + y*iPitchBytes)+ x) = 0.5f*(u[threadIdx.x+2][threadIdx.y]-u[threadIdx.x][threadIdx.y]);
}
__global__ void derivativeX_sm_d(const float3 *inputImage, float3 *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
float3 imgValue;
__shared__ float3 u[BW+2][BH];
if (x < iWidth && y < iHeight) {
u[threadIdx.x+1][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x);
if (x == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x-1);
if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x+1);
}
__syncthreads();
if (x < iWidth && y < iHeight) {
imgValue.x = 0.5f*(u[threadIdx.x+2][threadIdx.y].x - u[threadIdx.x][threadIdx.y].x);
imgValue.y = 0.5f*(u[threadIdx.x+2][threadIdx.y].y - u[threadIdx.x][threadIdx.y].y);
imgValue.z = 0.5f*(u[threadIdx.x+2][threadIdx.y].z - u[threadIdx.x][threadIdx.y].z);
*((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = imgValue;
}
}
__global__ void derivativeY_sm_d(const float *inputImage, float *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float u[BW][BH+2];
if (x < iWidth && y < iHeight) {
u[threadIdx.x][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes)+x);
if (y == 0)
u[threadIdx.x][threadIdx.y] = u[threadIdx.x][threadIdx.y+1];
else if (threadIdx.y == 0)
u[threadIdx.x][threadIdx.y] = *((float*)((char*)inputImage + (y-1)*iPitchBytes)+x);
if (y == (iHeight-1))
u[threadIdx.x][threadIdx.y+2] = u[threadIdx.x][threadIdx.y+1];
else if (threadIdx.y == blockDim.y-1)
u[threadIdx.x][threadIdx.y+2] = *((float*)((char*)inputImage + (y+1)*iPitchBytes)+x);
}
__syncthreads();
if (x < iWidth && y < iHeight)
*((float*)(((char*)outputImage) + y*iPitchBytes)+ x) = 0.5f*(u[threadIdx.x][threadIdx.y+2]-u[threadIdx.x][threadIdx.y]);
}
__global__ void derivativeY_sm_d(const float3 *inputImage, float3 *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
float3 imgValue;
__shared__ float3 u[BW][BH+2];
if (x < iWidth && y < iHeight) {
u[threadIdx.x][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x);
if (y == 0)
u[threadIdx.x][threadIdx.y] = u[threadIdx.x][threadIdx.y+1];
else if (threadIdx.y == 0)
u[threadIdx.x][threadIdx.y] = *((float3*)((char*)inputImage + (y-1)*iPitchBytes)+x);
if (y == (iHeight-1))
u[threadIdx.x][threadIdx.y+2] = u[threadIdx.x][threadIdx.y+1];
else if (threadIdx.y == blockDim.y-1)
u[threadIdx.x][threadIdx.y+2] = *((float3*)((char*)inputImage + (y+1)*iPitchBytes)+x);
}
__syncthreads();
if (x < iWidth && y < iHeight) {
imgValue.x = 0.5f*(u[threadIdx.x][threadIdx.y+2].x - u[threadIdx.x][threadIdx.y].x);
imgValue.y = 0.5f*(u[threadIdx.x][threadIdx.y+2].y - u[threadIdx.x][threadIdx.y].y);
imgValue.z = 0.5f*(u[threadIdx.x][threadIdx.y+2].z - u[threadIdx.x][threadIdx.y].z);
*((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = imgValue;
}
}
__global__ void gradient_magnitude_d(const float *inputImage, float *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float u[BW+2][BH+2];
if (x < iWidth && y < iHeight) {
u[threadIdx.x+1][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes)+x);
if (x == 0)
u[threadIdx.x][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1];
else if (threadIdx.x == 0)
u[threadIdx.x][threadIdx.y+1] = *((float*)((char*)inputImage + (y)*iPitchBytes)+x-1);
if (x == (iWidth-1))
u[threadIdx.x+2][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1];
else if (threadIdx.x == blockDim.x-1)
u[threadIdx.x+2][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes)+x+1);
if (y == 0)
u[threadIdx.x+1][threadIdx.y] = u[threadIdx.x+1][threadIdx.y+1];
else if (threadIdx.y == 0)
u[threadIdx.x+1][threadIdx.y] = *((float*)((char*)inputImage + (y-1)*iPitchBytes)+x);
if (y == (iHeight-1))
u[threadIdx.x+1][threadIdx.y+2] = u[threadIdx.x+1][threadIdx.y+1];
else if (threadIdx.y == blockDim.y-1)
u[threadIdx.x+1][threadIdx.y+2] = *((float*)((char*)inputImage + (y+1)*iPitchBytes)+x);
}
__syncthreads();
//compute the magnitude
float dIdx, dIdy, magn;
if (x < iWidth && y < iHeight) {
dIdx = 0.5f*(u[threadIdx.x+2][threadIdx.y+1]-u[threadIdx.x][threadIdx.y+1]);
dIdy = 0.5f*(u[threadIdx.x+1][threadIdx.y+2]-u[threadIdx.x+1][threadIdx.y]);
magn = sqrt(dIdx*dIdx + dIdy*dIdy);
*((float*)(((char*)outputImage) + y*iPitchBytes)+ x) = magn;
}
}
__global__ void gradient_magnitude_d(const float3 *inputImage, float3 *outputImage,
int iWidth, int iHeight, size_t iPitchBytes)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float3 u[BW+2][BH+2];
if (x < iWidth && y < iHeight) {
u[threadIdx.x+1][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x);
if (x == 0)
u[threadIdx.x][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1];
else if (threadIdx.x == 0)
u[threadIdx.x][threadIdx.y+1] = *((float3*)((char*)inputImage + (y)*iPitchBytes)+x-1);
if (x == (iWidth-1))
u[threadIdx.x+2][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1];
else if (threadIdx.x == blockDim.x-1)
u[threadIdx.x+2][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x+1);
if (y == 0)
u[threadIdx.x+1][threadIdx.y] = u[threadIdx.x+1][threadIdx.y+1];
else if (threadIdx.y == 0)
u[threadIdx.x+1][threadIdx.y] = *((float3*)((char*)inputImage + (y-1)*iPitchBytes)+x);
if (y == (iHeight-1))
u[threadIdx.x+1][threadIdx.y+2] = u[threadIdx.x+1][threadIdx.y+1];
else if (threadIdx.y == blockDim.y-1)
u[threadIdx.x+1][threadIdx.y+2] = *((float3*)((char*)inputImage + (y+1)*iPitchBytes)+x);
}
__syncthreads();
//compute the magnitude
float3 dIdx, dIdy, magn;
if (x < iWidth && y < iHeight) {
dIdx.x = 0.5f*(u[threadIdx.x+2][threadIdx.y+1].x - u[threadIdx.x][threadIdx.y+1].x);
dIdx.y = 0.5f*(u[threadIdx.x+2][threadIdx.y+1].y - u[threadIdx.x][threadIdx.y+1].y);
dIdx.z = 0.5f*(u[threadIdx.x+2][threadIdx.y+1].z - u[threadIdx.x][threadIdx.y+1].z);
dIdy.x = 0.5f*(u[threadIdx.x+1][threadIdx.y+2].x - u[threadIdx.x+1][threadIdx.y].x);
dIdy.y = 0.5f*(u[threadIdx.x+1][threadIdx.y+2].y - u[threadIdx.x+1][threadIdx.y].y);
dIdy.z = 0.5f*(u[threadIdx.x+1][threadIdx.y+2].z - u[threadIdx.x+1][threadIdx.y].z);
magn.x = sqrt(dIdx.x*dIdx.x + dIdy.x*dIdy.x);
magn.y = sqrt(dIdx.y*dIdx.y + dIdy.y*dIdy.y);
magn.z = sqrt(dIdx.z*dIdx.z + dIdy.z*dIdy.z);
*((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = magn;
}
}
void gpu_derivative_sm_d(const float *inputImage, float *outputImage,
int iWidth, int iHeight, int iSpectrum, int mode)
{
size_t iPitchBytes;
float *inputImage_d = 0, *outputImage_d = 0;
dim3 blockSize(BW, BH);
dim3 gridSize( (int)ceil(iWidth/(float)BW), (int)ceil(iHeight/(float)BH) );
//dim3 smSize(BW+2,BH);
if(iSpectrum == 1) {
cutilSafeCall( cudaMallocPitch( (void**)&(inputImage_d), &iPitchBytes, iWidth*sizeof(float), iHeight ) );
cutilSafeCall( cudaMallocPitch( (void**)&(outputImage_d), &iPitchBytes, iWidth*sizeof(float), iHeight ) );
cutilSafeCall( cudaMemcpy2D(inputImage_d, iPitchBytes, inputImage, iWidth*sizeof(float), iWidth*sizeof(float), iHeight, cudaMemcpyHostToDevice) );
if (mode == 0)
derivativeX_sm_d<<<gridSize, blockSize>>>(inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes);
else if (mode == 1)
derivativeY_sm_d<<<gridSize, blockSize>>>(inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes);
else if (mode == 2)
gradient_magnitude_d<<<gridSize, blockSize>>>(inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes);
cutilSafeCall( cudaThreadSynchronize() );
cutilSafeCall( cudaMemcpy2D(outputImage, iWidth*sizeof(float), outputImage_d, iPitchBytes, iWidth*sizeof(float), iHeight, cudaMemcpyDeviceToHost) );
}
else if(iSpectrum == 3) {
cutilSafeCall( cudaMallocPitch( (void**)&(inputImage_d), &iPitchBytes, iWidth*sizeof(float3), iHeight ) );
cutilSafeCall( cudaMallocPitch( (void**)&(outputImage_d), &iPitchBytes, iWidth*sizeof(float3), iHeight ) );
cutilSafeCall( cudaMemcpy2D(inputImage_d, iPitchBytes, inputImage, iWidth*sizeof(float3), iWidth*sizeof(float3), iHeight, cudaMemcpyHostToDevice) );
if (mode == 0)
derivativeX_sm_d<<<gridSize, blockSize>>>((float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes);
else if (mode == 1)
derivativeY_sm_d<<<gridSize, blockSize>>>((float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes);
else if (mode == 2)
gradient_magnitude_d<<<gridSize, blockSize>>>((float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes);
cutilSafeCall( cudaThreadSynchronize() );
cutilSafeCall( cudaMemcpy2D(outputImage, iWidth*sizeof(float3), outputImage_d, iPitchBytes, iWidth*sizeof(float3), iHeight, cudaMemcpyDeviceToHost) );
}
cutilSafeCall( cudaFree(inputImage_d) );
cutilSafeCall( cudaFree(outputImage_d) );
}
|
d709bba3cb57715b26d67e83092afa4538c4ef5b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2014-2017 Oxford University Innovation Limited and the authors of ITM
#include "ITMViewBuilder_CUDA.h"
#include "../Shared/ITMViewBuilder_Shared.h"
#include "../../../../ORUtils/CUDADefines.h"
#include "../../../../ORUtils/MemoryBlock.h"
using namespace ITMLib;
using namespace ORUtils;
ITMViewBuilder_CUDA::ITMViewBuilder_CUDA(const ITMRGBDCalib& calib):ITMViewBuilder(calib) { }
ITMViewBuilder_CUDA::~ITMViewBuilder_CUDA(void) { }
//---------------------------------------------------------------------------
//
// kernel function declaration
//
//---------------------------------------------------------------------------
__global__ void convertDisparityToDepth_device(float *depth_out, const short *depth_in, Vector2f disparityCalibParams, float fx_depth, Vector2i imgSize);
__global__ void convertDepthAffineToFloat_device(float *d_out, const short *d_in, Vector2i imgSize, Vector2f depthCalibParams);
__global__ void filterDepth_device(float *imageData_out, const float *imageData_in, Vector2i imgDims);
__global__ void ComputeNormalAndWeight_device(const float* depth_in, Vector4f* normal_out, float *sigmaZ_out, Vector2i imgDims, Vector4f intrinsic);
//---------------------------------------------------------------------------
//
// host methods
//
//---------------------------------------------------------------------------
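// UpdateView: uploads the RGB and raw short depth images, converts depth to metric float
// values (Kinect disparity or affine calibration), optionally applies five ping-pong passes
// of bilateral filtering, and optionally computes per-pixel normals and depth uncertainty.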
void ITMViewBuilder_CUDA::UpdateView(ITMView **view_ptr, ITMUChar4Image *rgbImage, ITMShortImage *rawDepthImage, bool useBilateralFilter, bool modelSensorNoise, bool storePreviousImage)
{
if (*view_ptr == NULL)
{
*view_ptr = new ITMView(calib, rgbImage->noDims, rawDepthImage->noDims, true);
if (this->shortImage != NULL) delete this->shortImage;
this->shortImage = new ITMShortImage(rawDepthImage->noDims, true, true);
if (this->floatImage != NULL) delete this->floatImage;
this->floatImage = new ITMFloatImage(rawDepthImage->noDims, true, true);
if (modelSensorNoise)
{
(*view_ptr)->depthNormal = new ITMFloat4Image(rawDepthImage->noDims, true, true);
(*view_ptr)->depthUncertainty = new ITMFloatImage(rawDepthImage->noDims, true, true);
}
}
ITMView *view = *view_ptr;
if (storePreviousImage)
{
if (!view->rgb_prev) view->rgb_prev = new ITMUChar4Image(rgbImage->noDims, true, true);
else view->rgb_prev->SetFrom(view->rgb, MemoryBlock<Vector4u>::CUDA_TO_CUDA);
}
view->rgb->SetFrom(rgbImage, MemoryBlock<Vector4u>::CPU_TO_CUDA);
this->shortImage->SetFrom(rawDepthImage, MemoryBlock<short>::CPU_TO_CUDA);
switch (view->calib.disparityCalib.GetType())
{
case ITMDisparityCalib::TRAFO_KINECT:
this->ConvertDisparityToDepth(view->depth, this->shortImage, &(view->calib.intrinsics_d), view->calib.disparityCalib.GetParams());
break;
case ITMDisparityCalib::TRAFO_AFFINE:
this->ConvertDepthAffineToFloat(view->depth, this->shortImage, view->calib.disparityCalib.GetParams());
break;
default:
break;
}
if (useBilateralFilter)
{
//5 steps of bilateral filtering
this->DepthFiltering(this->floatImage, view->depth);
this->DepthFiltering(view->depth, this->floatImage);
this->DepthFiltering(this->floatImage, view->depth);
this->DepthFiltering(view->depth, this->floatImage);
this->DepthFiltering(this->floatImage, view->depth);
view->depth->SetFrom(this->floatImage, MemoryBlock<float>::CUDA_TO_CUDA);
}
if (modelSensorNoise)
{
this->ComputeNormalAndWeights(view->depthNormal, view->depthUncertainty, view->depth, view->calib.intrinsics_d.projectionParamsSimple.all);
}
}
void ITMViewBuilder_CUDA::UpdateView(ITMView **view_ptr, ITMUChar4Image *rgbImage, ITMShortImage *depthImage, bool useBilateralFilter, ITMIMUMeasurement *imuMeasurement, bool modelSensorNoise, bool storePreviousImage)
{
if (*view_ptr == NULL)
{
*view_ptr = new ITMViewIMU(calib, rgbImage->noDims, depthImage->noDims, true);
if (this->shortImage != NULL) delete this->shortImage;
this->shortImage = new ITMShortImage(depthImage->noDims, true, true);
if (this->floatImage != NULL) delete this->floatImage;
this->floatImage = new ITMFloatImage(depthImage->noDims, true, true);
if (modelSensorNoise)
{
(*view_ptr)->depthNormal = new ITMFloat4Image(depthImage->noDims, true, true);
(*view_ptr)->depthUncertainty = new ITMFloatImage(depthImage->noDims, true, true);
}
}
ITMViewIMU* imuView = (ITMViewIMU*)(*view_ptr);
imuView->imu->SetFrom(imuMeasurement);
this->UpdateView(view_ptr, rgbImage, depthImage, useBilateralFilter, modelSensorNoise, storePreviousImage);
}
void ITMViewBuilder_CUDA::ConvertDisparityToDepth(ITMFloatImage *depth_out, const ITMShortImage *depth_in, const ITMIntrinsics *depthIntrinsics,
Vector2f disparityCalibParams)
{
Vector2i imgSize = depth_in->noDims;
const short *d_in = depth_in->GetData(MEMORYDEVICE_CUDA);
float *d_out = depth_out->GetData(MEMORYDEVICE_CUDA);
float fx_depth = depthIntrinsics->projectionParamsSimple.fx;
dim3 blockSize(16, 16);
dim3 gridSize((int)ceil((float)imgSize.x / (float)blockSize.x), (int)ceil((float)imgSize.y / (float)blockSize.y));
convertDisparityToDepth_device << <gridSize, blockSize >> >(d_out, d_in, disparityCalibParams, fx_depth, imgSize);
ORcudaKernelCheck;
}
void ITMViewBuilder_CUDA::ConvertDepthAffineToFloat(ITMFloatImage *depth_out, const ITMShortImage *depth_in, Vector2f depthCalibParams)
{
Vector2i imgSize = depth_in->noDims;
const short *d_in = depth_in->GetData(MEMORYDEVICE_CUDA);
float *d_out = depth_out->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(16, 16);
dim3 gridSize((int)ceil((float)imgSize.x / (float)blockSize.x), (int)ceil((float)imgSize.y / (float)blockSize.y));
convertDepthAffineToFloat_device << <gridSize, blockSize >> >(d_out, d_in, imgSize, depthCalibParams);
ORcudaKernelCheck;
}
void ITMViewBuilder_CUDA::DepthFiltering(ITMFloatImage *image_out, const ITMFloatImage *image_in)
{
Vector2i imgDims = image_in->noDims;
const float *imageData_in = image_in->GetData(MEMORYDEVICE_CUDA);
float *imageData_out = image_out->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(16, 16);
dim3 gridSize((int)ceil((float)imgDims.x / (float)blockSize.x), (int)ceil((float)imgDims.y / (float)blockSize.y));
filterDepth_device << <gridSize, blockSize >> >(imageData_out, imageData_in, imgDims);
ORcudaKernelCheck;
}
void ITMViewBuilder_CUDA::ComputeNormalAndWeights(ITMFloat4Image *normal_out, ITMFloatImage *sigmaZ_out, const ITMFloatImage *depth_in, Vector4f intrinsic)
{
Vector2i imgDims = depth_in->noDims;
const float *depthData_in = depth_in->GetData(MEMORYDEVICE_CUDA);
float *sigmaZData_out = sigmaZ_out->GetData(MEMORYDEVICE_CUDA);
Vector4f *normalData_out = normal_out->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(16, 16);
dim3 gridSize((int)ceil((float)imgDims.x / (float)blockSize.x), (int)ceil((float)imgDims.y / (float)blockSize.y));
ComputeNormalAndWeight_device << <gridSize, blockSize >> >(depthData_in, normalData_out, sigmaZData_out, imgDims, intrinsic);
ORcudaKernelCheck;
}
//---------------------------------------------------------------------------
//
// kernel function implementation
//
//---------------------------------------------------------------------------
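// Each kernel maps one thread to one pixel, guards against out-of-range / border pixels,
// and forwards to the corresponding per-pixel helper (convertDisparityToDepth,
// convertDepthAffineToFloat, filterDepth, computeNormalAndWeight); the normal/weight
// kernel marks skipped border pixels with -1.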
__global__ void convertDisparityToDepth_device(float *d_out, const short *d_in, Vector2f disparityCalibParams, float fx_depth, Vector2i imgSize)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if ((x >= imgSize.x) || (y >= imgSize.y)) return;
convertDisparityToDepth(d_out, x, y, d_in, disparityCalibParams, fx_depth, imgSize);
}
__global__ void convertDepthAffineToFloat_device(float *d_out, const short *d_in, Vector2i imgSize, Vector2f depthCalibParams)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if ((x >= imgSize.x) || (y >= imgSize.y)) return;
convertDepthAffineToFloat(d_out, x, y, d_in, imgSize, depthCalibParams);
}
__global__ void filterDepth_device(float *imageData_out, const float *imageData_in, Vector2i imgDims)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < 2 || x > imgDims.x - 2 || y < 2 || y > imgDims.y - 2) return;
filterDepth(imageData_out, imageData_in, x, y, imgDims);
}
__global__ void ComputeNormalAndWeight_device(const float* depth_in, Vector4f* normal_out, float *sigmaZ_out, Vector2i imgDims, Vector4f intrinsic)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
int idx = x + y * imgDims.x;
if (x < 2 || x > imgDims.x - 2 || y < 2 || y > imgDims.y - 2)
{
normal_out[idx].w = -1.0f;
sigmaZ_out[idx] = -1;
return;
}
else
{
computeNormalAndWeight(depth_in, normal_out, sigmaZ_out, x, y, imgDims, intrinsic);
}
}
| d709bba3cb57715b26d67e83092afa4538c4ef5b.cu | // Copyright 2014-2017 Oxford University Innovation Limited and the authors of ITM
#include "ITMViewBuilder_CUDA.h"
#include "../Shared/ITMViewBuilder_Shared.h"
#include "../../../../ORUtils/CUDADefines.h"
#include "../../../../ORUtils/MemoryBlock.h"
using namespace ITMLib;
using namespace ORUtils;
ITMViewBuilder_CUDA::ITMViewBuilder_CUDA(const ITMRGBDCalib& calib):ITMViewBuilder(calib) { }
ITMViewBuilder_CUDA::~ITMViewBuilder_CUDA(void) { }
//---------------------------------------------------------------------------
//
// kernel function declaration
//
//---------------------------------------------------------------------------
__global__ void convertDisparityToDepth_device(float *depth_out, const short *depth_in, Vector2f disparityCalibParams, float fx_depth, Vector2i imgSize);
__global__ void convertDepthAffineToFloat_device(float *d_out, const short *d_in, Vector2i imgSize, Vector2f depthCalibParams);
__global__ void filterDepth_device(float *imageData_out, const float *imageData_in, Vector2i imgDims);
__global__ void ComputeNormalAndWeight_device(const float* depth_in, Vector4f* normal_out, float *sigmaZ_out, Vector2i imgDims, Vector4f intrinsic);
//---------------------------------------------------------------------------
//
// host methods
//
//---------------------------------------------------------------------------
void ITMViewBuilder_CUDA::UpdateView(ITMView **view_ptr, ITMUChar4Image *rgbImage, ITMShortImage *rawDepthImage, bool useBilateralFilter, bool modelSensorNoise, bool storePreviousImage)
{
if (*view_ptr == NULL)
{
*view_ptr = new ITMView(calib, rgbImage->noDims, rawDepthImage->noDims, true);
if (this->shortImage != NULL) delete this->shortImage;
this->shortImage = new ITMShortImage(rawDepthImage->noDims, true, true);
if (this->floatImage != NULL) delete this->floatImage;
this->floatImage = new ITMFloatImage(rawDepthImage->noDims, true, true);
if (modelSensorNoise)
{
(*view_ptr)->depthNormal = new ITMFloat4Image(rawDepthImage->noDims, true, true);
(*view_ptr)->depthUncertainty = new ITMFloatImage(rawDepthImage->noDims, true, true);
}
}
ITMView *view = *view_ptr;
if (storePreviousImage)
{
if (!view->rgb_prev) view->rgb_prev = new ITMUChar4Image(rgbImage->noDims, true, true);
else view->rgb_prev->SetFrom(view->rgb, MemoryBlock<Vector4u>::CUDA_TO_CUDA);
}
view->rgb->SetFrom(rgbImage, MemoryBlock<Vector4u>::CPU_TO_CUDA);
this->shortImage->SetFrom(rawDepthImage, MemoryBlock<short>::CPU_TO_CUDA);
switch (view->calib.disparityCalib.GetType())
{
case ITMDisparityCalib::TRAFO_KINECT:
this->ConvertDisparityToDepth(view->depth, this->shortImage, &(view->calib.intrinsics_d), view->calib.disparityCalib.GetParams());
break;
case ITMDisparityCalib::TRAFO_AFFINE:
this->ConvertDepthAffineToFloat(view->depth, this->shortImage, view->calib.disparityCalib.GetParams());
break;
default:
break;
}
if (useBilateralFilter)
{
//5 steps of bilateral filtering
this->DepthFiltering(this->floatImage, view->depth);
this->DepthFiltering(view->depth, this->floatImage);
this->DepthFiltering(this->floatImage, view->depth);
this->DepthFiltering(view->depth, this->floatImage);
this->DepthFiltering(this->floatImage, view->depth);
view->depth->SetFrom(this->floatImage, MemoryBlock<float>::CUDA_TO_CUDA);
}
if (modelSensorNoise)
{
this->ComputeNormalAndWeights(view->depthNormal, view->depthUncertainty, view->depth, view->calib.intrinsics_d.projectionParamsSimple.all);
}
}
void ITMViewBuilder_CUDA::UpdateView(ITMView **view_ptr, ITMUChar4Image *rgbImage, ITMShortImage *depthImage, bool useBilateralFilter, ITMIMUMeasurement *imuMeasurement, bool modelSensorNoise, bool storePreviousImage)
{
if (*view_ptr == NULL)
{
*view_ptr = new ITMViewIMU(calib, rgbImage->noDims, depthImage->noDims, true);
if (this->shortImage != NULL) delete this->shortImage;
this->shortImage = new ITMShortImage(depthImage->noDims, true, true);
if (this->floatImage != NULL) delete this->floatImage;
this->floatImage = new ITMFloatImage(depthImage->noDims, true, true);
if (modelSensorNoise)
{
(*view_ptr)->depthNormal = new ITMFloat4Image(depthImage->noDims, true, true);
(*view_ptr)->depthUncertainty = new ITMFloatImage(depthImage->noDims, true, true);
}
}
ITMViewIMU* imuView = (ITMViewIMU*)(*view_ptr);
imuView->imu->SetFrom(imuMeasurement);
this->UpdateView(view_ptr, rgbImage, depthImage, useBilateralFilter, modelSensorNoise, storePreviousImage);
}
void ITMViewBuilder_CUDA::ConvertDisparityToDepth(ITMFloatImage *depth_out, const ITMShortImage *depth_in, const ITMIntrinsics *depthIntrinsics,
Vector2f disparityCalibParams)
{
Vector2i imgSize = depth_in->noDims;
const short *d_in = depth_in->GetData(MEMORYDEVICE_CUDA);
float *d_out = depth_out->GetData(MEMORYDEVICE_CUDA);
float fx_depth = depthIntrinsics->projectionParamsSimple.fx;
dim3 blockSize(16, 16);
dim3 gridSize((int)ceil((float)imgSize.x / (float)blockSize.x), (int)ceil((float)imgSize.y / (float)blockSize.y));
convertDisparityToDepth_device << <gridSize, blockSize >> >(d_out, d_in, disparityCalibParams, fx_depth, imgSize);
ORcudaKernelCheck;
}
void ITMViewBuilder_CUDA::ConvertDepthAffineToFloat(ITMFloatImage *depth_out, const ITMShortImage *depth_in, Vector2f depthCalibParams)
{
Vector2i imgSize = depth_in->noDims;
const short *d_in = depth_in->GetData(MEMORYDEVICE_CUDA);
float *d_out = depth_out->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(16, 16);
dim3 gridSize((int)ceil((float)imgSize.x / (float)blockSize.x), (int)ceil((float)imgSize.y / (float)blockSize.y));
convertDepthAffineToFloat_device << <gridSize, blockSize >> >(d_out, d_in, imgSize, depthCalibParams);
ORcudaKernelCheck;
}
void ITMViewBuilder_CUDA::DepthFiltering(ITMFloatImage *image_out, const ITMFloatImage *image_in)
{
Vector2i imgDims = image_in->noDims;
const float *imageData_in = image_in->GetData(MEMORYDEVICE_CUDA);
float *imageData_out = image_out->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(16, 16);
dim3 gridSize((int)ceil((float)imgDims.x / (float)blockSize.x), (int)ceil((float)imgDims.y / (float)blockSize.y));
filterDepth_device << <gridSize, blockSize >> >(imageData_out, imageData_in, imgDims);
ORcudaKernelCheck;
}
void ITMViewBuilder_CUDA::ComputeNormalAndWeights(ITMFloat4Image *normal_out, ITMFloatImage *sigmaZ_out, const ITMFloatImage *depth_in, Vector4f intrinsic)
{
Vector2i imgDims = depth_in->noDims;
const float *depthData_in = depth_in->GetData(MEMORYDEVICE_CUDA);
float *sigmaZData_out = sigmaZ_out->GetData(MEMORYDEVICE_CUDA);
Vector4f *normalData_out = normal_out->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(16, 16);
dim3 gridSize((int)ceil((float)imgDims.x / (float)blockSize.x), (int)ceil((float)imgDims.y / (float)blockSize.y));
ComputeNormalAndWeight_device << <gridSize, blockSize >> >(depthData_in, normalData_out, sigmaZData_out, imgDims, intrinsic);
ORcudaKernelCheck;
}
//---------------------------------------------------------------------------
//
// kernel function implementation
//
//---------------------------------------------------------------------------
__global__ void convertDisparityToDepth_device(float *d_out, const short *d_in, Vector2f disparityCalibParams, float fx_depth, Vector2i imgSize)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if ((x >= imgSize.x) || (y >= imgSize.y)) return;
convertDisparityToDepth(d_out, x, y, d_in, disparityCalibParams, fx_depth, imgSize);
}
__global__ void convertDepthAffineToFloat_device(float *d_out, const short *d_in, Vector2i imgSize, Vector2f depthCalibParams)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if ((x >= imgSize.x) || (y >= imgSize.y)) return;
convertDepthAffineToFloat(d_out, x, y, d_in, imgSize, depthCalibParams);
}
__global__ void filterDepth_device(float *imageData_out, const float *imageData_in, Vector2i imgDims)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < 2 || x > imgDims.x - 2 || y < 2 || y > imgDims.y - 2) return;
filterDepth(imageData_out, imageData_in, x, y, imgDims);
}
__global__ void ComputeNormalAndWeight_device(const float* depth_in, Vector4f* normal_out, float *sigmaZ_out, Vector2i imgDims, Vector4f intrinsic)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
int idx = x + y * imgDims.x;
if (x < 2 || x > imgDims.x - 2 || y < 2 || y > imgDims.y - 2)
{
normal_out[idx].w = -1.0f;
sigmaZ_out[idx] = -1;
return;
}
else
{
computeNormalAndWeight(depth_in, normal_out, sigmaZ_out, x, y, imgDims, intrinsic);
}
}
|
8b4e88cd0f36eed25f729a705cd06dc808b5cc3d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************************************************
* Dwarf Mine - The 13-11 Benchmark
*
* Copyright (c) 2013 Bünger, Thomas; Kieschnick, Christian; Kusber,
* Michael; Lohse, Henning; Wuttke, Nikolai; Xylander, Oliver; Yao, Gary;
* Zimmermann, Florian
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*****************************************************************************/
#include <stdio.h>
#include <assert.h>
#include "MatrixMultiplication.h"
const size_t BLOCK_SIZE = DEFAULT_BLOCK_SIZE;
__device__ int div_ceil_d(int x, int y)
{
// return 1 + ((x - 1) / y);
// return (x + y - 1) / y;
return (x % y) ? x / y + 1 : x / y;
}
struct Matrix
{
int cols;
int rows;
int stride;
float* data;
};
__device__ void setElement(Matrix m, int row, int col, float value)
{
if (row >= m.rows || col >= m.cols) return;
m.data[(m.stride * row) + col] = value;
}
__device__ float getElement(Matrix m, int row, int col)
{
if (row >= m.rows || col >= m.cols) return 0.0f;
return m.data[(m.stride * row) + col];
}
__device__ Matrix getSubMatrix(Matrix m, int blockRow, int blockColumn)
{
Matrix n;
n.rows = ((blockRow+1)*blockDim.x > m.rows) ? (m.rows - blockRow*blockDim.x) : blockDim.x;
n.cols = ((blockColumn+1)*blockDim.x > m.cols) ? (m.cols - blockColumn*blockDim.x) : blockDim.x;
n.stride = m.stride;
n.data = &m.data[blockRow * m.stride * blockDim.x + blockColumn * blockDim.x];
return n;
}
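// Tiled matrix multiply C(m x n) = A(m x k) * B(k x n): each thread block produces one
// BLOCK_SIZE x BLOCK_SIZE output tile, streaming matching tiles of A and B through shared
// memory and accumulating the dot product with fused multiply-adds.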
__global__ void gemmKernel(int m, int n, int k, float* left, float* right, float* out)
{
Matrix leftMatrix;
leftMatrix.rows = m;
leftMatrix.cols = k;
leftMatrix.stride = k;
leftMatrix.data = left;
Matrix rightMatrix;
rightMatrix.rows = k;
rightMatrix.cols = n;
rightMatrix.stride = n;
rightMatrix.data = right;
Matrix outMatrix;
outMatrix.rows = m;
outMatrix.cols = n;
outMatrix.stride = n;
outMatrix.data = out;
int blockRow = blockIdx.y;
int blockColumn = blockIdx.x;
int row = threadIdx.y;
int col = threadIdx.x;
Matrix outSub = getSubMatrix(outMatrix, blockRow, blockColumn);
float sum = 0.0f;
for (int block = 0, end= div_ceil_d(leftMatrix.cols, blockDim.x); block < end ; ++block)
{
__shared__ float leftSub_s[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float rightSub_s[BLOCK_SIZE][BLOCK_SIZE];
Matrix leftSub = getSubMatrix(leftMatrix, blockRow, block);
Matrix rightSub = getSubMatrix(rightMatrix, block, blockColumn);
leftSub_s[row][col] = getElement(leftSub, row, col);
rightSub_s[row][col] = getElement(rightSub, row, col);
__syncthreads();
for (int i = 0; i < blockDim.x; ++i)
{
sum = __fmaf_rn(leftSub_s[row][i], rightSub_s[i][col], sum);
//sum += leftSub_s[row][i] * rightSub_s[i][col];
}
// Wait until all threads are done reading the current tiles before the next iteration
// overwrites the shared-memory buffers.
__syncthreads();
}
setElement(outSub, row, col, sum);
}
| 8b4e88cd0f36eed25f729a705cd06dc808b5cc3d.cu | /*****************************************************************************
* Dwarf Mine - The 13-11 Benchmark
*
* Copyright (c) 2013 Bünger, Thomas; Kieschnick, Christian; Kusber,
* Michael; Lohse, Henning; Wuttke, Nikolai; Xylander, Oliver; Yao, Gary;
* Zimmermann, Florian
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*****************************************************************************/
#include <stdio.h>
#include <assert.h>
#include "MatrixMultiplication.h"
const size_t BLOCK_SIZE = DEFAULT_BLOCK_SIZE;
__device__ int div_ceil_d(int x, int y)
{
// return 1 + ((x - 1) / y);
// return (x + y - 1) / y;
return (x % y) ? x / y + 1 : x / y;
}
struct Matrix
{
int cols;
int rows;
int stride;
float* data;
};
__device__ void setElement(Matrix m, int row, int col, float value)
{
if (row >= m.rows || col >= m.cols) return;
m.data[(m.stride * row) + col] = value;
}
__device__ float getElement(Matrix m, int row, int col)
{
if (row >= m.rows || col >= m.cols) return 0.0f;
return m.data[(m.stride * row) + col];
}
__device__ Matrix getSubMatrix(Matrix m, int blockRow, int blockColumn)
{
Matrix n;
n.rows = ((blockRow+1)*blockDim.x > m.rows) ? (m.rows - blockRow*blockDim.x) : blockDim.x;
n.cols = ((blockColumn+1)*blockDim.x > m.cols) ? (m.cols - blockColumn*blockDim.x) : blockDim.x;
n.stride = m.stride;
n.data = &m.data[blockRow * m.stride * blockDim.x + blockColumn * blockDim.x];
return n;
}
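// Shared-memory tiled GEMM: one output tile per thread block, one output element per thread.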
__global__ void gemmKernel(int m, int n, int k, float* left, float* right, float* out)
{
Matrix leftMatrix;
leftMatrix.rows = m;
leftMatrix.cols = k;
leftMatrix.stride = k;
leftMatrix.data = left;
Matrix rightMatrix;
rightMatrix.rows = k;
rightMatrix.cols = n;
rightMatrix.stride = n;
rightMatrix.data = right;
Matrix outMatrix;
outMatrix.rows = m;
outMatrix.cols = n;
outMatrix.stride = n;
outMatrix.data = out;
int blockRow = blockIdx.y;
int blockColumn = blockIdx.x;
int row = threadIdx.y;
int col = threadIdx.x;
Matrix outSub = getSubMatrix(outMatrix, blockRow, blockColumn);
float sum = 0.0f;
for (int block = 0, end= div_ceil_d(leftMatrix.cols, blockDim.x); block < end ; ++block)
{
__shared__ float leftSub_s[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float rightSub_s[BLOCK_SIZE][BLOCK_SIZE];
Matrix leftSub = getSubMatrix(leftMatrix, blockRow, block);
Matrix rightSub = getSubMatrix(rightMatrix, block, blockColumn);
leftSub_s[row][col] = getElement(leftSub, row, col);
rightSub_s[row][col] = getElement(rightSub, row, col);
__syncthreads();
for (int i = 0; i < blockDim.x; ++i)
{
sum = __fmaf_rn(leftSub_s[row][i], rightSub_s[i][col], sum);
//sum += leftSub_s[row][i] * rightSub_s[i][col];
}
// Synchronize before the next iteration overwrites the shared-memory tiles.
__syncthreads();
}
setElement(outSub, row, col, sum);
}
|
84f038048937dfc2e1fe3ff8710539cec1ef63e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include "Structs.h"
#include "scheduler19.h"
#include "graph28.h"
#include "kernelconfig.h"
#include "list.h"
#include <hipcub/hipcub.hpp>
#include "myutils.h"
#include "myutils2.h"
double cpu_ratiotime,gpu_ratiotime;
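// Host-side initialisation helpers: nodedist holds BFS distances (MYINFINITY = unreached)
// and nodesigma shortest-path counts; the multi-source variants seed several sources with
// externally supplied distance/sigma values.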
void initnodesigmadist(unsigned source, unsigned nodes, unsigned* nodesigma, unsigned* nodedist){
unsigned ii;
for (ii = 0; ii < nodes; ii++) {
nodesigma[ii] = 0;
nodedist[ii] = MYINFINITY;
}
nodesigma[source] = 1;
nodedist[source] = 0;
}
void initnodesigmadist_multisource(Graph *graph,unsigned *values, unsigned *sigma_values,unsigned nodes, unsigned* nodesigma, unsigned* nodedist,unsigned *sources,unsigned source_count,unsigned *psrc,unsigned *noutgoing,unsigned *edgedst,unsigned *border){
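// Multi-source variant: only the border neighbours reachable from the given sources are
// reset (instead of clearing every node), then each source is seeded with its incoming
// distance and sigma value.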
unsigned ii,j;
/*
for (ii = 0; ii < nodes; ii++) {
// if(graph->partition.border[ii]==0)
nodesigma[ii] = 0;
nodedist[ii] = MYINFINITY;
}
*/
for ( ii = 0; ii < source_count; ii++) {
unsigned v = sources[ii],w;
unsigned num_edges_v = psrc[v];
for (j = num_edges_v; j < (num_edges_v + noutgoing[v]) ; j++) {
w = edgedst[j];
if(border[w]==0)continue;
nodedist[w]=MYINFINITY;
nodesigma[w]=0;
}
}
for(ii=0 ; ii < source_count ; ii++)
{
nodedist[sources[ii]] = values[ii];
nodesigma[sources[ii]] = sigma_values[ii];
}
}
void initnodesigmadist_multisource_omp(Graph *graph,unsigned *values,unsigned *sigma_values,unsigned nodes, unsigned* nodesigma, unsigned* nodedist,unsigned *sources,unsigned source_count,int num_threads,unsigned *psrc,unsigned *noutgoing,unsigned *edgedst,unsigned *border){
unsigned ii,j;
/*
#pragma omp parallel for private(ii) schedule(static)
for (ii = 0; ii < nodes; ii++) {
// if(graph->partition.border[ii]==0)
nodesigma[ii] = 0;
nodedist[ii] = MYINFINITY;
}
*/
#pragma omp parallel for schedule(static) private(ii,j) num_threads(num_threads)
for ( ii = 0; ii < source_count; ii++) {
unsigned v = sources[ii],w;
unsigned num_edges_v = psrc[v];
for (j = num_edges_v; j < (num_edges_v + noutgoing[v]) ; j++) {
w = edgedst[j];
if(border[w]==0)continue;
nodedist[w]=MYINFINITY;
nodesigma[w]=0;
}
}
//#pragma omp parallel for private(ii) schedule(static)
for(ii=0 ; ii < source_count ; ii++)
{
nodedist[sources[ii]] = values[ii];
nodesigma[sources[ii]] = sigma_values[ii];
}
}
void initnodesigmadist_multisource_singlerelax(Graph *graph,unsigned *values, unsigned *sigma_values,unsigned nodes, unsigned* nodesigma, unsigned* nodedist,unsigned *sources,unsigned source_count){
unsigned ii,j;
/*
for (ii = 0; ii < nodes; ii++) {
// if(graph->partition.border[ii]==0)
nodesigma[ii] = 0;
nodedist[ii] = MYINFINITY;
}
*/
for(ii=0 ; ii < source_count ; ii++)
{
nodedist[sources[ii]] = values[ii];
nodesigma[sources[ii]] = sigma_values[ii];
}
}
void initnodesigmadist_multisource_omp_singlerelax(Graph *graph,unsigned *values,unsigned *sigma_values,unsigned nodes, unsigned* nodesigma, unsigned* nodedist,unsigned *sources,unsigned source_count,int num_threads){
unsigned ii,j;
/*
#pragma omp parallel for private(ii) schedule(static)
for (ii = 0; ii < nodes; ii++) {
// if(graph->partition.border[ii]==0)
nodesigma[ii] = 0;
nodedist[ii] = MYINFINITY;
}
*/
//#pragma omp parallel for private(ii) schedule(static)
for(ii=0 ; ii < source_count ; ii++)
{
nodedist[sources[ii]] = values[ii];
nodesigma[sources[ii]] = sigma_values[ii];
}
}
void initnodesigmadist_omp(unsigned source, unsigned nodes, unsigned* nodesigma, unsigned* nodedist,int num_threads){
unsigned ii;
#pragma omp parallel for private(ii) schedule(guided) num_threads(num_threads)
for (ii = 0; ii < nodes; ii++) {
nodesigma[ii] = 0;
nodedist[ii] = MYINFINITY;
}
nodesigma[source] = 1;
nodedist[source] = 0;
}
void gpu_component (unsigned *psrc,unsigned *noutgoing,unsigned *d_psrc,unsigned *d_noutgoing,unsigned *edgesdstsrc,unsigned *edgessrcdst,unsigned hedges,unsigned hnodes,unsigned *hdist,unsigned *nodesigma,unsigned *edgesigma,unsigned source_count,unsigned *sources,hipDeviceProp_t *dp,bool BM_COMP,unsigned *nerr)
{
lonestar_gpu(psrc,noutgoing,d_psrc,d_noutgoing,edgesdstsrc,edgessrcdst,hedges,hnodes,hdist,nodesigma,edgesigma,source_count,sources,dp,BM_COMP,nerr);
}
void cpu_component (unsigned *psrc,unsigned *noutgoing,unsigned *edgesdstsrc,unsigned *edgessrcdst,unsigned hnodes,unsigned hedges,unsigned *hdist,unsigned *nodesigma,unsigned *edgesigma,unsigned source_count,unsigned *sources,omp_lock_t *lock,bool BM_COMP, int num_threads)
{
betweenness_centrality_parallel(hnodes,hedges,psrc,edgessrcdst,edgesdstsrc,noutgoing,sources,source_count,hdist,nodesigma,edgesigma,lock,num_threads);
//worklist_cpu(psrc,noutgoing,edgesdstsrc,edgessrcdst,hnodes,hedges,hdist,nodesigma,edgesigma,source_count,sources,lock,BM_COMP,num_threads);
}
void *cpu_BFS(void *P){
struct varto_cpu_part *var = (struct varto_cpu_part *)P;
Graph *graph = var->graph;
unsigned numEdges_src,numNodes_src,source = var->source,borderIndex,ii;
int num_threads = var->num_threads;
double starttime, endtime;
Graph::DevicePartition *srcpart = var->partition;
Graph::Partition *borderInfo = var->borderInfo;
numEdges_src = srcpart->numEdges;
numNodes_src = srcpart->numNodes;
unsigned borderCount = borderInfo->borderCount[CPUPARTITION]; /* Border Count is of non GPU partition */
/* Do CPU BFS calculate border distance vector*/
initnodesigmadist_omp (source,graph->nnodes, srcpart->nodesigma, srcpart->nodedist,num_threads);
starttime = rtclock();
cpu_component (srcpart->psrc,srcpart->noutgoing,srcpart->edgesrc,srcpart->edgedst,graph->nnodes,numEdges_src,srcpart->nodedist,srcpart->nodesigma,srcpart->edgesigma,1,&source,var->lock,false,num_threads);
endtime = rtclock ();
printf("For CPU BFS runtime = %.3lf ms\n", 1000*(endtime -starttime));
cpu_ratiotime += endtime-starttime;
}
void *gpu_BFS(void *var){
double starttime, endtime;
struct varto_gpu_part *P = (struct varto_gpu_part *)var;
unsigned borderIndex,borderIndex2;
Graph *graph = P->graph;
unsigned numEdges,numNodes,source = P->source,ii;
Graph::DevicePartition *gpupart = P->gpupart;
Graph::Partition *borderInfo = P->borderInfo;
numEdges = gpupart->numEdges;
numNodes = gpupart->numNodes;
foru foruzero = 0, foruone=1;
unsigned borderCount = borderInfo->borderCount[GPUPARTITION]; /* Border Count is of non GPU partition */
hipMemset(P->edgesigma,0,(numEdges) * sizeof(unsigned));
hipMemset(P->nodesigma,0,(graph->nnodes)*sizeof(unsigned));
hipMemset(P->nodedist,MYINFINITY,(graph->nnodes)*sizeof(unsigned));
hipMemcpy(&(P->nodedist[source]), &foruzero, sizeof(foruzero), hipMemcpyHostToDevice);
hipMemcpy(&(P->nodesigma[source]), &foruone, sizeof(foruone), hipMemcpyHostToDevice);
starttime = rtclock();
gpu_component (gpupart->psrc,gpupart->noutgoing,P->psrc,P->noutgoing,P->edgesrc,P->edgedst,numEdges,graph->nnodes,P->nodedist,P->nodesigma,P->edgesigma,1,&source,&(P->kconf->dp),false,P->nerr);
hipDeviceSynchronize ();
endtime = rtclock ();
printf("For GPU BFS runtime = %.3lf ms\n", 1000*(endtime -starttime));
gpu_ratiotime += endtime-starttime;
}
int main(int argc, char *argv[]){
if (argc < 2) {
printf("Usage: %s <graph>\n", argv[0]);
exit(1);
}
char *inputfile = argv[1];
unsigned weighted = 0,numEdges,numNodes;
unsigned *nodesigma, *edgesrc, *edgedst, *nodedist, *edgewt,*psrc,*noutgoing,*edgesigma,*border,*nerr;
int num_threads=16;
Graph graph;
hipDeviceReset();
KernelConfig kconf(1);
hipStream_t sone, stwo,sthree,sfour;
struct varto_cpu_part P;
struct varto_gpu_part data_gpu;
pthread_t thread1;
double starttime, endtime,Finalstarttime,Finalendtime,tmpsttime,tmpendtime,fwdph_starttime,totalIterativeTime,total_fwd_time=0,F_R,total_bck_time=0,bckph_starttime;
hipStreamCreate(&sone);
hipStreamCreate(&stwo);
hipStreamCreate(&sthree);
hipStreamCreate(&sfour);
if(omp_get_num_procs() <= 4)
num_threads = omp_get_num_procs();
else{
//num_threads = omp_get_num_procs()/2;
printf("No of CPUs %d\n",omp_get_num_procs());
num_threads-=0;
num_threads=16;
}
std::ofstream cfile;
cfile.open("ratio.txt");
omp_set_num_threads(num_threads);
graph.read(inputfile, weighted);
graph.initFrom(graph);
graph.formMetisPartitions(graph, &graph.partition);
graph.formDevicePartitions(graph);
srand (time(NULL));
double tstarttime = rtclock();
graph.num_threads = num_threads;
printf("max node count: %d\n", graph.maxNodeCount);
printf("max edge count: %d\n", graph.maxEdgeCount);
if (hipMalloc((void **)&edgesrc, (graph.maxEdgeCount) * sizeof(unsigned)) != hipSuccess) CudaTest("allocating edgesrc failed");
if (hipMalloc((void **)&edgedst, (graph.maxEdgeCount) * sizeof(unsigned)) != hipSuccess) CudaTest("allocating edgedst failed");
//if (hipMalloc((void **)&edgewt, (graph.maxEdgeCount) * sizeof(unsigned)) != hipSuccess) CudaTest("allocating edgewt failed");
if (hipMalloc((void **)&edgesigma, (graph.maxEdgeCount) * sizeof(unsigned)) != hipSuccess) CudaTest("allocating edgesigma failed");
if (hipMalloc((void **)&nodedist, (graph.nnodes) * sizeof(unsigned)) != hipSuccess) CudaTest("allocating nodedist failed");
if (hipMalloc((void **)&nodesigma, (graph.nnodes) * sizeof(unsigned)) != hipSuccess) CudaTest("allocating nodesigma failed");
//if (hipMalloc((void **)&active, (graph.maxEdgeCount) * sizeof(bool)) != hipSuccess) CudaTest("allocating edgedstsigma failed");
//if (hipMalloc((void **)&localchanged, sizeof(bool)) != hipSuccess) CudaTest("allocating localchanged failed");
if (hipMalloc((void **)&psrc, (graph.nnodes+1) * sizeof(unsigned)) != hipSuccess) CudaTest("allocating psrc failed");
if (hipMalloc((void **)&noutgoing, (graph.nnodes+1) * sizeof(unsigned)) != hipSuccess) CudaTest("allocating noutgoing failed");
if (hipMalloc((void **)&border, (graph.nnodes) * sizeof(unsigned)) != hipSuccess) CudaTest("allocating border failed");
if (hipMalloc((void **)&nerr, sizeof(unsigned)) != hipSuccess) CudaTest("allocating nerr failed");// Calculate no. of errors
kconf.setMaxThreadsPerBlock();
// kconf.setProblemSize(graph.maxEdgeCount);
if (!kconf.coversProblem()) {
printf("The number of threads(%d) does not cover the problem(%d), number of items per thread=%d.\n", kconf.getNumberOfBlockThreads()*kconf.getNumberOfBlocks(), kconf.getProblemSize(), kconf.getProblemSize() / (kconf.getNumberOfBlockThreads()*kconf.getNumberOfBlocks()));
}
CUDACOPY(edgesrc, graph.devicePartition[GPUPARTITION].edgesrc, (numEdges) * sizeof(unsigned), hipMemcpyHostToDevice,sone);
CUDACOPY(edgedst, graph.devicePartition[GPUPARTITION].edgedst, (numEdges) * sizeof(unsigned int), hipMemcpyHostToDevice,stwo);
//CUDACOPY(edgewt, graph.devicePartition[GPUPARTITION].edgewt, (numEdges) * sizeof(unsigned int), hipMemcpyHostToDevice,sthree);
CUDACOPY(psrc, graph.devicePartition[GPUPARTITION].psrc, (graph.nnodes+1) * sizeof(unsigned int), hipMemcpyHostToDevice,sone);
CUDACOPY(noutgoing, graph.devicePartition[GPUPARTITION].noutgoing, (graph.nnodes+1) * sizeof(unsigned int), hipMemcpyHostToDevice,stwo);
CUDACOPY(border, graph.partition.border, (graph.nnodes) * sizeof(unsigned int), hipMemcpyHostToDevice,stwo);
hipStreamSynchronize(sone);
hipStreamSynchronize(stwo);
hipStreamSynchronize(sthree);
omp_lock_t *writelock=(omp_lock_t *)malloc(graph.nnodes*sizeof(omp_lock_t));
// Perform border matrix computation for both cpu and gpu simultaneously here
// Initializing variables for cpu border matrix computation function
P.partition = &(graph.devicePartition[CPUPARTITION]);
P.num_threads = num_threads;
P.graph = &graph;
P.borderInfo = &(graph.partition);
P.single_relax = false;
P.lock = writelock;
P.cpu_F_I=P.cpu_F_R=P.cpu_bck_knl_time=P.cpu_fwd_knl_time=P.cpu_tot_bck_time=0;
// Initializing variables for gpu_part function
data_gpu.gpupart = &(graph.devicePartition[GPUPARTITION]);
data_gpu.graph = &graph;
data_gpu.borderInfo = &(graph.partition);
data_gpu.nodesigma = nodesigma;
data_gpu.edgesrc = edgesrc;
data_gpu.edgedst = edgedst;
data_gpu.nodedist = nodedist;
data_gpu.edgewt = edgewt;
data_gpu.edgesigma = edgesigma;
data_gpu.kconf = &kconf;
data_gpu.single_relax = false;
data_gpu.psrc = psrc;
data_gpu.noutgoing = noutgoing;
data_gpu.border = border;
data_gpu.nerr = nerr;
data_gpu.num_threads = num_threads;
data_gpu.lock = writelock;
for(int ii=0;ii<5;ii++){
//Initializing data structures
//GPU data
hipMemset(edgesigma,0,((graph.devicePartition[GPUPARTITION].numEdges) * sizeof(unsigned)));
hipMemset(nodesigma,0,((graph.nnodes) * sizeof(unsigned)));
hipMemset(nodedist,MYINFINITY,((graph.nnodes) * sizeof(unsigned)));
// CPU data
memset(graph.devicePartition[CPUPARTITION].edgesigma,0,((graph.devicePartition[CPUPARTITION].numEdges) * sizeof(unsigned)));
memset(graph.devicePartition[CPUPARTITION].nodesigma,0,((graph.nnodes) * sizeof(unsigned)));
memset(graph.devicePartition[CPUPARTITION].nodedist,MYINFINITY,((graph.nnodes) * sizeof(unsigned)));
memset(graph.devicePartition[GPUPARTITION].edgesigma,0,((graph.devicePartition[GPUPARTITION].numEdges) * sizeof(unsigned)));
memset(graph.devicePartition[GPUPARTITION].nodesigma,0,((graph.nnodes) * sizeof(unsigned)));
memset(graph.devicePartition[GPUPARTITION].nodedist,MYINFINITY,((graph.nnodes) * sizeof(unsigned)));
while(1){
data_gpu.source = rand() % graph.nnodes;
if(graph.partition.part[data_gpu.source]==GPUPARTITION) break;
}
pthread_create(&thread1,NULL,gpu_BFS,&(data_gpu));
while(1){
P.source = rand() % graph.nnodes;
if(graph.partition.part[P.source]==CPUPARTITION) break;
}
cpu_BFS(&P);
pthread_join(thread1,NULL);
}
if(gpu_ratiotime/(cpu_ratiotime+gpu_ratiotime) > 0.10){
cfile<<gpu_ratiotime/(cpu_ratiotime+gpu_ratiotime)<<" "<<cpu_ratiotime/(cpu_ratiotime+gpu_ratiotime)<<endl;
cout<<"Ratio for cpu and gpu are "<<gpu_ratiotime/(cpu_ratiotime+gpu_ratiotime)<<" "<<cpu_ratiotime/(cpu_ratiotime+gpu_ratiotime)<<std::endl;
}
else{
cout<<"Ratio for cpu and gpu are "<<10<<" "<<90;
cfile<<10<<" "<<90;
}
//cout<<"Ratio for cpu and gpu are "<<35<<" "<<65;
//cfile<<50<<" "<<50;
//cfile<<40<<" "<<60;
//cfile<<20<<" "<<80;
cfile.close();
char name[80]="";
strcat(name,"./partition_patoh.exe ");
strcat(name,inputfile);
system(name);
double tendtime = rtclock();
printf("Ration calculation time = %.3lf ms\n",(tendtime-tstarttime)*1000);
/*
std::ofstream outfile;
outfile.open("partitioninfo.txt");
outfile<<graph.partition.edgecut<<std::endl;
for(unsigned ii=0;ii<graph.nnodes;ii++)
outfile<<graph.partition.part[ii]<<std::endl;
outfile.close();
*/
//hipDeviceReset();
return 0;
}
| 84f038048937dfc2e1fe3ff8710539cec1ef63e1.cu | #include "common.h"
#include "Structs.h"
#include "scheduler19.h"
#include "graph28.h"
#include "kernelconfig.h"
#include "list.h"
#include <cub/cub.cuh>
#include "myutils.h"
#include "myutils2.h"
double cpu_ratiotime,gpu_ratiotime;
void initnodesigmadist(unsigned source, unsigned nodes, unsigned* nodesigma, unsigned* nodedist){
unsigned ii;
for (ii = 0; ii < nodes; ii++) {
nodesigma[ii] = 0;
nodedist[ii] = MYINFINITY;
}
nodesigma[source] = 1;
nodedist[source] = 0;
}
void initnodesigmadist_multisource(Graph *graph,unsigned *values, unsigned *sigma_values,unsigned nodes, unsigned* nodesigma, unsigned* nodedist,unsigned *sources,unsigned source_count,unsigned *psrc,unsigned *noutgoing,unsigned *edgedst,unsigned *border){
unsigned ii,j;
/*
for (ii = 0; ii < nodes; ii++) {
// if(graph->partition.border[ii]==0)
nodesigma[ii] = 0;
nodedist[ii] = MYINFINITY;
}
*/
for ( ii = 0; ii < source_count; ii++) {
unsigned v = sources[ii],w;
unsigned num_edges_v = psrc[v];
for (j = num_edges_v; j < (num_edges_v + noutgoing[v]) ; j++) {
w = edgedst[j];
if(border[w]==0)continue;
nodedist[w]=MYINFINITY;
nodesigma[w]=0;
}
}
for(ii=0 ; ii < source_count ; ii++)
{
nodedist[sources[ii]] = values[ii];
nodesigma[sources[ii]] = sigma_values[ii];
}
}
void initnodesigmadist_multisource_omp(Graph *graph,unsigned *values,unsigned *sigma_values,unsigned nodes, unsigned* nodesigma, unsigned* nodedist,unsigned *sources,unsigned source_count,int num_threads,unsigned *psrc,unsigned *noutgoing,unsigned *edgedst,unsigned *border){
unsigned ii,j;
/*
#pragma omp parallel for private(ii) schedule(static)
for (ii = 0; ii < nodes; ii++) {
// if(graph->partition.border[ii]==0)
nodesigma[ii] = 0;
nodedist[ii] = MYINFINITY;
}
*/
#pragma omp parallel for schedule(static) private(ii,j) num_threads(num_threads)
for ( ii = 0; ii < source_count; ii++) {
unsigned v = sources[ii],w;
unsigned num_edges_v = psrc[v];
for (j = num_edges_v; j < (num_edges_v + noutgoing[v]) ; j++) {
w = edgedst[j];
if(border[w]==0)continue;
nodedist[w]=MYINFINITY;
nodesigma[w]=0;
}
}
//#pragma omp parallel for private(ii) schedule(static)
for(ii=0 ; ii < source_count ; ii++)
{
nodedist[sources[ii]] = values[ii];
nodesigma[sources[ii]] = sigma_values[ii];
}
}
void initnodesigmadist_multisource_singlerelax(Graph *graph,unsigned *values, unsigned *sigma_values,unsigned nodes, unsigned* nodesigma, unsigned* nodedist,unsigned *sources,unsigned source_count){
unsigned ii,j;
/*
for (ii = 0; ii < nodes; ii++) {
// if(graph->partition.border[ii]==0)
nodesigma[ii] = 0;
nodedist[ii] = MYINFINITY;
}
*/
for(ii=0 ; ii < source_count ; ii++)
{
nodedist[sources[ii]] = values[ii];
nodesigma[sources[ii]] = sigma_values[ii];
}
}
void initnodesigmadist_multisource_omp_singlerelax(Graph *graph,unsigned *values,unsigned *sigma_values,unsigned nodes, unsigned* nodesigma, unsigned* nodedist,unsigned *sources,unsigned source_count,int num_threads){
unsigned ii,j;
/*
#pragma omp parallel for private(ii) schedule(static)
for (ii = 0; ii < nodes; ii++) {
// if(graph->partition.border[ii]==0)
nodesigma[ii] = 0;
nodedist[ii] = MYINFINITY;
}
*/
//#pragma omp parallel for private(ii) schedule(static)
for(ii=0 ; ii < source_count ; ii++)
{
nodedist[sources[ii]] = values[ii];
nodesigma[sources[ii]] = sigma_values[ii];
}
}
void initnodesigmadist_omp(unsigned source, unsigned nodes, unsigned* nodesigma, unsigned* nodedist,int num_threads){
unsigned ii;
#pragma omp parallel for private(ii) schedule(guided) num_threads(num_threads)
for (ii = 0; ii < nodes; ii++) {
nodesigma[ii] = 0;
nodedist[ii] = MYINFINITY;
}
nodesigma[source] = 1;
nodedist[source] = 0;
}
void gpu_component (unsigned *psrc,unsigned *noutgoing,unsigned *d_psrc,unsigned *d_noutgoing,unsigned *edgesdstsrc,unsigned *edgessrcdst,unsigned hedges,unsigned hnodes,unsigned *hdist,unsigned *nodesigma,unsigned *edgesigma,unsigned source_count,unsigned *sources,cudaDeviceProp *dp,bool BM_COMP,unsigned *nerr)
{
lonestar_gpu(psrc,noutgoing,d_psrc,d_noutgoing,edgesdstsrc,edgessrcdst,hedges,hnodes,hdist,nodesigma,edgesigma,source_count,sources,dp,BM_COMP,nerr);
}
void cpu_component (unsigned *psrc,unsigned *noutgoing,unsigned *edgesdstsrc,unsigned *edgessrcdst,unsigned hnodes,unsigned hedges,unsigned *hdist,unsigned *nodesigma,unsigned *edgesigma,unsigned source_count,unsigned *sources,omp_lock_t *lock,bool BM_COMP, int num_threads)
{
betweenness_centrality_parallel(hnodes,hedges,psrc,edgessrcdst,edgesdstsrc,noutgoing,sources,source_count,hdist,nodesigma,edgesigma,lock,num_threads);
//worklist_cpu(psrc,noutgoing,edgesdstsrc,edgessrcdst,hnodes,hedges,hdist,nodesigma,edgesigma,source_count,sources,lock,BM_COMP,num_threads);
}
void *cpu_BFS(void *P){
struct varto_cpu_part *var = (struct varto_cpu_part *)P;
Graph *graph = var->graph;
unsigned numEdges_src,numNodes_src,source = var->source,borderIndex,ii;
int num_threads = var->num_threads;
double starttime, endtime;
Graph::DevicePartition *srcpart = var->partition;
Graph::Partition *borderInfo = var->borderInfo;
numEdges_src = srcpart->numEdges;
numNodes_src = srcpart->numNodes;
unsigned borderCount = borderInfo->borderCount[CPUPARTITION]; /* Border Count is of non GPU partition */
/* Do CPU BFS calculate border distance vector*/
initnodesigmadist_omp (source,graph->nnodes, srcpart->nodesigma, srcpart->nodedist,num_threads);
starttime = rtclock();
cpu_component (srcpart->psrc,srcpart->noutgoing,srcpart->edgesrc,srcpart->edgedst,graph->nnodes,numEdges_src,srcpart->nodedist,srcpart->nodesigma,srcpart->edgesigma,1,&source,var->lock,false,num_threads);
endtime = rtclock ();
printf("For CPU BFS runtime = %.3lf ms\n", 1000*(endtime -starttime));
cpu_ratiotime += endtime-starttime;
}
void *gpu_BFS(void *var){
double starttime, endtime;
struct varto_gpu_part *P = (struct varto_gpu_part *)var;
unsigned borderIndex,borderIndex2;
Graph *graph = P->graph;
unsigned numEdges,numNodes,source = P->source,ii;
Graph::DevicePartition *gpupart = P->gpupart;
Graph::Partition *borderInfo = P->borderInfo;
numEdges = gpupart->numEdges;
numNodes = gpupart->numNodes;
foru foruzero = 0, foruone=1;
unsigned borderCount = borderInfo->borderCount[GPUPARTITION]; /* Border Count is of non GPU partition */
cudaMemset(P->edgesigma,0,(numEdges) * sizeof(unsigned));
cudaMemset(P->nodesigma,0,(graph->nnodes)*sizeof(unsigned));
cudaMemset(P->nodedist,MYINFINITY,(graph->nnodes)*sizeof(unsigned));
cudaMemcpy(&(P->nodedist[source]), &foruzero, sizeof(foruzero), cudaMemcpyHostToDevice);
cudaMemcpy(&(P->nodesigma[source]), &foruone, sizeof(foruone), cudaMemcpyHostToDevice);
starttime = rtclock();
gpu_component (gpupart->psrc,gpupart->noutgoing,P->psrc,P->noutgoing,P->edgesrc,P->edgedst,numEdges,graph->nnodes,P->nodedist,P->nodesigma,P->edgesigma,1,&source,&(P->kconf->dp),false,P->nerr);
cudaDeviceSynchronize ();
endtime = rtclock ();
printf("For GPU BFS runtime = %.3lf ms\n", 1000*(endtime -starttime));
gpu_ratiotime += endtime-starttime;
}
int main(int argc, char *argv[]){
if (argc < 2) {
printf("Usage: %s <graph>\n", argv[0]);
exit(1);
}
char *inputfile = argv[1];
unsigned weighted = 0,numEdges,numNodes;
unsigned *nodesigma, *edgesrc, *edgedst, *nodedist, *edgewt,*psrc,*noutgoing,*edgesigma,*border,*nerr;
int num_threads=16;
Graph graph;
cudaDeviceReset();
KernelConfig kconf(1);
cudaStream_t sone, stwo,sthree,sfour;
struct varto_cpu_part P;
struct varto_gpu_part data_gpu;
pthread_t thread1;
double starttime, endtime,Finalstarttime,Finalendtime,tmpsttime,tmpendtime,fwdph_starttime,totalIterativeTime,total_fwd_time=0,F_R,total_bck_time=0,bckph_starttime;
cudaStreamCreate(&sone);
cudaStreamCreate(&stwo);
cudaStreamCreate(&sthree);
cudaStreamCreate(&sfour);
if(omp_get_num_procs() <= 4)
num_threads = omp_get_num_procs();
else{
//num_threads = omp_get_num_procs()/2;
printf("No of CPUs %d\n",omp_get_num_procs());
num_threads-=0;
num_threads=16;
}
std::ofstream cfile;
cfile.open("ratio.txt");
omp_set_num_threads(num_threads);
graph.read(inputfile, weighted);
graph.initFrom(graph);
graph.formMetisPartitions(graph, &graph.partition);
graph.formDevicePartitions(graph);
srand (time(NULL));
double tstarttime = rtclock();
graph.num_threads = num_threads;
printf("max node count: %d\n", graph.maxNodeCount);
printf("max edge count: %d\n", graph.maxEdgeCount);
if (cudaMalloc((void **)&edgesrc, (graph.maxEdgeCount) * sizeof(unsigned)) != cudaSuccess) CudaTest("allocating edgesrc failed");
if (cudaMalloc((void **)&edgedst, (graph.maxEdgeCount) * sizeof(unsigned)) != cudaSuccess) CudaTest("allocating edgedst failed");
//if (cudaMalloc((void **)&edgewt, (graph.maxEdgeCount) * sizeof(unsigned)) != cudaSuccess) CudaTest("allocating edgewt failed");
if (cudaMalloc((void **)&edgesigma, (graph.maxEdgeCount) * sizeof(unsigned)) != cudaSuccess) CudaTest("allocating edgesigma failed");
if (cudaMalloc((void **)&nodedist, (graph.nnodes) * sizeof(unsigned)) != cudaSuccess) CudaTest("allocating nodedist failed");
if (cudaMalloc((void **)&nodesigma, (graph.nnodes) * sizeof(unsigned)) != cudaSuccess) CudaTest("allocating nodesigma failed");
//if (cudaMalloc((void **)&active, (graph.maxEdgeCount) * sizeof(bool)) != cudaSuccess) CudaTest("allocating edgedstsigma failed");
//if (cudaMalloc((void **)&localchanged, sizeof(bool)) != cudaSuccess) CudaTest("allocating localchanged failed");
if (cudaMalloc((void **)&psrc, (graph.nnodes+1) * sizeof(unsigned)) != cudaSuccess) CudaTest("allocating psrc failed");
if (cudaMalloc((void **)&noutgoing, (graph.nnodes+1) * sizeof(unsigned)) != cudaSuccess) CudaTest("allocating noutgoing failed");
if (cudaMalloc((void **)&border, (graph.nnodes) * sizeof(unsigned)) != cudaSuccess) CudaTest("allocating border failed");
if (cudaMalloc((void **)&nerr, sizeof(unsigned)) != cudaSuccess) CudaTest("allocating nerr failed");// Calculate no. of errors
kconf.setMaxThreadsPerBlock();
// kconf.setProblemSize(graph.maxEdgeCount);
if (!kconf.coversProblem()) {
printf("The number of threads(%d) does not cover the problem(%d), number of items per thread=%d.\n", kconf.getNumberOfBlockThreads()*kconf.getNumberOfBlocks(), kconf.getProblemSize(), kconf.getProblemSize() / (kconf.getNumberOfBlockThreads()*kconf.getNumberOfBlocks()));
}
CUDACOPY(edgesrc, graph.devicePartition[GPUPARTITION].edgesrc, (numEdges) * sizeof(unsigned), cudaMemcpyHostToDevice,sone);
CUDACOPY(edgedst, graph.devicePartition[GPUPARTITION].edgedst, (numEdges) * sizeof(unsigned int), cudaMemcpyHostToDevice,stwo);
//CUDACOPY(edgewt, graph.devicePartition[GPUPARTITION].edgewt, (numEdges) * sizeof(unsigned int), cudaMemcpyHostToDevice,sthree);
CUDACOPY(psrc, graph.devicePartition[GPUPARTITION].psrc, (graph.nnodes+1) * sizeof(unsigned int), cudaMemcpyHostToDevice,sone);
CUDACOPY(noutgoing, graph.devicePartition[GPUPARTITION].noutgoing, (graph.nnodes+1) * sizeof(unsigned int), cudaMemcpyHostToDevice,stwo);
CUDACOPY(border, graph.partition.border, (graph.nnodes) * sizeof(unsigned int), cudaMemcpyHostToDevice,stwo);
cudaStreamSynchronize(sone);
cudaStreamSynchronize(stwo);
cudaStreamSynchronize(sthree);
omp_lock_t *writelock=(omp_lock_t *)malloc(graph.nnodes*sizeof(omp_lock_t));
// Perform border matrix computation for both cpu and gpu simultaneously here
// Initializing variables for cpu border matrix computation function
P.partition = &(graph.devicePartition[CPUPARTITION]);
P.num_threads = num_threads;
P.graph = &graph;
P.borderInfo = &(graph.partition);
P.single_relax = false;
P.lock = writelock;
P.cpu_F_I=P.cpu_F_R=P.cpu_bck_knl_time=P.cpu_fwd_knl_time=P.cpu_tot_bck_time=0;
// Initializing variables for gpu_part function
data_gpu.gpupart = &(graph.devicePartition[GPUPARTITION]);
data_gpu.graph = &graph;
data_gpu.borderInfo = &(graph.partition);
data_gpu.nodesigma = nodesigma;
data_gpu.edgesrc = edgesrc;
data_gpu.edgedst = edgedst;
data_gpu.nodedist = nodedist;
data_gpu.edgewt = edgewt;
data_gpu.edgesigma = edgesigma;
data_gpu.kconf = &kconf;
data_gpu.single_relax = false;
data_gpu.psrc = psrc;
data_gpu.noutgoing = noutgoing;
data_gpu.border = border;
data_gpu.nerr = nerr;
data_gpu.num_threads = num_threads;
data_gpu.lock = writelock;
for(int ii=0;ii<5;ii++){
//Initializing data structures
//GPU data
cudaMemset(edgesigma,0,((graph.devicePartition[GPUPARTITION].numEdges) * sizeof(unsigned)));
cudaMemset(nodesigma,0,((graph.nnodes) * sizeof(unsigned)));
cudaMemset(nodedist,MYINFINITY,((graph.nnodes) * sizeof(unsigned)));
// CPU data
memset(graph.devicePartition[CPUPARTITION].edgesigma,0,((graph.devicePartition[CPUPARTITION].numEdges) * sizeof(unsigned)));
memset(graph.devicePartition[CPUPARTITION].nodesigma,0,((graph.nnodes) * sizeof(unsigned)));
memset(graph.devicePartition[CPUPARTITION].nodedist,MYINFINITY,((graph.nnodes) * sizeof(unsigned)));
memset(graph.devicePartition[GPUPARTITION].edgesigma,0,((graph.devicePartition[GPUPARTITION].numEdges) * sizeof(unsigned)));
memset(graph.devicePartition[GPUPARTITION].nodesigma,0,((graph.nnodes) * sizeof(unsigned)));
memset(graph.devicePartition[GPUPARTITION].nodedist,MYINFINITY,((graph.nnodes) * sizeof(unsigned)));
while(1){
data_gpu.source = rand() % graph.nnodes;
if(graph.partition.part[data_gpu.source]==GPUPARTITION) break;
}
pthread_create(&thread1,NULL,gpu_BFS,&(data_gpu));
while(1){
P.source = rand() % graph.nnodes;
if(graph.partition.part[P.source]==CPUPARTITION) break;
}
cpu_BFS(&P);
pthread_join(thread1,NULL);
}
if(gpu_ratiotime/(cpu_ratiotime+gpu_ratiotime) > 0.10){
cfile<<gpu_ratiotime/(cpu_ratiotime+gpu_ratiotime)<<" "<<cpu_ratiotime/(cpu_ratiotime+gpu_ratiotime)<<endl;
cout<<"Ratio for cpu and gpu are "<<gpu_ratiotime/(cpu_ratiotime+gpu_ratiotime)<<" "<<cpu_ratiotime/(cpu_ratiotime+gpu_ratiotime)<<std::endl;
}
else{
cout<<"Ratio for cpu and gpu are "<<10<<" "<<90;
cfile<<10<<" "<<90;
}
//cout<<"Ratio for cpu and gpu are "<<35<<" "<<65;
//cfile<<50<<" "<<50;
//cfile<<40<<" "<<60;
//cfile<<20<<" "<<80;
cfile.close();
char name[80]="";
strcat(name,"./partition_patoh.exe ");
strcat(name,inputfile);
system(name);
double tendtime = rtclock();
printf("Ration calculation time = %.3lf ms\n",(tendtime-tstarttime)*1000);
/*
std::ofstream outfile;
outfile.open("partitioninfo.txt");
outfile<<graph.partition.edgecut<<std::endl;
for(unsigned ii=0;ii<graph.nnodes;ii++)
outfile<<graph.partition.part[ii]<<std::endl;
outfile.close();
*/
//cudaDeviceReset();
return 0;
}
|
eed15538c52d051864987199633178aeda02aff4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include<stdio.h>
#include<string.h>
__global__ void copy(char *a, char *b, int n,int m)
{
int tid;
tid = threadIdx.x;
int i=0;
while(i<m)
{
b[i*n+tid]=a[tid];
i++;
}
}
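/* Hedged illustration, not part of the original program: an equivalent CPU reference
* of what the kernel computes. Thread `tid` copies character a[tid] into every
* repetition block, so repetition i lands at output index i*n + tid (e.g. "abc" with
* m = 2 becomes "abcabc"). The helper name copy_reference is an assumption. */
static void copy_reference(const char *a, char *b, int n, int m)
{
for (int i = 0; i < m; i++) /* repetition index */
for (int tid = 0; tid < n; tid++) /* plays the role of threadIdx.x */
b[i * n + tid] = a[tid];
}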
int main()
{
char a[100],b[100];
int i,n,m,size;
char *d_a, *d_b;
printf("\nEnter the string\n");
scanf("%s", a);
printf("How many times you want to repeat the string?\n");
scanf("%d", &m);
n = strlen(a);
printf("\nNo of characters is %d\t", n);
size = sizeof(char);
printf("\nSize is \t%d\n", size);
hipMalloc((void **)&d_a,n*size);
hipMalloc((void **)&d_b,n*m*size);
hipMemcpy(d_a,a,n*size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( copy), dim3(1),dim3(n), 0, 0, d_a,d_b,n,m);
hipMemcpy(b,d_b,n*m*size,hipMemcpyDeviceToHost);
printf("\nRepeated string is \n");
for(i=0;i<n*m;i++)
printf("%c",b[i]);
hipFree(d_a);
hipFree(d_b);
return 0;
} | eed15538c52d051864987199633178aeda02aff4.cu | #include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include<stdio.h>
#include<string.h>
__global__ void copy(char *a, char *b, int n,int m)
{
int tid;
tid = threadIdx.x;
int i=0;
while(i<m)
{
b[i*n+tid]=a[tid];
i++;
}
}
int main()
{
char a[100],b[100];
int i,n,m,size;
char *d_a, *d_b;
printf("\nEnter the string\n");
scanf("%s", a);
printf("How many times you want to repeat the string?\n");
scanf("%d", &m);
n = strlen(a);
printf("\nNo of characters is %d\t", n);
size = sizeof(char);
printf("\nSize is \t%d\n", size);
cudaMalloc((void **)&d_a,n*size);
cudaMalloc((void **)&d_b,n*m*size);
cudaMemcpy(d_a,a,n*size,cudaMemcpyHostToDevice);
copy<<<1,n>>>(d_a,d_b,n,m);
cudaMemcpy(b,d_b,n*m*size,cudaMemcpyDeviceToHost);
printf("\nRepeated string is \n");
for(i=0;i<n*m;i++)
printf("%c",b[i]);
cudaFree(d_a);
cudaFree(d_b);
return 0;
} |
1c9b6d506aad5c5e6e302179e91f707ccd0e2900.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matrix.h"
#include "crbm_hip.cuh"
#include <iostream>
#include "utils.h"
#include "crbm_kernel.cuh"
#include <cassert>
using namespace std;
#define CURAND_CALL(x) do { if((x)!=HIPRAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
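/* Typical use of the macro above (illustration only -- note that it `return`s an int
* status, so it is meant for functions that return EXIT_FAILURE on error):
* CURAND_CALL(hiprandGenerateUniform(gen, d_buffer, count));
*/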
__global__ void setup_curand_kernel(hiprandState_t *state, int count){
int id = threadIdx.x + blockIdx.x * 64;
if(id < count){
hiprand_init(1234, id, 0, &state[id]);
}
}
void setup_curand(hiprandState_t **state, int count){
CUDA_CALL(hipMalloc((void**)state, count * sizeof(hiprandState_t)));
hipLaunchKernelGGL(( setup_curand_kernel), dim3(ceil(count/64.0)), dim3(64), 0, 0, *state, count);
}
CRBM::CRBM(int filter_num, int filter_size,
int input_num, int input_size, int channel_num,
int left_upper_padding, int right_low_padding,
int pooling_rate,
Matrix *filters, Matrix *hbias,
Matrix *vbias){
this->epsilon = 0.01;
this->momentum = 0.5;
this->l2reg = 0.01;
this->ph_lambda = 5;
this->ph = 0.002;
this->sigma = 0.2;
this->cur_trial = 0;
this->filter_num = filter_num;
this->filter_size = filter_size;
this->input_num = input_num;
this->input_size = input_size;
this->pooling_rate = pooling_rate;
this->channel_num = channel_num;
this->left_upper_padding = left_upper_padding;
this->right_low_padding = right_low_padding;
this->feature_map_size = input_size + left_upper_padding +
right_low_padding - filter_size + 1;
this->subsample_size = feature_map_size / pooling_rate;
if(filters == NULL){
this->CPU_filters = filter_init(filter_size, filter_num, channel_num);
}else{
this->CPU_filters = new Matrix(*filters);
}
if(hbias == NULL){
this->CPU_hbias = new Matrix(filter_num, 1, -0.1f, -0.1f);
}else{
this->CPU_hbias = new Matrix(*hbias);
}
if(vbias == NULL){
this->CPU_vbias = new Matrix(channel_num, 1);
}else{
this->CPU_vbias = new Matrix(*vbias);
}
this->CPU_input = new Matrix(input_num, channel_num * input_size * input_size);
this->CPU_y_h = new Matrix(input_num ,
filter_num * feature_map_size * feature_map_size);
this->CPU_y_h_probs = new Matrix(input_num ,
filter_num * feature_map_size * feature_map_size);
this->CPU_y_h2 = new Matrix(input_num ,
filter_num * feature_map_size * feature_map_size);
this->CPU_y_h2_probs = new Matrix(input_num ,
filter_num * feature_map_size * feature_map_size);
//filter_num * feature_map_size * feature_map_size, 1, 1);
this->CPU_y_p = new Matrix(input_num,
filter_num * subsample_size * subsample_size);
this->CPU_y_v = new Matrix(this->CPU_input->get_row_num(),
this->CPU_input->get_col_num());
this->CPU_y_v_probs = new Matrix(this->CPU_input->get_row_num(),
this->CPU_input->get_col_num());
this->CPU_d_w = new Matrix(this->CPU_filters->get_row_num(),
this->CPU_filters->get_col_num());
this->CPU_d_w_pre = new Matrix(this->CPU_filters->get_row_num(),
this->CPU_filters->get_col_num());
this->CPU_d_hbias = new Matrix(this->CPU_hbias->get_row_num(),
this->CPU_hbias->get_col_num());
this->CPU_d_hbias_pre = new Matrix(this->CPU_hbias->get_row_num(),
this->CPU_hbias->get_col_num());
this->CPU_d_hbias_tmp = new Matrix(this->CPU_hbias->get_row_num(),
this->CPU_hbias->get_col_num());
this->CPU_d_h_sum_tmp = new Matrix(1, this->CPU_y_h->get_col_num());
this->GPU_filters = new NVMatrix(*this->CPU_filters);
this->GPU_hbias = new NVMatrix(*this->CPU_hbias);
this->GPU_vbias = new NVMatrix(*this->CPU_vbias);
this->GPU_input = new NVMatrix(*this->CPU_input);
this->GPU_y_h = new NVMatrix(*this->CPU_y_h);
this->GPU_y_h_probs = new NVMatrix(*this->CPU_y_h_probs);
this->GPU_y_h2 = new NVMatrix(*this->CPU_y_h2);
this->GPU_y_h2_probs = new NVMatrix(*this->CPU_y_h2_probs);
this->GPU_y_p = new NVMatrix(*this->CPU_y_p);
this->GPU_y_v = new NVMatrix(*this->CPU_y_v);
this->GPU_y_v_probs = new NVMatrix(*this->CPU_y_v_probs);
this->GPU_d_w = new NVMatrix(*this->CPU_d_w);
this->GPU_d_w_pre = new NVMatrix(*this->CPU_d_w);
this->GPU_d_hbias = new NVMatrix(*this->CPU_d_hbias);
this->GPU_d_hbias_pre = new NVMatrix(*this->CPU_d_hbias);
this->GPU_d_hbias_tmp = new NVMatrix(*this->CPU_d_hbias_tmp);
this->GPU_d_h_sum_tmp= new NVMatrix(*this->CPU_d_h_sum_tmp);
this->rnd_num = ::max(input_num * channel_num * input_size * input_size, input_num * feature_map_size * feature_map_size / (pooling_rate * pooling_rate));
hiprandCreateGenerator(&this->rnd_gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(this->rnd_gen, 1234ULL);
hipMalloc((void **)&this->rnd_array, this->rnd_num * sizeof(float));
//setup_curand(&this->rnd_state, this->rnd_state_num);
}
CRBM::~CRBM(){
delete this->CPU_filters;
delete this->CPU_hbias;
delete this->CPU_vbias;
delete this->CPU_y_h;
delete this->CPU_y_h_probs;
delete this->CPU_y_p;
delete this->CPU_y_v;
delete this->CPU_y_v_probs;
delete this->CPU_d_w;
delete this->CPU_d_w_pre;
delete this->CPU_d_hbias;
delete this->CPU_d_hbias_pre;
delete this->CPU_d_hbias_tmp;
delete this->CPU_d_h_sum_tmp;
delete this->GPU_filters;
delete this->GPU_hbias;
delete this->GPU_vbias;
delete this->GPU_y_h;
delete this->GPU_y_h_probs;
delete this->GPU_y_p;
delete this->GPU_y_v;
delete this->GPU_y_v_probs;
delete this->GPU_d_w;
delete this->GPU_d_w_pre;
delete this->GPU_d_hbias;
delete this->GPU_d_hbias_pre;
delete this->GPU_d_hbias_tmp;
delete this->GPU_d_h_sum_tmp;
CUDA_CALL(hipFree(this->rnd_array));
hiprandDestroyGenerator(this->rnd_gen);
}
Matrix* CRBM::filter_init(int filter_size, int filter_num, int channel_num){
float low = - 4 * sqrt(6.0 / (2 * filter_size * filter_size * channel_num));
float upper = -low;
return new Matrix(filter_num, channel_num*filter_size*filter_size, low, upper);
}
void CRBM::CPU_convolution_forward(float *input, float *filter, float *target, float *hbias){
bzero(target, input_num * filter_num * feature_map_size * feature_map_size * sizeof(float));
for(int img = 0; img < input_num; img++){
for(int fil = 0; fil < filter_num; fil++){
float *curBias = hbias + fil;
for(int r = 0; r < feature_map_size; r++){
for(int c = 0; c < feature_map_size; c++){
float *curFilter = filter + fil * channel_num * filter_size * filter_size;
float* curTarget = target + img * filter_num * feature_map_size * feature_map_size +
fil * feature_map_size * feature_map_size +
r * feature_map_size + c;
for(int k = 0; k < channel_num; k++){
float* curInput = input + img * channel_num * input_size * input_size +
k * input_size * input_size +
(r < left_upper_padding ? 0 : r - left_upper_padding) * input_size +
(c < left_upper_padding ? 0 : c - left_upper_padding);
for(int i = 0; i < filter_size; i++){
if(!((r+i) < left_upper_padding ||
(r+i) >= (left_upper_padding + input_size))){
int step = 0;
for(int j = 0; j < filter_size; j++){
if(!((c+j) < left_upper_padding ||
(c+j) >= (left_upper_padding + input_size))){
*curTarget += curFilter[i*filter_size+j] * (*curInput);
curInput++;
step++;
}
}
curInput += input_size - step;
}
}
curFilter += filter_size * filter_size;
}
*curTarget += *curBias;
*curTarget = (1.0 / (this->sigma * this->sigma)) * (*curTarget);
}
}
}
}
}
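/* In formula form (a hedged reconstruction of the loops above, not text from the
* original source): for every image and filter k the pre-pooling activation is
* I_k = (1 / sigma^2) * ( sum_c corr_valid(v^c, W_k^c) + b_k )
* where corr_valid is a correlation (no kernel flip) over the zero-padded input of
* size input_size + left_upper_padding + right_low_padding. */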
static int max_pooling_multinomial(float *probs, int len){
float rnd = random_float(0, 1);
int i;
for(i = 0; rnd > probs[i]; i++, probs[i] += probs[i-1]);
return i;
}
void CRBM::CPU_max_pooling(float *y_h, float *y_h_probs, float *y_p){
float pooling_area[MAX_POOLING_RATE*MAX_FILETER_SIZE+1];
for(int img = 0; img < input_num; img++){
for(int fil = 0; fil < filter_num; fil++){
float *fm = y_h +
img * filter_num * feature_map_size * feature_map_size +
fil * feature_map_size * feature_map_size;
float *probs = y_h_probs +
img * filter_num * feature_map_size * feature_map_size +
fil * feature_map_size * feature_map_size;
float *target = y_p +
img * filter_num * subsample_size * subsample_size +
fil * subsample_size * subsample_size;
for(int i = 0; i < feature_map_size; i += pooling_rate){
for(int j = 0; j < feature_map_size; j += pooling_rate){
float sum = 0;
for(int pi = 0; pi < pooling_rate; pi++){
for(int pj = 0; pj < pooling_rate; pj++){
float *cur_fm = fm + (i+pi) * feature_map_size + (j+pj);
if(*cur_fm > 50)
*cur_fm = 50;
*cur_fm = expf(*cur_fm);
assert(!isinf(*cur_fm));
sum += *cur_fm;
}
}
for(int pi = 0; pi < pooling_rate; pi++){
for(int pj = 0; pj < pooling_rate; pj++){
float *cur_fm = fm + (i+pi) * feature_map_size + (j+pj);
float *cur_probs = probs + (i+pi) * feature_map_size + (j+pj);
*cur_probs = *cur_fm / (1 + sum);
pooling_area[pi*pooling_rate+pj] = *cur_probs;
*cur_fm = 0;
}
}
pooling_area[pooling_rate*pooling_rate] = 1.0/(1+sum);
int pooling_idx = max_pooling_multinomial(pooling_area,
pooling_rate*pooling_rate+1);
if(pooling_idx == pooling_rate*pooling_rate){
target[(i/pooling_rate)*subsample_size+(j/pooling_rate)] = 0;
}else{
target[(i/pooling_rate)*subsample_size+(j/pooling_rate)] = 1;
int pi = pooling_idx / pooling_rate;
int pj = pooling_idx % pooling_rate;
fm[(i+pi) * feature_map_size + (j+pj)] = 1;
}
}
}
}
}
}
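/* The block above samples the usual probabilistic max-pooling distribution (hedged
* reconstruction, not original text): within each pooling_rate x pooling_rate block
* with pre-pooling activations I_j,
* P(h_j = 1) = exp(I_j) / (1 + sum_k exp(I_k)),
* P(all hidden units off, i.e. the pooling unit is 0) = 1 / (1 + sum_k exp(I_k)),
* and the exp() arguments are clamped at 50 to avoid overflow. */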
void CRBM::CPU_convolution_backward(float *y_h, float *filters, float *vbias,
float *y_v_probs, float *y_v){
float tmp_recon[MAX_IMGAG_SIZE][MAX_IMGAG_SIZE];
int padding = filter_size-1;
int input_padding_size = feature_map_size + filter_size - 1;
int lu_padding = left_upper_padding;
bzero(tmp_recon, sizeof(tmp_recon));
for(int img = 0; img < input_num; img++){
for(int cha = 0; cha < channel_num; cha++){
float *target = y_v_probs +
img * channel_num * input_size * input_size +
cha * input_size * input_size;
float *target_y_v = y_v +
img * channel_num * input_size * input_size +
cha * input_size * input_size;
for(int fil = 0; fil < filter_num; fil++){
float *filter = filters +
fil * filter_size * filter_size * channel_num +
cha * filter_size * filter_size;
float *fm = y_h +
img * filter_num * feature_map_size * feature_map_size +
fil * feature_map_size * feature_map_size;
for(int r = 0; r < feature_map_size + filter_size - 1; r++){
for(int c = 0; c < feature_map_size + filter_size - 1; c++){
for(int i = r; i < r+filter_size; i++){
for(int j = c; j < c+filter_size; j++){
if(!(i < padding || j < padding ||
i >= (padding + feature_map_size) ||
j >= (padding + feature_map_size))){
tmp_recon[r][c] +=
fm[(i-padding)*feature_map_size + (j-padding)] *
filter[(filter_size-1-(i-r))*filter_size + (filter_size-1-(j-c))];
}
}
}
}
}
}
for(int i = 0; i < input_size; i++){
for(int j = 0; j < input_size; j++){
target[i*input_size+j] = tmp_recon[i+lu_padding][j+lu_padding];
//target[i*input_size+j] = logisitc(tmp_recon[i+lu_padding][j+lu_padding]);
//target_y_v[i*input_size+j] =
// (random_float(0,1) < target[i*input_size+j]) ? 1 : 0;
}
}
bzero(tmp_recon, sizeof(tmp_recon));
}
}
}
/*
* Accumulates the weight gradient for both contrastive-divergence phases.
* When is_init is true (positive phase) dw is zeroed first and the products are added;
* when is_init is false (negative phase) the new contribution is subtracted, i.e. dw -= new_dw.
*/
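/* Sketch of the update that the two calls to this function implement together
* (standard CD-1 for a convolutional RBM; the scaling by
* input_num * feature_map_size^2, the l2reg term and the sparsity target are applied
* afterwards in start()):
* dW_k ~= corr(v_data, h_data_k) - corr(v_recon, h_recon_k)
*/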
void CRBM::CPU_compute_d_w(float *v, float *h, float *dw, bool is_init){
float sign;
int lu_padding = left_upper_padding;
if(is_init){
bzero(dw, filter_num * channel_num * filter_size * filter_size * sizeof(float));
sign = 1.0f;
}else{
sign = -1.0f;
}
for(int img = 0; img < input_num; img++){
for(int fil = 0; fil < filter_num; fil++){
float *this_h = h + img * filter_num * feature_map_size * feature_map_size +
fil * feature_map_size * feature_map_size;
for(int cha = 0; cha < channel_num; cha++){
float *this_v = v + img * channel_num * input_size * input_size +
cha * input_size * input_size;
float *this_dw = dw + fil * channel_num * filter_size * filter_size +
cha * filter_size * filter_size;
for(int r = 0; r < filter_size; r++){
for(int c = 0; c < filter_size; c++){
float *cur_v = this_v + (r-lu_padding) * input_size +
(c-lu_padding);
for(int i = 0; i < feature_map_size; i++){
for(int j = 0; j < feature_map_size; j++){
if(!((r+i) < lu_padding ||
(c+j) < lu_padding ||
(r+i) >= (lu_padding+input_size) ||
(c+j) >= (lu_padding+input_size))){
this_dw[r*filter_size+c] +=
sign * cur_v[j] * this_h[i*feature_map_size+j];
}
}
cur_v += input_size;
}
}
}
}
}
}
}
void CRBM::GPU_convolution_forward(float *input, float *filters, float *y_h, float *hbias){
dim3 blocks = dim3(input_size / 32 * filter_num, input_size / 32 * input_num);
dim3 threads = dim3(32, 32);
hipLaunchKernelGGL(( convolution_forward_kernel), dim3(blocks), dim3(threads), 0, 0, input, filters, y_h,
hbias, input_size, channel_num, feature_map_size, filter_size,
filter_num, left_upper_padding, sigma);
hipDeviceSynchronize();
}
void CRBM::GPU_max_pooling(float *y_h, float *y_h_probs, float *y_p){
dim3 blocks = dim3(feature_map_size / pooling_rate / 16 * filter_num,
feature_map_size / pooling_rate / 16 * input_num);
dim3 threads = dim3(16, 16);
hiprandGenerateUniform(rnd_gen, rnd_array, rnd_num);
hipLaunchKernelGGL(( max_pooling_kernel), dim3(blocks), dim3(threads), 0, 0, y_h, y_h_probs, y_p,
feature_map_size, filter_num, pooling_rate, rnd_array, rnd_num);
hipDeviceSynchronize();
}
void CRBM::GPU_convolution_backward(float *y_h, float *filters, float *vbias,
float *y_v_probs, float *y_v){
dim3 blocks = dim3(input_size / 16 * channel_num, input_size / 16 * input_num);
dim3 threads = dim3(16, 16);
hiprandGenerateUniform(rnd_gen, rnd_array, rnd_num);
hipLaunchKernelGGL(( convolution_backward_kernel), dim3(blocks), dim3(threads), 0, 0, y_h,
filters, vbias, y_v_probs, y_v, input_size, left_upper_padding,
channel_num, feature_map_size, filter_num, filter_size, rnd_array, rnd_num);
hipDeviceSynchronize();
}
void CRBM::GPU_compute_d_w(float *v, float *h, float *dw, bool is_init){
dim3 blocks = dim3(channel_num * filter_num * feature_map_size / 32,
input_num * feature_map_size / 32);
dim3 threads = dim3(filter_size, filter_size);
hipLaunchKernelGGL(( compute_d_w_kernel), dim3(blocks), dim3(threads), 0, 0, v, h, dw, is_init, input_size, left_upper_padding,
channel_num, filter_num, filter_size, feature_map_size);
hipDeviceSynchronize();
}
void CRBM::run_batch(int cur_trial, int cur_image, int cur_batch, Matrix& batch_data){
batch_data.assign(*this->CPU_input);
this->GPU_input->copyFromHost(*this->CPU_input);
if(this->cur_trial > 5)
this->momentum = 0.9;
if(this->cur_image != cur_image && this->sigma > 0.1)
this->sigma *= 0.99;
this->cur_trial = cur_trial;
this->cur_image = cur_image;
this->cur_batch = cur_batch;
cout << "trial : " << cur_trial << " image : " << cur_image << " batch : " << cur_batch << endl;
start();
}
void CRBM::start(){
bool check_equality = false;
bool run_CPU = false;
bool run_GPU = true;
bool check_nan = true;
struct timeval _start_time, _end_time;
if(run_CPU){
/* CPU computation */
/**********************************/
timeFunc(this->CPU_convolution_forward(this->CPU_input->get_data(),
this->CPU_filters->get_data(), this->CPU_y_h->get_data(),
this->CPU_hbias->get_data()), "CPU convolutional forward");
if(check_nan){
assert(this->CPU_input->check_nan());
assert(this->CPU_y_h->check_nan());
}
timeFunc(this->CPU_max_pooling(this->CPU_y_h->get_data(),
this->CPU_y_h_probs->get_data(), this->CPU_y_p->get_data()),
"CPU max pooling");
if(check_nan){
assert(this->CPU_y_h->check_nan());
assert(this->CPU_y_h_probs->check_nan());
}
timeFunc(this->CPU_convolution_backward(this->CPU_y_h->get_data(),
//timeFunc(this->CPU_convolution_backward(this->CPU_y_h_probs->get_data(),
this->CPU_filters->get_data(), this->CPU_vbias->get_data(),
this->CPU_y_v_probs->get_data(), this->CPU_y_v->get_data()),
"CPU convolutional backward");
if(check_nan){
assert(this->CPU_y_v_probs->check_nan());
}
timeFunc(this->CPU_convolution_forward(this->CPU_y_v_probs->get_data(),
this->CPU_filters->get_data(), this->CPU_y_h2->get_data(),
this->CPU_hbias->get_data()), "CPU convolutional forward");
timeFunc(this->CPU_max_pooling(this->CPU_y_h2->get_data(),
this->CPU_y_h2_probs->get_data(), this->CPU_y_p->get_data()),
"CPU max pooling");
timeFunc(this->CPU_compute_d_w(this->CPU_input->get_data(),
this->CPU_y_h_probs->get_data(), this->CPU_d_w->get_data(),
true), "CPU compute dw positive phase");
timeFunc(this->CPU_compute_d_w(this->CPU_y_v_probs->get_data(),
this->CPU_y_h2_probs->get_data(), this->CPU_d_w->get_data(),
false), "CPU compute dw negative phase");
this->CPU_d_w->ele_scale(1.0 / (input_num * feature_map_size * feature_map_size));
this->CPU_d_w->mat_add(*this->CPU_filters, -this->l2reg);
this->CPU_y_h_probs->mat_sum(1, *this->CPU_d_h_sum_tmp);
this->CPU_d_h_sum_tmp->reshape(filter_num, feature_map_size * feature_map_size);
this->CPU_d_h_sum_tmp->mat_sum(0, *this->CPU_d_hbias);
this->CPU_d_h_sum_tmp->reshape(1, filter_num * feature_map_size * feature_map_size);
this->CPU_y_h2_probs->mat_sum(1, *this->CPU_d_h_sum_tmp);
this->CPU_d_h_sum_tmp->reshape(filter_num, feature_map_size * feature_map_size);
this->CPU_d_h_sum_tmp->mat_sum(0, *this->CPU_d_hbias_tmp);
this->CPU_d_h_sum_tmp->reshape(1, filter_num * feature_map_size * feature_map_size);
this->CPU_d_hbias->mat_add(*this->CPU_d_hbias_tmp, -1.0f);
this->CPU_d_hbias->ele_scale(1.0 / (input_num * feature_map_size * feature_map_size));
this->CPU_y_h_probs->mat_sum(1, *this->CPU_d_h_sum_tmp);
this->CPU_d_h_sum_tmp->reshape(filter_num, feature_map_size * feature_map_size);
this->CPU_d_h_sum_tmp->mat_sum(0, *this->CPU_d_hbias_tmp);
this->CPU_d_h_sum_tmp->reshape(1, filter_num * feature_map_size * feature_map_size);
this->CPU_d_hbias_tmp->ele_scale(1.0 / (input_num * feature_map_size * feature_map_size));
this->CPU_d_hbias_tmp->ele_add(-this->ph);
this->CPU_d_hbias_tmp->ele_scale(this->ph_lambda);
this->CPU_d_hbias->mat_add(*this->CPU_d_hbias_tmp, -1.0f);
this->CPU_d_w->mat_add(*this->CPU_d_w_pre, *this->CPU_d_w, epsilon, momentum);
this->CPU_d_w->assign(*this->CPU_d_w_pre);
this->CPU_filters->mat_add(*this->CPU_d_w, 1.0f);
this->CPU_d_hbias->mat_add(*this->CPU_d_hbias_pre, *this->CPU_d_hbias, epsilon, momentum);
this->CPU_d_hbias->assign(*this->CPU_d_hbias_pre);
this->CPU_hbias->mat_add(*this->CPU_d_hbias, 1.0f);
this->CPU_y_v_probs->mat_add(*this->CPU_input, -1.0f);
this->CPU_y_v_probs->mat_mul(*this->CPU_y_v_probs);
float cur_ferr = this->CPU_y_v_probs->ele_mean();
float cur_sparsity = this->CPU_y_h_probs->ele_mean();
this->ferr += cur_ferr;
this->sparsity += cur_sparsity;
}
/**********************************/
if(run_GPU){
/* GPU computation */
/**********************************/
Matrix* tmp = new Matrix(CPU_filters->get_row_num(), this->CPU_filters->get_col_num());
timeFunc(this->GPU_convolution_forward(this->GPU_input->get_data(),
this->GPU_filters->get_data(), this->GPU_y_h->get_data(),
this->GPU_hbias->get_data()), "GPU convolutional forward");
timeFunc(this->GPU_max_pooling(this->GPU_y_h->get_data(),
this->GPU_y_h_probs->get_data(), this->GPU_y_p->get_data()),
"GPU max pooling");
timeFunc(this->GPU_convolution_backward(this->GPU_y_h->get_data(),
//timeFunc(this->GPU_convolution_backward(this->GPU_y_h_probs->get_data(),
this->GPU_filters->get_data(), this->GPU_vbias->get_data(),
this->GPU_y_v_probs->get_data(), this->GPU_y_v->get_data()),
"GPU convolutional backward");
timeFunc(this->GPU_convolution_forward(this->GPU_y_v_probs->get_data(),
this->GPU_filters->get_data(), this->GPU_y_h2->get_data(),
this->GPU_hbias->get_data()), "GPU convolutional forward");
timeFunc(this->GPU_max_pooling(this->GPU_y_h2->get_data(),
this->GPU_y_h2_probs->get_data(), this->GPU_y_p->get_data()),
"GPU max pooling");
this->GPU_d_w->mat_init(0.0f);
timeFunc(this->GPU_compute_d_w(this->GPU_input->get_data(),
this->GPU_y_h_probs->get_data(), this->GPU_d_w->get_data(),
true), "GPU compute dw positive phase");
//this->GPU_d_w->assign(*tmp);
timeFunc(this->GPU_compute_d_w(this->GPU_y_v_probs->get_data(),
this->GPU_y_h2_probs->get_data(), this->GPU_d_w->get_data(),
false), "GPU compute dw negative phase");
//this->GPU_d_w->assign(*tmp);
this->GPU_d_w->ele_scale(1.0 / (input_num * feature_map_size * feature_map_size));
this->GPU_d_w->mat_add(*this->GPU_filters, -this->l2reg);
this->GPU_y_h_probs->mat_sum(1, *this->GPU_d_h_sum_tmp);
this->GPU_d_h_sum_tmp->reshape(filter_num, feature_map_size * feature_map_size);
this->GPU_d_h_sum_tmp->mat_sum(0, *this->GPU_d_hbias);
this->GPU_d_h_sum_tmp->reshape(1, filter_num * feature_map_size * feature_map_size);
this->GPU_y_h2_probs->mat_sum(1, *this->GPU_d_h_sum_tmp);
this->GPU_d_h_sum_tmp->reshape(filter_num, feature_map_size * feature_map_size);
this->GPU_d_h_sum_tmp->mat_sum(0, *this->GPU_d_hbias_tmp);
this->GPU_d_h_sum_tmp->reshape(1, filter_num * feature_map_size * feature_map_size);
this->GPU_d_hbias->mat_add(*this->GPU_d_hbias_tmp, -1.0f);
this->GPU_d_hbias->ele_scale(1.0 / (input_num * feature_map_size * feature_map_size));
this->GPU_y_h_probs->mat_sum(1, *this->GPU_d_h_sum_tmp);
this->GPU_d_h_sum_tmp->reshape(filter_num, feature_map_size * feature_map_size);
this->GPU_d_h_sum_tmp->mat_sum(0, *this->GPU_d_hbias_tmp);
this->GPU_d_h_sum_tmp->reshape(1, filter_num * feature_map_size * feature_map_size);
this->GPU_d_hbias_tmp->ele_scale(1.0 / (input_num * feature_map_size * feature_map_size));
this->GPU_d_hbias_tmp->ele_add(-this->ph);
this->GPU_d_hbias_tmp->ele_scale(this->ph_lambda);
this->GPU_d_hbias->mat_add(*this->GPU_d_hbias_tmp, -1.0f);
this->GPU_d_w->mat_add(*this->GPU_d_w_pre, *this->GPU_d_w, epsilon, momentum);
this->GPU_d_w->assign(*this->GPU_d_w_pre);
this->GPU_filters->mat_add(*this->GPU_d_w, 1.0f);
this->GPU_d_hbias->mat_add(*this->GPU_d_hbias_pre, *this->GPU_d_hbias, epsilon, momentum);
this->GPU_d_hbias->assign(*this->GPU_d_hbias_pre);
this->GPU_hbias->mat_add(*this->GPU_d_hbias, 1.0f);
this->GPU_y_v_probs->mat_add(*this->GPU_input, -1.0f);
this->GPU_y_v_probs->mat_mul(*this->GPU_y_v_probs);
float cur_ferr = this->GPU_y_v_probs->ele_mean();
float cur_sparsity = this->GPU_y_h_probs->ele_mean();
this->ferr += cur_ferr;
this->sparsity += cur_sparsity;
delete tmp;
}
if(check_equality){
/*
* CPU and GPU equality test
*/
/*cout << "y_h : ";
Matrix* tmp_y_h = new Matrix(this->CPU_y_h->get_row_num(),
this->CPU_y_h->get_col_num());
this->GPU_y_h->assign(*tmp_y_h);
this->CPU_y_h->equal_value(*tmp_y_h);
delete tmp_y_h;*/
cout << "y_h_probs : ";
Matrix* tmp_y_h_probs = new Matrix(this->CPU_y_h_probs->get_row_num(),
this->CPU_y_h_probs->get_col_num());
this->GPU_y_h_probs->assign(*tmp_y_h_probs);
this->CPU_y_h_probs->equal_value(*tmp_y_h_probs);
delete tmp_y_h_probs;
cout << "y_v_probs : ";
Matrix* tmp_y_v_probs = new Matrix(this->CPU_y_v_probs->get_row_num(),
this->CPU_y_v_probs->get_col_num());
this->GPU_y_v_probs->assign(*tmp_y_v_probs);
this->CPU_y_v_probs->equal_value(*tmp_y_v_probs);
delete tmp_y_v_probs;
cout << "y_h2_probs : ";
Matrix* tmp_y_h2_probs = new Matrix(this->CPU_y_h2_probs->get_row_num(),
this->CPU_y_h2_probs->get_col_num());
this->GPU_y_h2_probs->assign(*tmp_y_h2_probs);
this->CPU_y_h2_probs->equal_value(*tmp_y_h2_probs);
delete tmp_y_h2_probs;
cout << "d_w : ";
Matrix* tmp_d_w = new Matrix(this->CPU_d_w->get_row_num(),
this->CPU_d_w->get_col_num());
this->GPU_d_w->assign(*tmp_d_w);
this->CPU_d_w->equal_value(*tmp_d_w, 1e-7);
delete tmp_d_w;
cout << "d_hbias : ";
Matrix* tmp_d_hbias = new Matrix(this->CPU_d_hbias->get_row_num(),
this->CPU_d_hbias->get_col_num());
this->GPU_d_hbias->assign(*tmp_d_hbias);
this->CPU_d_hbias->equal_value(*tmp_d_hbias);
delete tmp_d_hbias;
cout << "d_h_sum_tmp : ";
Matrix* tmp_d_h_sum_tmp = new Matrix(this->CPU_d_h_sum_tmp->get_row_num(),
this->CPU_d_h_sum_tmp->get_col_num());
this->GPU_d_h_sum_tmp->assign(*tmp_d_h_sum_tmp);
this->CPU_d_h_sum_tmp->equal_value(*tmp_d_h_sum_tmp);
delete tmp_d_h_sum_tmp;
cout << "filter : ";
Matrix* tmp_filters = new Matrix(this->CPU_filters->get_row_num(),
this->CPU_filters->get_col_num());
this->GPU_filters->assign(*tmp_filters);
this->CPU_filters->equal_value(*tmp_filters);
delete tmp_filters;
cout << "hbias : ";
Matrix* tmp_hbias = new Matrix(this->CPU_hbias->get_row_num(),
this->CPU_hbias->get_col_num());
this->GPU_hbias->assign(*tmp_hbias);
this->CPU_hbias->equal_value(*tmp_hbias);
delete tmp_hbias;
}
}
| 1c9b6d506aad5c5e6e302179e91f707ccd0e2900.cu | #include "matrix.h"
#include "crbm.cuh"
#include <iostream>
#include "utils.h"
#include "crbm_kernel.cuh"
#include <cassert>
using namespace std;
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
__global__ void setup_curand_kernel(curandState *state, int count){
int id = threadIdx.x + blockIdx.x * 64;
if(id < count){
curand_init(1234, id, 0, &state[id]);
}
}
void setup_curand(curandState **state, int count){
CUDA_CALL(cudaMalloc((void**)state, count * sizeof(curandState)));
setup_curand_kernel<<< ceil(count/64.0), 64>>>(*state, count);
}
CRBM::CRBM(int filter_num, int filter_size,
int input_num, int input_size, int channel_num,
int left_upper_padding, int right_low_padding,
int pooling_rate,
Matrix *filters, Matrix *hbias,
Matrix *vbias){
this->epsilon = 0.01;
this->momentum = 0.5;
this->l2reg = 0.01;
this->ph_lambda = 5;
this->ph = 0.002;
this->sigma = 0.2;
this->cur_trial = 0;
this->filter_num = filter_num;
this->filter_size = filter_size;
this->input_num = input_num;
this->input_size = input_size;
this->pooling_rate = pooling_rate;
this->channel_num = channel_num;
this->left_upper_padding = left_upper_padding;
this->right_low_padding = right_low_padding;
this->feature_map_size = input_size + left_upper_padding +
right_low_padding - filter_size + 1;
this->subsample_size = feature_map_size / pooling_rate;
if(filters == NULL){
this->CPU_filters = filter_init(filter_size, filter_num, channel_num);
}else{
this->CPU_filters = new Matrix(*filters);
}
if(hbias == NULL){
this->CPU_hbias = new Matrix(filter_num, 1, -0.1f, -0.1f);
}else{
this->CPU_hbias = new Matrix(*hbias);
}
if(vbias == NULL){
this->CPU_vbias = new Matrix(channel_num, 1);
}else{
this->CPU_vbias = new Matrix(*vbias);
}
this->CPU_input = new Matrix(input_num, channel_num * input_size * input_size);
this->CPU_y_h = new Matrix(input_num ,
filter_num * feature_map_size * feature_map_size);
this->CPU_y_h_probs = new Matrix(input_num ,
filter_num * feature_map_size * feature_map_size);
this->CPU_y_h2 = new Matrix(input_num ,
filter_num * feature_map_size * feature_map_size);
this->CPU_y_h2_probs = new Matrix(input_num ,
filter_num * feature_map_size * feature_map_size);
//filter_num * feature_map_size * feature_map_size, 1, 1);
this->CPU_y_p = new Matrix(input_num,
filter_num * subsample_size * subsample_size);
this->CPU_y_v = new Matrix(this->CPU_input->get_row_num(),
this->CPU_input->get_col_num());
this->CPU_y_v_probs = new Matrix(this->CPU_input->get_row_num(),
this->CPU_input->get_col_num());
this->CPU_d_w = new Matrix(this->CPU_filters->get_row_num(),
this->CPU_filters->get_col_num());
this->CPU_d_w_pre = new Matrix(this->CPU_filters->get_row_num(),
this->CPU_filters->get_col_num());
this->CPU_d_hbias = new Matrix(this->CPU_hbias->get_row_num(),
this->CPU_hbias->get_col_num());
this->CPU_d_hbias_pre = new Matrix(this->CPU_hbias->get_row_num(),
this->CPU_hbias->get_col_num());
this->CPU_d_hbias_tmp = new Matrix(this->CPU_hbias->get_row_num(),
this->CPU_hbias->get_col_num());
this->CPU_d_h_sum_tmp = new Matrix(1, this->CPU_y_h->get_col_num());
this->GPU_filters = new NVMatrix(*this->CPU_filters);
this->GPU_hbias = new NVMatrix(*this->CPU_hbias);
this->GPU_vbias = new NVMatrix(*this->CPU_vbias);
this->GPU_input = new NVMatrix(*this->CPU_input);
this->GPU_y_h = new NVMatrix(*this->CPU_y_h);
this->GPU_y_h_probs = new NVMatrix(*this->CPU_y_h_probs);
this->GPU_y_h2 = new NVMatrix(*this->CPU_y_h2);
this->GPU_y_h2_probs = new NVMatrix(*this->CPU_y_h2_probs);
this->GPU_y_p = new NVMatrix(*this->CPU_y_p);
this->GPU_y_v = new NVMatrix(*this->CPU_y_v);
this->GPU_y_v_probs = new NVMatrix(*this->CPU_y_v_probs);
this->GPU_d_w = new NVMatrix(*this->CPU_d_w);
this->GPU_d_w_pre = new NVMatrix(*this->CPU_d_w);
this->GPU_d_hbias = new NVMatrix(*this->CPU_d_hbias);
this->GPU_d_hbias_pre = new NVMatrix(*this->CPU_d_hbias);
this->GPU_d_hbias_tmp = new NVMatrix(*this->CPU_d_hbias_tmp);
this->GPU_d_h_sum_tmp= new NVMatrix(*this->CPU_d_h_sum_tmp);
this->rnd_num = std::max(input_num * channel_num * input_size * input_size, input_num * feature_map_size * feature_map_size / (pooling_rate * pooling_rate));
curandCreateGenerator(&this->rnd_gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(this->rnd_gen, 1234ULL);
cudaMalloc((void **)&this->rnd_array, this->rnd_num * sizeof(float));
//setup_curand(&this->rnd_state, this->rnd_state_num);
}
CRBM::~CRBM(){
delete this->CPU_filters;
delete this->CPU_hbias;
delete this->CPU_vbias;
delete this->CPU_input;
delete this->CPU_y_h;
delete this->CPU_y_h_probs;
delete this->CPU_y_h2;
delete this->CPU_y_h2_probs;
delete this->CPU_y_p;
delete this->CPU_y_v;
delete this->CPU_y_v_probs;
delete this->CPU_d_w;
delete this->CPU_d_w_pre;
delete this->CPU_d_hbias;
delete this->CPU_d_hbias_pre;
delete this->CPU_d_hbias_tmp;
delete this->CPU_d_h_sum_tmp;
delete this->GPU_filters;
delete this->GPU_hbias;
delete this->GPU_vbias;
delete this->GPU_input;
delete this->GPU_y_h;
delete this->GPU_y_h_probs;
delete this->GPU_y_h2;
delete this->GPU_y_h2_probs;
delete this->GPU_y_p;
delete this->GPU_y_v;
delete this->GPU_y_v_probs;
delete this->GPU_d_w;
delete this->GPU_d_w_pre;
delete this->GPU_d_hbias;
delete this->GPU_d_hbias_pre;
delete this->GPU_d_hbias_tmp;
delete this->GPU_d_h_sum_tmp;
CUDA_CALL(cudaFree(this->rnd_array));
curandDestroyGenerator(this->rnd_gen);
}
Matrix* CRBM::filter_init(int filter_size, int filter_num, int channel_num){
float low = - 4 * sqrt(6.0 / (2 * filter_size * filter_size * channel_num));
float upper = -low;
return new Matrix(filter_num, channel_num*filter_size*filter_size, low, upper);
}
void CRBM::CPU_convolution_forward(float *input, float *filter, float *target, float *hbias){
bzero(target, input_num * filter_num * feature_map_size * feature_map_size * sizeof(float));
for(int img = 0; img < input_num; img++){
for(int fil = 0; fil < filter_num; fil++){
float *curBias = hbias + fil;
for(int r = 0; r < feature_map_size; r++){
for(int c = 0; c < feature_map_size; c++){
float *curFilter = filter + fil * channel_num * filter_size * filter_size;
float* curTarget = target + img * filter_num * feature_map_size * feature_map_size +
fil * feature_map_size * feature_map_size +
r * feature_map_size + c;
for(int k = 0; k < channel_num; k++){
float* curInput = input + img * channel_num * input_size * input_size +
k * input_size * input_size +
(r < left_upper_padding ? 0 : r - left_upper_padding) * input_size +
(c < left_upper_padding ? 0 : c - left_upper_padding);
for(int i = 0; i < filter_size; i++){
if(!((r+i) < left_upper_padding ||
(r+i) >= (left_upper_padding + input_size))){
int step = 0;
for(int j = 0; j < filter_size; j++){
if(!((c+j) < left_upper_padding ||
(c+j) >= (left_upper_padding + input_size))){
*curTarget += curFilter[i*filter_size+j] * (*curInput);
curInput++;
step++;
}
}
curInput += input_size - step;
}
}
curFilter += filter_size * filter_size;
}
*curTarget += *curBias;
*curTarget = (1.0 / (this->sigma * this->sigma)) * (*curTarget);
}
}
}
}
}
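/*
 * Samples an index from the multinomial defined by probs[0..len-1] by walking
 * the CDF: probs[] is turned into a running sum in place and the first index
 * whose cumulative probability covers rnd is returned. Note that len itself is
 * never checked, so the caller must guarantee the probabilities sum to 1 so
 * the loop terminates inside the array.
 */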
static int max_pooling_multinomial(float *probs, int len){
float rnd = random_float(0, 1);
int i;
for(i = 0; rnd > probs[i]; i++, probs[i] += probs[i-1]);
return i;
}
void CRBM::CPU_max_pooling(float *y_h, float *y_h_probs, float *y_p){
float pooling_area[MAX_POOLING_RATE*MAX_FILETER_SIZE+1];
for(int img = 0; img < input_num; img++){
for(int fil = 0; fil < filter_num; fil++){
float *fm = y_h +
img * filter_num * feature_map_size * feature_map_size +
fil * feature_map_size * feature_map_size;
float *probs = y_h_probs +
img * filter_num * feature_map_size * feature_map_size +
fil * feature_map_size * feature_map_size;
float *target = y_p +
img * filter_num * subsample_size * subsample_size +
fil * subsample_size * subsample_size;
for(int i = 0; i < feature_map_size; i += pooling_rate){
for(int j = 0; j < feature_map_size; j += pooling_rate){
float sum = 0;
for(int pi = 0; pi < pooling_rate; pi++){
for(int pj = 0; pj < pooling_rate; pj++){
float *cur_fm = fm + (i+pi) * feature_map_size + (j+pj);
if(*cur_fm > 50)
*cur_fm = 50;
*cur_fm = expf(*cur_fm);
assert(!isinf(*cur_fm));
sum += *cur_fm;
}
}
for(int pi = 0; pi < pooling_rate; pi++){
for(int pj = 0; pj < pooling_rate; pj++){
float *cur_fm = fm + (i+pi) * feature_map_size + (j+pj);
float *cur_probs = probs + (i+pi) * feature_map_size + (j+pj);
*cur_probs = *cur_fm / (1 + sum);
pooling_area[pi*pooling_rate+pj] = *cur_probs;
*cur_fm = 0;
}
}
pooling_area[pooling_rate*pooling_rate] = 1.0/(1+sum);
int pooling_idx = max_pooling_multinomial(pooling_area,
pooling_rate*pooling_rate+1);
if(pooling_idx == pooling_rate*pooling_rate){
target[(i/pooling_rate)*subsample_size+(j/pooling_rate)] = 0;
}else{
target[(i/pooling_rate)*subsample_size+(j/pooling_rate)] = 1;
int pi = pooling_idx / pooling_rate;
int pj = pooling_idx % pooling_rate;
fm[(i+pi) * feature_map_size + (j+pj)] = 1;
}
}
}
}
}
}
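/*
 * The block above implements the probabilistic max pooling of convolutional
 * RBMs (Lee et al., ICML 2009): within each pooling_rate x pooling_rate block
 * the hidden units form a softmax together with an "all off" state, i.e.
 * P(h_i = 1) = exp(I_i) / (1 + sum_j exp(I_j)), and the pooling unit stays 0
 * with probability 1 / (1 + sum_j exp(I_j)). y_h_probs stores the
 * probabilities, y_h the sampled binary states, y_p the pooled layer.
 */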
void CRBM::CPU_convolution_backward(float *y_h, float *filters, float *vbias,
float *y_v_probs, float *y_v){
float tmp_recon[MAX_IMGAG_SIZE][MAX_IMGAG_SIZE];
int padding = filter_size-1;
int input_padding_size = feature_map_size + filter_size - 1;
int lu_padding = left_upper_padding;
bzero(tmp_recon, sizeof(tmp_recon));
for(int img = 0; img < input_num; img++){
for(int cha = 0; cha < channel_num; cha++){
float *target = y_v_probs +
img * channel_num * input_size * input_size +
cha * input_size * input_size;
float *target_y_v = y_v +
img * channel_num * input_size * input_size +
cha * input_size * input_size;
for(int fil = 0; fil < filter_num; fil++){
float *filter = filters +
fil * filter_size * filter_size * channel_num +
cha * filter_size * filter_size;
float *fm = y_h +
img * filter_num * feature_map_size * feature_map_size +
fil * feature_map_size * feature_map_size;
for(int r = 0; r < feature_map_size + filter_size - 1; r++){
for(int c = 0; c < feature_map_size + filter_size - 1; c++){
for(int i = r; i < r+filter_size; i++){
for(int j = c; j < c+filter_size; j++){
if(!(i < padding || j < padding ||
i >= (padding + feature_map_size) ||
j >= (padding + feature_map_size))){
tmp_recon[r][c] +=
fm[(i-padding)*feature_map_size + (j-padding)] *
filter[(filter_size-1-(i-r))*filter_size + (filter_size-1-(j-c))];
}
}
}
}
}
}
for(int i = 0; i < input_size; i++){
for(int j = 0; j < input_size; j++){
target[i*input_size+j] = tmp_recon[i+lu_padding][j+lu_padding];
//target[i*input_size+j] = logisitc(tmp_recon[i+lu_padding][j+lu_padding]);
//target_y_v[i*input_size+j] =
// (random_float(0,1) < target[i*input_size+j]) ? 1 : 0;
}
}
bzero(tmp_recon, sizeof(tmp_recon));
}
}
}
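/*
 * CPU_convolution_backward reconstructs the visible layer: it performs a
 * "full" convolution of the hidden feature maps with the 180-degree-flipped
 * filters (note the filter_size-1-... indexing) and then crops the padded
 * border back to input_size x input_size.
 */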
/*
 * Split into a positive phase and a negative phase:
 * if is_init is true, compute the positive phase and initialize dw to 0;
 * if is_init is false, compute the negative phase and accumulate dw -= new_dw.
 */
void CRBM::CPU_compute_d_w(float *v, float *h, float *dw, bool is_init){
float sign;
int lu_padding = left_upper_padding;
if(is_init){
bzero(dw, filter_num * channel_num * filter_size * filter_size * sizeof(float));
sign = 1.0f;
}else{
sign = -1.0f;
}
for(int img = 0; img < input_num; img++){
for(int fil = 0; fil < filter_num; fil++){
float *this_h = h + img * filter_num * feature_map_size * feature_map_size +
fil * feature_map_size * feature_map_size;
for(int cha = 0; cha < channel_num; cha++){
float *this_v = v + img * channel_num * input_size * input_size +
cha * input_size * input_size;
float *this_dw = dw + fil * channel_num * filter_size * filter_size +
cha * filter_size * filter_size;
for(int r = 0; r < filter_size; r++){
for(int c = 0; c < filter_size; c++){
float *cur_v = this_v + (r-lu_padding) * input_size +
(c-lu_padding);
for(int i = 0; i < feature_map_size; i++){
for(int j = 0; j < feature_map_size; j++){
if(!((r+i) < lu_padding ||
(c+j) < lu_padding ||
(r+i) >= (lu_padding+input_size) ||
(c+j) >= (lu_padding+input_size))){
this_dw[r*filter_size+c] +=
sign * cur_v[j] * this_h[i*feature_map_size+j];
}
}
cur_v += input_size;
}
}
}
}
}
}
}
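/*
 * The weight gradient is the usual contrastive-divergence difference of
 * correlations: a first call with is_init=true accumulates <v, h> under the
 * data, a second call with is_init=false subtracts <v, h> under the
 * reconstruction; the caller then rescales the result and applies weight
 * decay and momentum.
 */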
void CRBM::GPU_convolution_forward(float *input, float *filters, float *y_h, float *hbias){
dim3 blocks = dim3(input_size / 32 * filter_num, input_size / 32 * input_num);
dim3 threads = dim3(32, 32);
convolution_forward_kernel<<<blocks, threads>>>(input, filters, y_h,
hbias, input_size, channel_num, feature_map_size, filter_size,
filter_num, left_upper_padding, sigma);
cudaDeviceSynchronize();
}
void CRBM::GPU_max_pooling(float *y_h, float *y_h_probs, float *y_p){
dim3 blocks = dim3(feature_map_size / pooling_rate / 16 * filter_num,
feature_map_size / pooling_rate / 16 * input_num);
dim3 threads = dim3(16, 16);
curandGenerateUniform(rnd_gen, rnd_array, rnd_num);
max_pooling_kernel<<<blocks, threads>>>(y_h, y_h_probs, y_p,
feature_map_size, filter_num, pooling_rate, rnd_array, rnd_num);
cudaDeviceSynchronize();
}
void CRBM::GPU_convolution_backward(float *y_h, float *filters, float *vbias,
float *y_v_probs, float *y_v){
dim3 blocks = dim3(input_size / 16 * channel_num, input_size / 16 * input_num);
dim3 threads = dim3(16, 16);
curandGenerateUniform(rnd_gen, rnd_array, rnd_num);
convolution_backward_kernel<<<blocks, threads>>>(y_h,
filters, vbias, y_v_probs, y_v, input_size, left_upper_padding,
channel_num, feature_map_size, filter_num, filter_size, rnd_array, rnd_num);
cudaDeviceSynchronize();
}
void CRBM::GPU_compute_d_w(float *v, float *h, float *dw, bool is_init){
dim3 blocks = dim3(channel_num * filter_num * feature_map_size / 32,
input_num * feature_map_size / 32);
dim3 threads = dim3(filter_size, filter_size);
compute_d_w_kernel<<<blocks, threads>>>(v, h, dw, is_init, input_size, left_upper_padding,
channel_num, filter_num, filter_size, feature_map_size);
cudaDeviceSynchronize();
}
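/*
 * NOTE: the launch wrappers above synchronize but never inspect the launch
 * status. A minimal check (sketch; check_last_kernel() is not part of the
 * original code and would need <cstdio>) could be called right after each
 * cudaDeviceSynchronize():
 *
 * static void check_last_kernel(const char *tag) {
 * cudaError_t e = cudaGetLastError();
 * if (e != cudaSuccess)
 * fprintf(stderr, "%s failed: %s\n", tag, cudaGetErrorString(e));
 * }
 */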
void CRBM::run_batch(int cur_trial, int cur_image, int cur_batch, Matrix& batch_data){
batch_data.assign(*this->CPU_input);
this->GPU_input->copyFromHost(*this->CPU_input);
if(this->cur_trial > 5)
this->momentum = 0.9;
if(this->cur_image != cur_image && this->sigma > 0.1)
this->sigma *= 0.99;
this->cur_trial = cur_trial;
this->cur_image = cur_image;
this->cur_batch = cur_batch;
cout << "trial : " << cur_trial << " image : " << cur_image << " batch : " << cur_batch << endl;
start();
}
void CRBM::start(){
bool check_equality = false;
bool run_CPU = false;
bool run_GPU = true;
bool check_nan = true;
struct timeval _start_time, _end_time;
if(run_CPU){
/* CPU computation */
/**********************************/
timeFunc(this->CPU_convolution_forward(this->CPU_input->get_data(),
this->CPU_filters->get_data(), this->CPU_y_h->get_data(),
this->CPU_hbias->get_data()), "CPU convolutional forward");
if(check_nan){
assert(this->CPU_input->check_nan());
assert(this->CPU_y_h->check_nan());
}
timeFunc(this->CPU_max_pooling(this->CPU_y_h->get_data(),
this->CPU_y_h_probs->get_data(), this->CPU_y_p->get_data()),
"CPU max pooling");
if(check_nan){
assert(this->CPU_y_h->check_nan());
assert(this->CPU_y_h_probs->check_nan());
}
timeFunc(this->CPU_convolution_backward(this->CPU_y_h->get_data(),
//timeFunc(this->CPU_convolution_backward(this->CPU_y_h_probs->get_data(),
this->CPU_filters->get_data(), this->CPU_vbias->get_data(),
this->CPU_y_v_probs->get_data(), this->CPU_y_v->get_data()),
"CPU convolutional backward");
if(check_nan){
assert(this->CPU_y_v_probs->check_nan());
}
timeFunc(this->CPU_convolution_forward(this->CPU_y_v_probs->get_data(),
this->CPU_filters->get_data(), this->CPU_y_h2->get_data(),
this->CPU_hbias->get_data()), "CPU convolutional forward");
timeFunc(this->CPU_max_pooling(this->CPU_y_h2->get_data(),
this->CPU_y_h2_probs->get_data(), this->CPU_y_p->get_data()),
"CPU max pooling");
timeFunc(this->CPU_compute_d_w(this->CPU_input->get_data(),
this->CPU_y_h_probs->get_data(), this->CPU_d_w->get_data(),
true), "CPU compute dw positive phase");
timeFunc(this->CPU_compute_d_w(this->CPU_y_v_probs->get_data(),
this->CPU_y_h2_probs->get_data(), this->CPU_d_w->get_data(),
false), "CPU compute dw negative phase");
this->CPU_d_w->ele_scale(1.0 / (input_num * feature_map_size * feature_map_size));
this->CPU_d_w->mat_add(*this->CPU_filters, -this->l2reg);
this->CPU_y_h_probs->mat_sum(1, *this->CPU_d_h_sum_tmp);
this->CPU_d_h_sum_tmp->reshape(filter_num, feature_map_size * feature_map_size);
this->CPU_d_h_sum_tmp->mat_sum(0, *this->CPU_d_hbias);
this->CPU_d_h_sum_tmp->reshape(1, filter_num * feature_map_size * feature_map_size);
this->CPU_y_h2_probs->mat_sum(1, *this->CPU_d_h_sum_tmp);
this->CPU_d_h_sum_tmp->reshape(filter_num, feature_map_size * feature_map_size);
this->CPU_d_h_sum_tmp->mat_sum(0, *this->CPU_d_hbias_tmp);
this->CPU_d_h_sum_tmp->reshape(1, filter_num * feature_map_size * feature_map_size);
this->CPU_d_hbias->mat_add(*this->CPU_d_hbias_tmp, -1.0f);
this->CPU_d_hbias->ele_scale(1.0 / (input_num * feature_map_size * feature_map_size));
this->CPU_y_h_probs->mat_sum(1, *this->CPU_d_h_sum_tmp);
this->CPU_d_h_sum_tmp->reshape(filter_num, feature_map_size * feature_map_size);
this->CPU_d_h_sum_tmp->mat_sum(0, *this->CPU_d_hbias_tmp);
this->CPU_d_h_sum_tmp->reshape(1, filter_num * feature_map_size * feature_map_size);
this->CPU_d_hbias_tmp->ele_scale(1.0 / (input_num * feature_map_size * feature_map_size));
this->CPU_d_hbias_tmp->ele_add(-this->ph);
this->CPU_d_hbias_tmp->ele_scale(this->ph_lambda);
this->CPU_d_hbias->mat_add(*this->CPU_d_hbias_tmp, -1.0f);
this->CPU_d_w->mat_add(*this->CPU_d_w_pre, *this->CPU_d_w, epsilon, momentum);
this->CPU_d_w->assign(*this->CPU_d_w_pre);
this->CPU_filters->mat_add(*this->CPU_d_w, 1.0f);
this->CPU_d_hbias->mat_add(*this->CPU_d_hbias_pre, *this->CPU_d_hbias, epsilon, momentum);
this->CPU_d_hbias->assign(*this->CPU_d_hbias_pre);
this->CPU_hbias->mat_add(*this->CPU_d_hbias, 1.0f);
this->CPU_y_v_probs->mat_add(*this->CPU_input, -1.0f);
this->CPU_y_v_probs->mat_mul(*this->CPU_y_v_probs);
float cur_ferr = this->CPU_y_v_probs->ele_mean();
float cur_sparsity = this->CPU_y_h_probs->ele_mean();
this->ferr += cur_ferr;
this->sparsity += cur_sparsity;
}
/**********************************/
if(run_GPU){
/* GPU computation */
/**********************************/
Matrix* tmp = new Matrix(CPU_filters->get_row_num(), this->CPU_filters->get_col_num());
timeFunc(this->GPU_convolution_forward(this->GPU_input->get_data(),
this->GPU_filters->get_data(), this->GPU_y_h->get_data(),
this->GPU_hbias->get_data()), "GPU convolutional forward");
timeFunc(this->GPU_max_pooling(this->GPU_y_h->get_data(),
this->GPU_y_h_probs->get_data(), this->GPU_y_p->get_data()),
"GPU max pooling");
timeFunc(this->GPU_convolution_backward(this->GPU_y_h->get_data(),
//timeFunc(this->GPU_convolution_backward(this->GPU_y_h_probs->get_data(),
this->GPU_filters->get_data(), this->GPU_vbias->get_data(),
this->GPU_y_v_probs->get_data(), this->GPU_y_v->get_data()),
"GPU convolutional backward");
timeFunc(this->GPU_convolution_forward(this->GPU_y_v_probs->get_data(),
this->GPU_filters->get_data(), this->GPU_y_h2->get_data(),
this->GPU_hbias->get_data()), "GPU convolutional forward");
timeFunc(this->GPU_max_pooling(this->GPU_y_h2->get_data(),
this->GPU_y_h2_probs->get_data(), this->GPU_y_p->get_data()),
"GPU max pooling");
this->GPU_d_w->mat_init(0.0f);
timeFunc(this->GPU_compute_d_w(this->GPU_input->get_data(),
this->GPU_y_h_probs->get_data(), this->GPU_d_w->get_data(),
true), "GPU compute dw positive phase");
//this->GPU_d_w->assign(*tmp);
timeFunc(this->GPU_compute_d_w(this->GPU_y_v_probs->get_data(),
this->GPU_y_h2_probs->get_data(), this->GPU_d_w->get_data(),
false), "GPU compute dw negative phase");
//this->GPU_d_w->assign(*tmp);
this->GPU_d_w->ele_scale(1.0 / (input_num * feature_map_size * feature_map_size));
this->GPU_d_w->mat_add(*this->GPU_filters, -this->l2reg);
this->GPU_y_h_probs->mat_sum(1, *this->GPU_d_h_sum_tmp);
this->GPU_d_h_sum_tmp->reshape(filter_num, feature_map_size * feature_map_size);
this->GPU_d_h_sum_tmp->mat_sum(0, *this->GPU_d_hbias);
this->GPU_d_h_sum_tmp->reshape(1, filter_num * feature_map_size * feature_map_size);
this->GPU_y_h2_probs->mat_sum(1, *this->GPU_d_h_sum_tmp);
this->GPU_d_h_sum_tmp->reshape(filter_num, feature_map_size * feature_map_size);
this->GPU_d_h_sum_tmp->mat_sum(0, *this->GPU_d_hbias_tmp);
this->GPU_d_h_sum_tmp->reshape(1, filter_num * feature_map_size * feature_map_size);
this->GPU_d_hbias->mat_add(*this->GPU_d_hbias_tmp, -1.0f);
this->GPU_d_hbias->ele_scale(1.0 / (input_num * feature_map_size * feature_map_size));
this->GPU_y_h_probs->mat_sum(1, *this->GPU_d_h_sum_tmp);
this->GPU_d_h_sum_tmp->reshape(filter_num, feature_map_size * feature_map_size);
this->GPU_d_h_sum_tmp->mat_sum(0, *this->GPU_d_hbias_tmp);
this->GPU_d_h_sum_tmp->reshape(1, filter_num * feature_map_size * feature_map_size);
this->GPU_d_hbias_tmp->ele_scale(1.0 / (input_num * feature_map_size * feature_map_size));
this->GPU_d_hbias_tmp->ele_add(-this->ph);
this->GPU_d_hbias_tmp->ele_scale(this->ph_lambda);
this->GPU_d_hbias->mat_add(*this->GPU_d_hbias_tmp, -1.0f);
this->GPU_d_w->mat_add(*this->GPU_d_w_pre, *this->GPU_d_w, epsilon, momentum);
this->GPU_d_w->assign(*this->GPU_d_w_pre);
this->GPU_filters->mat_add(*this->GPU_d_w, 1.0f);
this->GPU_d_hbias->mat_add(*this->GPU_d_hbias_pre, *this->GPU_d_hbias, epsilon, momentum);
this->GPU_d_hbias->assign(*this->GPU_d_hbias_pre);
this->GPU_hbias->mat_add(*this->GPU_d_hbias, 1.0f);
this->GPU_y_v_probs->mat_add(*this->GPU_input, -1.0f);
this->GPU_y_v_probs->mat_mul(*this->GPU_y_v_probs);
float cur_ferr = this->GPU_y_v_probs->ele_mean();
float cur_sparsity = this->GPU_y_h_probs->ele_mean();
this->ferr += cur_ferr;
this->sparsity += cur_sparsity;
delete tmp;
}
if(check_equality){
/*
* CPU and GPU equality test
*/
/*cout << "y_h : ";
Matrix* tmp_y_h = new Matrix(this->CPU_y_h->get_row_num(),
this->CPU_y_h->get_col_num());
this->GPU_y_h->assign(*tmp_y_h);
this->CPU_y_h->equal_value(*tmp_y_h);
delete tmp_y_h;*/
cout << "y_h_probs : ";
Matrix* tmp_y_h_probs = new Matrix(this->CPU_y_h_probs->get_row_num(),
this->CPU_y_h_probs->get_col_num());
this->GPU_y_h_probs->assign(*tmp_y_h_probs);
this->CPU_y_h_probs->equal_value(*tmp_y_h_probs);
delete tmp_y_h_probs;
cout << "y_v_probs : ";
Matrix* tmp_y_v_probs = new Matrix(this->CPU_y_v_probs->get_row_num(),
this->CPU_y_v_probs->get_col_num());
this->GPU_y_v_probs->assign(*tmp_y_v_probs);
this->CPU_y_v_probs->equal_value(*tmp_y_v_probs);
delete tmp_y_v_probs;
cout << "y_h2_probs : ";
Matrix* tmp_y_h2_probs = new Matrix(this->CPU_y_h2_probs->get_row_num(),
this->CPU_y_h2_probs->get_col_num());
this->GPU_y_h2_probs->assign(*tmp_y_h2_probs);
this->CPU_y_h2_probs->equal_value(*tmp_y_h2_probs);
delete tmp_y_h2_probs;
cout << "d_w : ";
Matrix* tmp_d_w = new Matrix(this->CPU_d_w->get_row_num(),
this->CPU_d_w->get_col_num());
this->GPU_d_w->assign(*tmp_d_w);
this->CPU_d_w->equal_value(*tmp_d_w, 1e-7);
delete tmp_d_w;
cout << "d_hbias : ";
Matrix* tmp_d_hbias = new Matrix(this->CPU_d_hbias->get_row_num(),
this->CPU_d_hbias->get_col_num());
this->GPU_d_hbias->assign(*tmp_d_hbias);
this->CPU_d_hbias->equal_value(*tmp_d_hbias);
delete tmp_d_hbias;
cout << "d_h_sum_tmp : ";
Matrix* tmp_d_h_sum_tmp = new Matrix(this->CPU_d_h_sum_tmp->get_row_num(),
this->CPU_d_h_sum_tmp->get_col_num());
this->GPU_d_h_sum_tmp->assign(*tmp_d_h_sum_tmp);
this->CPU_d_h_sum_tmp->equal_value(*tmp_d_h_sum_tmp);
delete tmp_d_h_sum_tmp;
cout << "filter : ";
Matrix* tmp_filters = new Matrix(this->CPU_filters->get_row_num(),
this->CPU_filters->get_col_num());
this->GPU_filters->assign(*tmp_filters);
this->CPU_filters->equal_value(*tmp_filters);
delete tmp_filters;
cout << "hbias : ";
Matrix* tmp_hbias = new Matrix(this->CPU_hbias->get_row_num(),
this->CPU_hbias->get_col_num());
this->GPU_hbias->assign(*tmp_hbias);
this->CPU_hbias->equal_value(*tmp_hbias);
delete tmp_hbias;
}
}
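/*
 * Sketch of a typical training driver (not part of the original source; the
 * hyper-parameters and the load_batch() helper below are illustrative
 * assumptions, not APIs defined elsewhere in this project):
 *
 * CRBM crbm(24, 10, // filter_num, filter_size
 * 64, 32, 1, // input_num, input_size, channel_num
 * 4, 5, 2, // left_upper_padding, right_low_padding, pooling_rate
 * NULL, NULL, NULL); // random filter / bias initialization
 * for (int trial = 0; trial < num_trials; trial++)
 * for (int img = 0; img < num_images; img++)
 * for (int b = 0; b < num_batches; b++)
 * crbm.run_batch(trial, img, b, load_batch(img, b));
 */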
|
99e464ce1ad72df55c42e24a4ae8df88c49e885c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <THH/THH.h>
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
extern "C"
{
void SpatialMaxPooling_updateOutput(THCState* state, THCudaTensor* input,
THCudaTensor* output, THCudaTensor* indices, int kW, int kH, int dW, int dH);
void SpatialMaxPooling_updateGradInput(THCState* state, THCudaTensor* input,
THCudaTensor* gradInput, THCudaTensor* gradOutput, THCudaTensor* indices, int kW, int kH, int dW, int dH);
}
/*
* Description:
* this function maxpools an input 4D tensor along dimensions 2 and 3
* 4D input, 4D output, 4D argmax x and y
*/
__global__ void maxpool(float *input, float *output, float *indices_x, float *indices_y,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
const int output_w = ceil(float(input_w - kW) / float(dW) + 1);
const int output_h = ceil(float(input_h - kH) / float(dH) + 1);
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
//int k = blockIdx.x % input_n;
int xx_start = threadIdx.x;
int xx_end = output_w;
const int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
const int yy_step = blockDim.y*gridDim.y;
// select input/output plane
output = output + o*output_w*output_h;
input = input + i*input_w*input_h;
indices_x = indices_x + o*output_w*output_h;
indices_y = indices_y + o*output_w*output_h;
// For all output pixels...
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Compute the mean of the input image...
float *ptr_input = input + yy*dH*input_w + xx*dW;
float *ptr_output = output + yy*output_w + xx;
float *ptr_ind_x = indices_x + yy*output_w + xx;
float *ptr_ind_y = indices_y + yy*output_w + xx;
int argmax_x = -1;
int argmax_y = -1;
float max = -FLT_MAX;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++) {
float val = ptr_input[kx];
if (val > max) {
max = val;
argmax_x = kx;
argmax_y = ky;
}
}
ptr_input += input_w; // next input line
}
// Update output and argmax
*ptr_output = max;
*ptr_ind_x = argmax_x + 1;
*ptr_ind_y = argmax_y + 1;
}
}
}
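/*
 * The indices tensor is laid out as two planes per output map: updateOutput
 * below passes indices_x pointing at the second plane (note the
 * +nOutputCols*nOutputRows offset) and indices_y at the first, and the stored
 * argmax offsets are written 1-based to match the Lua-style indexing expected
 * by the Torch front end.
 */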
/*
* Description:
* this function computes the gradInput from weight and gradOutput
*/
__global__ void maxgradinput(float *gradInput, float *gradOutput, float *indices_x, float *indices_y,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = ceil(float(input_w - kW) / float(dW) + 1);
int output_h = ceil(float(input_h - kH) / float(dH) + 1);
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
//int k = blockIdx.x % input_n;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
gradOutput = gradOutput + o*output_w*output_h;
gradInput = gradInput + i*input_w*input_h;
indices_x = indices_x + o*output_w*output_h;
indices_y = indices_y + o*output_w*output_h;
// compute gradInput
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
float *ptr_gradOutput = gradOutput + yy*output_w + xx;
float *ptr_ind_x = indices_x + yy*output_w + xx;
float *ptr_ind_y = indices_y + yy*output_w + xx;
float z = *ptr_gradOutput;
int argmax_x = (*ptr_ind_x)-1;
int argmax_y = (*ptr_ind_y)-1;
ptr_gradInput[argmax_x + argmax_y*input_w] += z;
}
}
}
/*
* Description:
* this function computes the gradInput from weight and gradOutput
* when kH != dH or kW != dW (uses atomic add)
*/
__global__ void atomicmaxgradinput(
float *gradInput, float *gradOutput, float *indices_x, float *indices_y,
int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW
)
{
// iterators
int xx, yy;
// output size
int output_w = ceil(float(input_w - kW) / float(dW) + 1);
int output_h = ceil(float(input_h - kH) / float(dH) + 1);
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
//int k = blockIdx.x % input_n;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
gradOutput = gradOutput + o*output_w*output_h;
gradInput = gradInput + i*input_w*input_h;
indices_x = indices_x + o*output_w*output_h;
indices_y = indices_y + o*output_w*output_h;
// compute gradInput
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
float *ptr_gradOutput = gradOutput + yy*output_w + xx;
float *ptr_ind_x = indices_x + yy*output_w + xx;
float *ptr_ind_y = indices_y + yy*output_w + xx;
float z = *ptr_gradOutput;
int argmax_x = (*ptr_ind_x)-1;
int argmax_y = (*ptr_ind_y)-1;
// atomic add since different threads could update same variable
atomicAdd(&(ptr_gradInput[argmax_x + argmax_y*input_w]), z);
}
}
}
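/*
 * The atomic variant above is needed whenever pooling windows overlap
 * (dW < kW or dH < kH), because two output gradients can then scatter into
 * the same input cell; maxgradinput is only safe for non-overlapping windows.
 * updateGradInput below conservatively switches to the atomic kernel whenever
 * the stride differs from the kernel size.
 */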
void SpatialMaxPooling_updateOutput(THCState* state, THCudaTensor* input,
THCudaTensor* output, THCudaTensor* indices, int kW, int kH, int dW, int dH)
{
float *indices_data;
float *output_data;
float *input_data;
//luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
if (input->nDimension == 3) {
long nInputCols = input->size[2];
long nInputRows = input->size[1];
long nInputPlane = input->size[0];
long nOutputCols = ceil(float(nInputCols - kW) / float(dW) + 1);
long nOutputRows = ceil(float(nInputRows - kH) / float(dH) + 1);
//luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
input = THCudaTensor_newContiguous(state, input);
input_data = THCudaTensor_data(state, input);
THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_resize4d(state, indices, 2, nInputPlane, nOutputRows, nOutputCols);
indices_data = THCudaTensor_data(state, indices);
output_data = THCudaTensor_data(state, output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
// run maxpool kernel
hipLaunchKernelGGL(( maxpool) , dim3(blocks), dim3(threads), 0, 0, input_data, output_data,
indices_data+nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
} else {
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nInputPlane = input->size[1];
long nbatch = input->size[0];
long nOutputCols = ceil(float(nInputCols - kW) / float(dW) + 1);
long nOutputRows = ceil(float(nInputRows - kH) / float(dH) + 1);
//luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
input = THCudaTensor_newContiguous(state, input);
input_data = THCudaTensor_data(state, input);
THCudaTensor_resize4d(state, output, nbatch, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_resize5d(state, indices, 2, nbatch, nInputPlane, nOutputRows, nOutputCols);
indices_data = THCudaTensor_data(state, indices);
output_data = THCudaTensor_data(state, output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
// run maxpool kernel
hipLaunchKernelGGL(( maxpool) , dim3(blocks), dim3(threads), 0, 0, input_data, output_data,
indices_data+nbatch*nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
// clean
THCudaTensor_free(state, input);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in SpatialMaxsampling.updateOutput: %s\n", hipGetErrorString(err));
THError("aborting");
}
}
void SpatialMaxPooling_updateGradInput(THCState* state, THCudaTensor* input,
THCudaTensor* gradInput, THCudaTensor* gradOutput, THCudaTensor* indices, int kW, int kH, int dW, int dH)
{
bool atomic = (dW != kW) || (dH != kH);
float *indices_data;
float *gradInput_data;
float *gradOutput_data;
if (input->nDimension == 3) {
long nInputCols = input->size[2];
long nInputRows = input->size[1];
long nInputPlane = input->size[0];
long nOutputCols = gradOutput->size[2];
long nOutputRows = gradOutput->size[1];
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
indices_data = THCudaTensor_data(state, indices);
gradOutput_data = THCudaTensor_data(state, gradOutput);
gradInput_data = THCudaTensor_data(state, gradInput);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
if(atomic)
{
// run updateGradInput kernel, accumulate gradients atomically
hipLaunchKernelGGL(( atomicmaxgradinput) , dim3(blocks), dim3(threads), 0, 0, gradInput_data, gradOutput_data,
indices_data+nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
else
{
// run updateGradInput kernel
hipLaunchKernelGGL(( maxgradinput) , dim3(blocks), dim3(threads), 0, 0, gradInput_data, gradOutput_data,
indices_data+nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
} else {
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nInputPlane = input->size[1];
long nbatch = input->size[0];
long nOutputCols = gradOutput->size[3];
long nOutputRows = gradOutput->size[2];
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
indices_data = THCudaTensor_data(state, indices);
gradOutput_data = THCudaTensor_data(state, gradOutput);
gradInput_data = THCudaTensor_data(state, gradInput);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
if(atomic)
{
// run updateGradInput kernel, accumulate gradients atomically
hipLaunchKernelGGL(( atomicmaxgradinput) , dim3(blocks), dim3(threads), 0, 0, gradInput_data, gradOutput_data,
indices_data+nbatch*nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
else
{
// run updateGradInput kernel (non-overlapping windows, no atomics needed)
hipLaunchKernelGGL(( maxgradinput) , dim3(blocks), dim3(threads), 0, 0, gradInput_data, gradOutput_data,
indices_data+nbatch*nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
}
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in SpatialMaxsampling.updateGradInput: %s\n", hipGetErrorString(err));
THError("aborting");
}
}
#undef CUDA_MAX_THREADS
| 99e464ce1ad72df55c42e24a4ae8df88c49e885c.cu | #include <THC/THC.h>
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
extern "C"
{
void SpatialMaxPooling_updateOutput(THCState* state, THCudaTensor* input,
THCudaTensor* output, THCudaTensor* indices, int kW, int kH, int dW, int dH);
void SpatialMaxPooling_updateGradInput(THCState* state, THCudaTensor* input,
THCudaTensor* gradInput, THCudaTensor* gradOutput, THCudaTensor* indices, int kW, int kH, int dW, int dH);
}
/*
* Description:
* this function maxpools an input 4D tensor along dimensions 2 and 3
* 4D input, 4D output, 4D argmax x and y
*/
__global__ void maxpool(float *input, float *output, float *indices_x, float *indices_y,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
const int output_w = ceil(float(input_w - kW) / float(dW) + 1);
const int output_h = ceil(float(input_h - kH) / float(dH) + 1);
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
//int k = blockIdx.x % input_n;
int xx_start = threadIdx.x;
int xx_end = output_w;
const int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
const int yy_step = blockDim.y*gridDim.y;
// select input/output plane
output = output + o*output_w*output_h;
input = input + i*input_w*input_h;
indices_x = indices_x + o*output_w*output_h;
indices_y = indices_y + o*output_w*output_h;
// For all output pixels...
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Compute the mean of the input image...
float *ptr_input = input + yy*dH*input_w + xx*dW;
float *ptr_output = output + yy*output_w + xx;
float *ptr_ind_x = indices_x + yy*output_w + xx;
float *ptr_ind_y = indices_y + yy*output_w + xx;
int argmax_x = -1;
int argmax_y = -1;
float max = -FLT_MAX;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++) {
float val = ptr_input[kx];
if (val > max) {
max = val;
argmax_x = kx;
argmax_y = ky;
}
}
ptr_input += input_w; // next input line
}
// Update output and argmax
*ptr_output = max;
*ptr_ind_x = argmax_x + 1;
*ptr_ind_y = argmax_y + 1;
}
}
}
/*
* Description:
* this function computes the gradInput from weight and gradOutput
*/
__global__ void maxgradinput(float *gradInput, float *gradOutput, float *indices_x, float *indices_y,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = ceil(float(input_w - kW) / float(dW) + 1);
int output_h = ceil(float(input_h - kH) / float(dH) + 1);
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
//int k = blockIdx.x % input_n;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
gradOutput = gradOutput + o*output_w*output_h;
gradInput = gradInput + i*input_w*input_h;
indices_x = indices_x + o*output_w*output_h;
indices_y = indices_y + o*output_w*output_h;
// compute gradInput
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
float *ptr_gradOutput = gradOutput + yy*output_w + xx;
float *ptr_ind_x = indices_x + yy*output_w + xx;
float *ptr_ind_y = indices_y + yy*output_w + xx;
float z = *ptr_gradOutput;
int argmax_x = (*ptr_ind_x)-1;
int argmax_y = (*ptr_ind_y)-1;
ptr_gradInput[argmax_x + argmax_y*input_w] += z;
}
}
}
/*
* Description:
* this function computes the gradInput from weight and gradOutput
* when kH != dH or kW != dW (uses atomic add)
*/
__global__ void atomicmaxgradinput(
float *gradInput, float *gradOutput, float *indices_x, float *indices_y,
int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW
)
{
// iterators
int xx, yy;
// output size
int output_w = ceil(float(input_w - kW) / float(dW) + 1);
int output_h = ceil(float(input_h - kH) / float(dH) + 1);
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
//int k = blockIdx.x % input_n;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
gradOutput = gradOutput + o*output_w*output_h;
gradInput = gradInput + i*input_w*input_h;
indices_x = indices_x + o*output_w*output_h;
indices_y = indices_y + o*output_w*output_h;
// compute gradInput
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
float *ptr_gradOutput = gradOutput + yy*output_w + xx;
float *ptr_ind_x = indices_x + yy*output_w + xx;
float *ptr_ind_y = indices_y + yy*output_w + xx;
float z = *ptr_gradOutput;
int argmax_x = (*ptr_ind_x)-1;
int argmax_y = (*ptr_ind_y)-1;
// atomic add since different threads could update same variable
atomicAdd(&(ptr_gradInput[argmax_x + argmax_y*input_w]), z);
}
}
}
void SpatialMaxPooling_updateOutput(THCState* state, THCudaTensor* input,
THCudaTensor* output, THCudaTensor* indices, int kW, int kH, int dW, int dH)
{
float *indices_data;
float *output_data;
float *input_data;
//luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
if (input->nDimension == 3) {
long nInputCols = input->size[2];
long nInputRows = input->size[1];
long nInputPlane = input->size[0];
long nOutputCols = ceil(float(nInputCols - kW) / float(dW) + 1);
long nOutputRows = ceil(float(nInputRows - kH) / float(dH) + 1);
//luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
input = THCudaTensor_newContiguous(state, input);
input_data = THCudaTensor_data(state, input);
THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_resize4d(state, indices, 2, nInputPlane, nOutputRows, nOutputCols);
indices_data = THCudaTensor_data(state, indices);
output_data = THCudaTensor_data(state, output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
// run maxpool kernel
maxpool <<<blocks, threads>>> (input_data, output_data,
indices_data+nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
} else {
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nInputPlane = input->size[1];
long nbatch = input->size[0];
long nOutputCols = ceil(float(nInputCols - kW) / float(dW) + 1);
long nOutputRows = ceil(float(nInputRows - kH) / float(dH) + 1);
//luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
input = THCudaTensor_newContiguous(state, input);
input_data = THCudaTensor_data(state, input);
THCudaTensor_resize4d(state, output, nbatch, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_resize5d(state, indices, 2, nbatch, nInputPlane, nOutputRows, nOutputCols);
indices_data = THCudaTensor_data(state, indices);
output_data = THCudaTensor_data(state, output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
// run maxpool kernel
maxpool <<<blocks, threads>>> (input_data, output_data,
indices_data+nbatch*nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
// clean
THCudaTensor_free(state, input);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in SpatialMaxsampling.updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
}
void SpatialMaxPooling_updateGradInput(THCState* state, THCudaTensor* input,
THCudaTensor* gradInput, THCudaTensor* gradOutput, THCudaTensor* indices, int kW, int kH, int dW, int dH)
{
bool atomic = (dW != kW) || (dH != kH);
float *indices_data;
float *gradInput_data;
float *gradOutput_data;
if (input->nDimension == 3) {
long nInputCols = input->size[2];
long nInputRows = input->size[1];
long nInputPlane = input->size[0];
long nOutputCols = gradOutput->size[2];
long nOutputRows = gradOutput->size[1];
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
indices_data = THCudaTensor_data(state, indices);
gradOutput_data = THCudaTensor_data(state, gradOutput);
gradInput_data = THCudaTensor_data(state, gradInput);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
if(atomic)
{
// run updateGradInput kernel, accumulate gradients atomically
atomicmaxgradinput <<<blocks, threads>>> (gradInput_data, gradOutput_data,
indices_data+nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
else
{
// run updateGradInput kernel
maxgradinput <<<blocks, threads>>> (gradInput_data, gradOutput_data,
indices_data+nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
} else {
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nInputPlane = input->size[1];
long nbatch = input->size[0];
long nOutputCols = gradOutput->size[3];
long nOutputRows = gradOutput->size[2];
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
indices_data = THCudaTensor_data(state, indices);
gradOutput_data = THCudaTensor_data(state, gradOutput);
gradInput_data = THCudaTensor_data(state, gradInput);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
if(atomic)
{
// run updateGradInput kernel, accumulate gradients atomically
atomicmaxgradinput <<<blocks, threads>>> (gradInput_data, gradOutput_data,
indices_data+nbatch*nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
else
{
// run updateGradInput kernel (non-overlapping windows, no atomics needed)
maxgradinput <<<blocks, threads>>> (gradInput_data, gradOutput_data,
indices_data+nbatch*nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
}
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in SpatialMaxsampling.updateGradInput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
}
#undef CUDA_MAX_THREADS
|
c827b15c052428204f51ba6beba1cd0d2b38a1ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
printf("label_value = %g\n",label_value);
printf("ignore_label_ = %g\n",ignore_label_);
printf("has_ignore_label_ = %d\n",has_ignore_label_);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
__global__ void WeightedSoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, const Dtype* weights,
Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
/*if (label_value>0 && label_value != ignore_label_){
printf("label_value = %d\n",label_value);
//printf("ignore_label_ = %d\n",ignore_label_);
//printf("has_ignore_label_ = %d\n",has_ignore_label_);
}*/
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
Dtype weight = weights[n * spatial_dim + s];
//printf("weight = %g\n",weight);
loss[index] = weight *
-log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = weight;
}
}
}
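/*
 * In the weighted variant above, counts[] accumulates the per-pixel loss
 * weights instead of a 0/1 flag, so the valid_count computed in Forward_gpu
 * becomes the total weight and, under VALID normalization, the reported loss
 * is a weighted mean over the non-ignored pixels.
 */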
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
if( bottom.size() == 2) {
// original version with equally weighted pixels
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
} else {
// version with pixel-wise loss weights using a third input blob
hipLaunchKernelGGL(( WeightedSoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label,
bottom[2]->gpu_data(), loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
}
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if ( (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) || bottom.size() == 3) {
caffe_gpu_asum(nthreads, counts, &valid_count);
if( valid_count == 0 ) {
LOG(INFO) << this->type()
<< " warning (Forward_gpu): sum of pixel wise loss weights is zero!";
}
}
if ( valid_count == 0) {
top[0]->mutable_cpu_data()[0] = 0.;
} else {
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
}
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
__global__ void WeightedSoftmaxLossBackwardGPU(const int nthreads,
const Dtype* top, const Dtype* label, const Dtype* weights,
Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
Dtype weight = weights[n * spatial_dim + s];
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] *= weight;
}
counts[index] = weight;
}
}
}
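/*
 * Backward pass: bottom_diff starts as a copy of the softmax probabilities,
 * the kernel subtracts 1 at the ground-truth channel (prob - one_hot) and, in
 * the weighted variant, scales the whole per-pixel gradient by that pixel's
 * loss weight; counts[] again collects the weights used for normalization.
 */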
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
if( bottom.size() == 2) {
// original version with equally weighted pixels
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_,
counts);
} else {
hipLaunchKernelGGL(( WeightedSoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label,
bottom[2]->gpu_data(), bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_,
counts);
}
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if ( (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) || (bottom.size() == 3) ) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
Dtype loss_weight = 0;
if( valid_count == 0) {
LOG(INFO) << this->type()
<< " warning (Backward_gpu): sum of pixel wise loss weights is zero!";
} else {
loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
}
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
| c827b15c052428204f51ba6beba1cd0d2b38a1ff.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
printf("label_value = %g\n",label_value);
printf("ignore_label_ = %g\n",ignore_label_);
printf("has_ignore_label_ = %d\n",has_ignore_label_);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
__global__ void WeightedSoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, const Dtype* weights,
Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
/*if (label_value>0 && label_value != ignore_label_){
printf("label_value = %d\n",label_value);
//printf("ignore_label_ = %d\n",ignore_label_);
//printf("has_ignore_label_ = %d\n",has_ignore_label_);
}*/
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
Dtype weight = weights[n * spatial_dim + s];
//printf("weight = %g\n",weight);
loss[index] = weight *
-log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = weight;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
if( bottom.size() == 2) {
// original version with equally weighted pixels
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
} else {
// version with pixel-wise loss weights using a third input blob
WeightedSoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label,
bottom[2]->gpu_data(), loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
}
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if ( (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) || bottom.size() == 3) {
caffe_gpu_asum(nthreads, counts, &valid_count);
if( valid_count == 0 ) {
LOG(INFO) << this->type()
<< " warning (Forward_gpu): sum of pixel wise loss weights is zero!";
}
}
if ( valid_count == 0) {
top[0]->mutable_cpu_data()[0] = 0.;
} else {
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
}
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
__global__ void WeightedSoftmaxLossBackwardGPU(const int nthreads,
const Dtype* top, const Dtype* label, const Dtype* weights,
Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
Dtype weight = weights[n * spatial_dim + s];
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] *= weight;
}
counts[index] = weight;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
if( bottom.size() == 2) {
// original version with equally weighted pixels
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_,
counts);
} else {
WeightedSoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label,
bottom[2]->gpu_data(), bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_,
counts);
}
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if ( (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) || (bottom.size() == 3) ) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
Dtype loss_weight = 0;
if( valid_count == 0) {
LOG(INFO) << this->type()
<< " warning (Backward_gpu): sum of pixel wise loss weights is zero!";
} else {
loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
}
    caffe_gpu_scal(prob_.count(), loss_weight, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
|
756b38f8946b3fb0cfd1652cc430c2b872ef43b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <helper_cuda.h>
#include "../scene/world.h"
#include "../bvh/bvh.h"
/* BASIC OPERATIONS */
__device__ __host__ float clamp(float x, float a, float b)
{
return max(a, min(b, x));
}
__device__ int clamp(int x, int a, int b)
{
return max(a, min(b, x));
}
// pack a floating-point rgb color into one 32-bit int, 8 bits per channel (B<<16 | G<<8 | R)
__device__ int rgbToInt(float r, float g, float b)
{
r = clamp(r, 0.0f, 255.0f);
g = clamp(g, 0.0f, 255.0f);
b = clamp(b, 0.0f, 255.0f);
return (int(b) << 16) | (int(g) << 8) | int(r);
}
__device__ __host__ glm::vec3 Inttorgb(int x)
{
glm::vec3 rgb;
rgb.b = (x >> 16);
rgb.g = (x >> 8) & 0xff;
rgb.r = (x & 0xff);
return rgb;
}
__device__ __host__
glm::vec3 fetchTex(glm::vec2& uv, int objIndex, uchar3* imagesBuffer, glm::ivec3* imageOffsetBuffer)
{
glm::ivec3& info = imageOffsetBuffer[objIndex];
int offset = info.x;
int iy = info.y;
int ix = info.z;
float x = uv.x;
float y = uv.y;
if (x < 0)
x = x - (int)x + 1;
if (y < 0)
y = y - (int)y + 1;
if (x >= 1)
x = x - (int)x;
if (y >= 1)
y = y - (int)y;
x *= ix;
y *= iy;
int lx = x, ly = y;
int rx = lx + 1, ry = ly + 1;
float wx = x - lx, wy = y - ly;
if (lx < 0)
lx += wx;
if (ly < 0)
ly += wy;
if (rx >= ix)
rx -= ix;
if (ry >= iy)
ry -= iy;
int ind1 = offset + ly * ix + lx;
int ind2 = offset + ly * ix + rx;
int ind3 = offset + ry * ix + lx;
int ind4 = offset + ry * ix + rx;
uchar3& c1 = imagesBuffer[ind1];
uchar3& c2 = imagesBuffer[ind2];
uchar3& c3 = imagesBuffer[ind3];
uchar3& c4 = imagesBuffer[ind4];
float cx = (c1.x * (1 - wx) + c2.x * wx) * (1 - wy) + (c3.x * (1 - wx) + c4.x * wx) * wy;
float cy = (c1.y * (1 - wx) + c2.y * wx) * (1 - wy) + (c3.y * (1 - wx) + c4.y * wx) * wy;
float cz = (c1.z * (1 - wx) + c2.z * wx) * (1 - wy) + (c3.z * (1 - wx) + c4.z * wx) * wy;
return glm::vec3(cz, cy, cx);
}
__device__ __host__
glm::vec3 fetchEnvironment(glm::vec3 ray_d, int imgh, int imgw, uchar3* imagesBuffer)
{
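	// Equirectangular environment lookup: convert the ray direction to (theta, phi)
	// and bilinearly sample the imgh x imgw panorama.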
float theta = acos(ray_d.y) / CV_PI * imgh;
float phi = atan2(ray_d.z, ray_d.x) / CV_PI / 2.0;
if (phi < 0)
phi += 1;
phi += 0.25;
if (phi > 1)
phi -= 1;
phi *= imgw;
int lx = phi, ly = theta;
int rx = lx + 1, ry = ly + 1;
float wx = phi - lx, wy = theta - ly;
if (rx >= imgw)
rx -= imgw;
if (ry >= imgh)
ry -= imgh;
int ind1 = ly * imgw + lx;
int ind2 = ly * imgw + rx;
int ind3 = ry * imgw + lx;
int ind4 = ry * imgw + rx;
uchar3& c1 = imagesBuffer[ind1];
uchar3& c2 = imagesBuffer[ind2];
uchar3& c3 = imagesBuffer[ind3];
uchar3& c4 = imagesBuffer[ind4];
float cx = (c1.x * (1 - wx) + c2.x * wx) * (1 - wy) + (c3.x * (1 - wx) + c4.x * wx) * wy;
float cy = (c1.y * (1 - wx) + c2.y * wx) * (1 - wy) + (c3.y * (1 - wx) + c4.y * wx) * wy;
float cz = (c1.z * (1 - wx) + c2.z * wx) * (1 - wy) + (c3.z * (1 - wx) + c4.z * wx) * wy;
return glm::vec3(cz, cy, cx);
}
/* Intersections */
__device__ __host__
float rayIntersectsTriangle(glm::vec3& p, glm::vec3& d,
glm::vec3& v0, glm::vec3& v1, glm::vec3& v2, float& u, float& v) {
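	// Möller-Trumbore ray/triangle test: returns the hit distance t (or -1 on a miss)
	// and the barycentric coordinates (u, v) of the hit point.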
glm::vec3 e1 = v1 - v0;
glm::vec3 e2 = v2 - v0;
glm::vec3 h = glm::cross(d, e2);
float a = glm::dot(e1, h);
if (a > -0.00001 && a < 0.00001)
return -1;
float f = 1 / a;
glm::vec3 s = p - v0;
u = f * glm::dot(s, h);
if (u < -1e-3 || u > 1 + 1e-3)
return -1;
glm::vec3 q = glm::cross(s, e1);
v = f * glm::dot(d, q);
if (v < -1e-3 || u + v > 1 + 1e-3)
return -1;
// at this stage we can compute t to find out where
// the intersection point is on the line
float t = f * glm::dot(e2, q);
if (t > 0.00001) // ray intersection
return t;
else // this means that there is a line intersection
// but not a ray intersection
return -1;
}
__device__ __host__
void swaps(float& x, float& y) {
float t = x;
x = y, y = t;
}
__device__ __host__
int BoundingBoxIntersect(glm::vec3& ray_o, glm::vec3& ray_t, glm::vec3& minP, glm::vec3& maxP) {
auto r = ray_t + glm::vec3(1e-6, 1e-6, 1e-6);
auto rinv = glm::vec3(1 / r.x, 1 / r.y, 1 / r.z);
float tx1 = (minP.x - ray_o.x)*rinv.x;
float tx2 = (maxP.x - ray_o.x)*rinv.x;
float tmin, tmax;
if (rinv.x > 0)
tmin = tx1, tmax = tx2;
else
tmin = tx2, tmax = tx1;
float ty1 = (minP.y - ray_o.y)*rinv.y;
float ty2 = (maxP.y - ray_o.y)*rinv.y;
if (rinv.y > 0)
tmin = max(tmin, ty1),
tmax = min(tmax, ty2);
else
tmin = max(tmin, ty2),
tmax = min(tmax, ty1);
float tz1 = (minP.z - ray_o.z)*rinv.z;
float tz2 = (maxP.z - ray_o.z)*rinv.z;
if (rinv.z > 0)
tmin = max(tmin, tz1),
tmax = min(tmax, tz2);
else
tmin = max(tmin, tz2),
tmax = min(tmax, tz1);
return tmax >= tmin;
}
/* BVH Intersection */
__device__ __host__
int bvh_index(BVHData* bvh_node) {
return 3 * bvh_node->start_id;
}
__device__ __host__
int bvh_left(BVHData* bvh_node) {
return bvh_node->left_id;
}
__device__ __host__
int bvh_right(BVHData* bvh_node) {
return bvh_node->right_id;
}
__device__ __host__
int bvh_parent(BVHData* bvh_node) {
return bvh_node->parent_id;
}
__device__ __host__
int bvh_dir(BVHData* bvh_node, glm::vec3& ray) {
int axis = bvh_node->axis;
if (axis == 0)
return ray.x > 0;
if (axis == 1)
return ray.y > 0;
return ray.z <= 0;
}
__device__ __host__
float box_intersect(BVHData* bvh_node, glm::vec3& ray_o, glm::vec3& ray_t) {
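	// Slab test against the node's AABB; returns the entry distance along the ray
	// (clamped to 0 when the origin is inside the box) or -1 on a miss.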
float step = 1.0 / 11;
float tmp = 0;
float a1, a2, b1, b2, c1, c2;
a1 = (bvh_node->minCorner.x - ray_o.x);
a2 = (bvh_node->maxCorner.x - ray_o.x);
if (ray_t.x < 1e-6 && ray_t.x > -1e-6) {
if (a1 * a2 > 1e-4)
return -1;
a1 = -1e30; a2 = 1e30;
}
else {
a1 /= ray_t.x;
a2 /= ray_t.x;
}
if (a1 > a2) {
tmp = a1; a1 = a2; a2 = tmp;
}
b1 = (bvh_node->minCorner.y - ray_o.y);
b2 = (bvh_node->maxCorner.y - ray_o.y);
if (ray_t.y < 1e-6 && ray_t.y > -1e-6) {
if (b1 * b2 > 1e-4)
return -1;
b1 = -1e30; b2 = 1e30;
}
else {
b1 /= ray_t.y;
b2 /= ray_t.y;
}
if (b1 > b2) {
tmp = b1; b1 = b2; b2 = tmp;
}
c1 = (bvh_node->minCorner.z - ray_o.z);
c2 = (bvh_node->maxCorner.z - ray_o.z);
if (ray_t.z < 1e-6 && ray_t.z > -1e-6) {
if (c1 * c2 > 1e-4)
return -1;
c1 = -1e30; c2 = 1e30;
}
else {
c1 /= ray_t.z;
c2 /= ray_t.z;
}
if (c1 > c2) {
tmp = c1; c1 = c2; c2 = tmp;
}
float t1, t2;
t1 = max(a1, max(b1, c1));
t2 = min(a2, min(b2, c2));
if (t2 >= t1 && t2 >= 0)
return (t1 > 0) ? t1 : 0;
else
return -1;
}
__device__ __host__
float bvh_intersect(glm::vec3& ray_o, glm::vec3& ray_t, int& index, float& u, float& v,
glm::vec3* vertexBuffer, BVHData* bvh) {
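	// Stackless BVH traversal: descend first into the child chosen by bvh_dir
	// (the near side of the split axis) and climb back up through parent pointers;
	// last_node remembers which child we just returned from.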
float depth = 1e30;
index = -1;
BVHData* bvh_node = bvh;
BVHData* last_node = 0;
float u1, v1;
int t = 0;
while (bvh_node >= bvh) {
t += 1;
if (last_node == 0) {
float cur_depth = box_intersect(bvh_node, ray_o, ray_t);
if (cur_depth < 0 || cur_depth > depth) {
last_node = bvh_node;
bvh_node = bvh + bvh_parent(bvh_node);
continue;
}
if (bvh_left(bvh_node) < 0) {
int cur_index = bvh_index(bvh_node);
cur_depth = rayIntersectsTriangle(ray_o, ray_t, vertexBuffer[cur_index],
vertexBuffer[cur_index + 1], vertexBuffer[cur_index + 2], u1, v1);
if (cur_depth >= 0 && cur_depth < depth) {
index = cur_index;
u = u1;
v = v1;
depth = cur_depth;
}
last_node = bvh_node;
bvh_node = bvh + bvh_parent(bvh_node);
continue;
}
else {
last_node = 0;
if (bvh_dir(bvh_node, ray_t)) {
bvh_node = bvh + bvh_left(bvh_node);
}
else {
bvh_node = bvh + bvh_right(bvh_node);
}
}
}
else {
bool dir = bvh_dir(bvh_node, ray_t);
BVHData* left_node = bvh + bvh_left(bvh_node);
BVHData* right_node = bvh + bvh_right(bvh_node);
if (dir && left_node == last_node) {
last_node = 0;
bvh_node = bvh + bvh_right(bvh_node);
}
else
if (!dir && right_node == last_node) {
last_node = 0;
bvh_node = bvh + bvh_left(bvh_node);
}
else {
last_node = bvh_node;
bvh_node = bvh + bvh_parent(bvh_node);
}
}
}
return depth;
}
/* Tracing Algorithm */
//#define BVH_
__device__ __host__
float tracing(glm::vec3& ray_o, glm::vec3& ray_t, float shadow, int& tri, int& obj, glm::vec3& hit_point, glm::vec2& uv, glm::vec3& normal,
InstanceData* instanceData, glm::vec3* vertexBuffer, glm::vec3* normalBuffer, glm::vec2* texBuffer, int num_object,
BVHData* bvh) {
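	// Closest-hit query over the whole scene; the BVH_ macro selects between the
	// brute-force per-triangle loop and the BVH traversal. When shadow >= 0 the call
	// is an occlusion test that returns as soon as any hit closer than `shadow` is found.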
#ifndef BVH_
float depth = 1e30;
obj = -1;
tri = -1;
int j = 0;
for (int k = 0; k < num_object; ++k) {
int next_object = instanceData[k].s;
if ((k < 1 || k >= 7) && !BoundingBoxIntersect(ray_o, ray_t, instanceData[k].minPos, instanceData[k].maxPos)) {
j = next_object;
continue;
}
while (j < next_object) {
glm::vec3& v1 = vertexBuffer[j];
glm::vec3& v2 = vertexBuffer[j + 1];
glm::vec3& v3 = vertexBuffer[j + 2];
float u, v;
float t = rayIntersectsTriangle(ray_o, ray_t, v1, v2, v3, u, v);
if (t > 1e-2 && t < depth) {
depth = t;
hit_point = ((ray_o + ray_t * depth));
if (shadow >= 0) {
if (t < shadow) {
return t;
}
}
else {
obj = k;
tri = j;
glm::vec3& n1 = normalBuffer[j];
glm::vec3& n2 = normalBuffer[j + 1];
glm::vec3& n3 = normalBuffer[j + 2];
normal = u * (n2 - n1) + v * (n3 - n1) + n1;
glm::vec2& uv1 = texBuffer[j];
glm::vec2& uv2 = texBuffer[j + 1];
glm::vec2& uv3 = texBuffer[j + 2];
uv = uv1 + u * (uv2 - uv1) + v * (uv3 - uv1);
}
}
j += 3;
}
}
normal = normalize(normal);
return depth;
#else
float depth = 1e30;
obj = -1;
tri = -1;
for (int k = 0; k < num_object; ++k) {
int index;
float u, v;
float t = bvh_intersect(ray_o, ray_t, index, u, v, vertexBuffer, bvh + instanceData[k].bvh_offset);
if (t > 1e-2 && t < depth) {
depth = t;
hit_point = ((ray_o + ray_t * depth));
if (shadow >= 0) {
if (t < shadow) {
return t;
}
}
else {
obj = k;
tri = index;
glm::vec3& n1 = normalBuffer[tri];
glm::vec3& n2 = normalBuffer[tri + 1];
glm::vec3& n3 = normalBuffer[tri + 2];
normal = u * (n2 - n1) + v * (n3 - n1) + n1;
glm::vec2& uv1 = texBuffer[tri];
glm::vec2& uv2 = texBuffer[tri + 1];
glm::vec2& uv3 = texBuffer[tri + 2];
uv = uv1 + u * (uv2 - uv1) + v * (uv3 - uv1);
}
}
}
normal = normalize(normal);
return depth;
#endif
}
__device__ __host__
glm::vec3 lighting(glm::vec3& start_camera, glm::vec3& point, glm::vec3& normal, int tri_index, glm::vec2& uv, int obj_index,
InstanceData* instanceData, glm::vec3* vertexBuffer, glm::vec3* normalBuffer, glm::vec2* texBuffer, int num_object,
int num_direct_light, glm::vec3* direct_lights, glm::vec3* direct_lights_color,
int num_point_light, glm::vec3* point_lights, glm::vec3* point_lights_color, glm::vec3& ambient,
uchar3* imagesBuffer, glm::ivec3* imageOffsetBuffer, glm::vec3& orig_color, glm::vec3* causticMap, BVHData* bvh, float depth, uchar3* environment,
glm::vec3& ray_t, glm::vec3* scatterMap, glm::vec3* scatterPosMap, float* shadowMap, int render_mode) {
float kd = instanceData[obj_index].kd;
float ks = instanceData[obj_index].ks;//texture2D(materialSampler, vec2(1.5 / MATERIAL_LEN, (obj_index + 0.5) / num_object)).r;
float ka = instanceData[obj_index].ka;// texture2D(materialSampler, vec2(16.5 / MATERIAL_LEN, (obj_index + 0.5) / num_object)).r;
float alpha = instanceData[obj_index].alpha;// texture2D(materialSampler, vec2(20.5 / MATERIAL_LEN, (obj_index + 0.5) / num_object)).r;
if (depth > 1000)
orig_color = fetchEnvironment(ray_t, 3000, 6000, environment);
else
orig_color = fetchTex(uv, obj_index, imagesBuffer, imageOffsetBuffer);
// int tex = int(0.1 + texture2D(materialSampler, vec2(2.5 / MATERIAL_LEN, (obj_index + 0.5) / num_object)).r);
// orig_color = texture2D(renderSampler[tex], uv).rgb;
glm::vec3 color = ka * orig_color * ambient;
glm::vec3 eye_dir = normalize(start_camera - point);
int t1, t2;
glm::vec2 v1;
glm::vec3 v2, v3;
for (int i = 0; i < num_direct_light; ++i) {
float intensity = glm::dot(-direct_lights[i], normal) * glm::dot(eye_dir, normal);
if (intensity < 0)
continue;
float depth = tracing(point, -direct_lights[i], 100, t1, t2, v2, v1, v3, instanceData, vertexBuffer, normalBuffer, texBuffer, num_object, bvh);
if (obj_index == 0) {
float rx = (point.x - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
float ry = (point.z - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
if (rx >= 0 && ry >= 0 && rx < CAUSTIC_W && ry < CAUSTIC_W) {
int lx = rx, ly = ry;
int rrx = lx + 1, rry = ly + 1;
float wx = rx - lx, wy = ry - ly;
float shadow1 = shadowMap[ly * CAUSTIC_W + lx];
float shadow2 = shadowMap[ly * CAUSTIC_W + rrx];
float shadow3 = shadowMap[rry * CAUSTIC_W + lx];
float shadow4 = shadowMap[rry * CAUSTIC_W + rrx];
float shadow = (shadow1 * (1 - wx) + shadow2 * wx) * (1 - wy) + (shadow3 * (1 - wx) + shadow4 * wx) * wy;
intensity *= shadow;
} else
continue;
}
color += intensity * (orig_color * direct_lights_color[i] * kd
+ clamp((float)pow(glm::dot(glm::reflect(direct_lights[i], normal), eye_dir), alpha), 0.0f, 1.f) * ks * direct_lights_color[i]);
}
for (int i = 0; i < num_point_light; ++i) {
glm::vec3 dis = point - point_lights[i];
float len = glm::length(dis);
float l = 1 / (len * len);
dis = normalize(dis);
float intensity = glm::dot(-dis, normal) * glm::dot(eye_dir, normal);
if (intensity < 0)
continue;
float depth = tracing(point, -dis, len, t1, t2, v2, v1, v3, instanceData, vertexBuffer, normalBuffer, texBuffer, num_object, bvh);
if (depth < len)
continue;
glm::vec3 para = kd * l * point_lights_color[i];
color = color + intensity * (orig_color * para
+ clamp((float)pow(dot(reflect(dis, normal), eye_dir), alpha), 0.f, 1.f) * ks * point_lights_color[i]);
}
if (obj_index == 0) {
float rx = (point.x - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
float ry = (point.z - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
if (rx < CAUSTIC_W - 1 && ry < CAUSTIC_W-1 && rx >= 0 && ry >= 0) {
int lx = rx, ly = ry;
int rrx = lx + 1, rry = ly + 1;
float wx = rx - lx, wy = ry - ly;
glm::vec3& caustic1 = causticMap[ly * CAUSTIC_W + lx];
glm::vec3& caustic2 = causticMap[ly * CAUSTIC_W + rrx];
glm::vec3& caustic3 = causticMap[rry * CAUSTIC_W + lx];
glm::vec3& caustic4 = causticMap[rry * CAUSTIC_W + rrx];
glm::vec3 caustic = (caustic1 * (1 - wx) + caustic2 * wx) * (1 - wy) + (caustic3 * (1 - wx) + caustic4 * wx) * wy;
if (render_mode == 2) {
color = caustic;
return color;
}
color = color + glm::dot(eye_dir, normal) * kd * caustic;
float max_v = max(max(color.x, color.y), color.z) / 255.0f;
if (max_v > 1)
color /= max_v;
}
}
if (instanceData[obj_index].kt > 1e-3) {
float radius = 0.4f;
glm::vec3 lightp = point + -point.y / direct_lights[0].y * direct_lights[0];
float x = (lightp.x - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
float y = (lightp.z - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
int tx = x, ty = y;
if (tx < 0 || tx >= CAUSTIC_W || ty < 0 || ty >= CAUSTIC_W)
return glm::vec3(0,0,0);
int bandwidth = radius / abs(direct_lights[0].y) / CAUSTIC_MAP_DIS / 2;
glm::vec3 lights(0, 0, 0);
for (int ly = y - bandwidth; ly <= y + bandwidth; ++ly) {
for (int lx = x - bandwidth; lx <= x + bandwidth; ++lx) {
if (ly < 0 || ly >= CAUSTIC_W || lx < 0 || lx >= CAUSTIC_W)
continue;
float r = glm::length(point - scatterPosMap[ly * CAUSTIC_W + lx]);
float weight = exp(-(r*r) / (radius*radius * 2)) * 2.5e-3 / (radius * radius);
lights += weight * scatterMap[ly * CAUSTIC_W + lx];
}
}
if (render_mode == 5) {
return (lights * 255.0f);
}
lights.x = clamp(lights.x, 0.f, 1.f);
lights.y = clamp(lights.y, 0.f, 1.f);
lights.z = clamp(lights.z, 0.f, 1.f);
color += lights * orig_color;
color.x = clamp(color.x, 0.0f, 255.f);
color.y = clamp(color.y, 0.0f, 255.f);
color.z = clamp(color.z, 0.0f, 255.f);
}
return color;
}
__global__ void
render(unsigned int *g_odata, int imgw, int imgh,
glm::vec3 cam_up, glm::vec3 cam_forward, glm::vec3 right, glm::vec3 cam_pos, float dis_per_pix,
InstanceData* instanceData, glm::vec3* vertexBuffer, glm::vec3* normalBuffer, glm::vec2* texBuffer, int num_object,
int num_direct_lights, glm::vec3* direct_lights, glm::vec3* direct_lights_color,
int num_point_lights, glm::vec3* point_lights, glm::vec3* point_lights_color, glm::vec3 ambient,
uchar3* imagesBuffer, glm::ivec3* imageOffsetBuffer,
glm::vec3* causticMap, BVHData* bvh, uchar3* environment, glm::vec3* scatterMap, glm::vec3* scatterPos, float* shadowMap, int render_mode)
{
extern __shared__ uchar4 sdata[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
if (render_mode == 1) {
g_odata[y * imgw + x] = rgbToInt(causticMap[y * imgw + x].x, causticMap[y * imgw + x].y, causticMap[y * imgw + x].z);
return;
}
if (render_mode == 4) {
g_odata[y * imgw + x] = rgbToInt(scatterMap[y * imgw + x].x * 255, scatterMap[y * imgw + x].y * 255, scatterMap[y * imgw + x].z * 255);
return;
}
glm::vec3 ray_p = cam_pos;
glm::vec3 ray_d = glm::normalize(cam_forward + (x - imgw / 2) * dis_per_pix * right + (y - imgh / 2) * dis_per_pix * cam_up);
glm::vec3 color(0, 0, 0);
int tri_index, obj_index;
int path_state[PATH_DEPTH];
int mat_stack[PATH_DEPTH];
glm::vec3 light_stack[PATH_DEPTH];
glm::vec3 color_stack[PATH_DEPTH];
glm::vec3 from_stack[PATH_DEPTH];
glm::vec3 to_stack[PATH_DEPTH];
glm::vec3 normal_stack[PATH_DEPTH];
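	// The stacks above drive an iterative (non-recursive) path tracer. path_state per node:
	// 0 = trace the ray, shade the hit, and spawn a reflection child if kr > 0;
	// 1 = spawn a refraction child if kf > 0 (or a reflection child on total internal reflection);
	// 2/3 = finished: fold this node's light into its parent and pop.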
int node = 0;
path_state[node] = 0;
from_stack[node] = ray_p;
to_stack[node] = ray_d;
color_stack[node] = glm::vec3(0, 0, 0);
light_stack[node] = glm::vec3(0, 0, 0);
float nr;
int hit_mat = 0;
glm::vec3 hit_point;
glm::vec2 uv;
glm::vec3 normal;
while (node >= 0) {
if (path_state[node] == 0) {
path_state[node] = 1;
float depth;
depth = tracing(from_stack[node], to_stack[node], -1, tri_index, obj_index, hit_point, uv, normal, instanceData, vertexBuffer, normalBuffer, texBuffer, num_object, bvh);
if (depth < 1e20) {
glm::vec3 orig_color;
light_stack[node] = lighting(from_stack[node], hit_point, normal, tri_index, uv, obj_index, instanceData, vertexBuffer, normalBuffer, texBuffer, num_object,
num_direct_lights, direct_lights, direct_lights_color, num_point_lights, point_lights, point_lights_color, ambient,
imagesBuffer, imageOffsetBuffer, orig_color, causticMap, bvh, depth, environment, to_stack[node], scatterMap, scatterPos, shadowMap, render_mode);
color_stack[node] = orig_color;
normal_stack[node] = normal;
ray_d = to_stack[node];
to_stack[node] = hit_point;
mat_stack[node] = obj_index;
float kr = instanceData[obj_index].kr;
if (kr > 0 && node < PATH_DEPTH - 1) {
color_stack[node] = instanceData[obj_index].kr * color_stack[node];
node += 1;
path_state[node] = 0;
from_stack[node] = hit_point;
to_stack[node] = ray_d - 2 * glm::dot(ray_d, normal) * normal;
light_stack[node] = glm::vec3(0, 0, 0);
continue;
}
}
else {
path_state[node] = 3;
}
}
if (path_state[node] == 1) {
path_state[node] = 2;
obj_index = mat_stack[node];
float kf = instanceData[obj_index].kf;
if (kf > 0 && node < PATH_DEPTH - 1) {
nr = instanceData[obj_index].nr;
normal = normal_stack[node];
ray_d = glm::normalize(to_stack[node] - from_stack[node]);
float cost = glm::dot(normal, ray_d);
if (cost < 0) {
nr = 1 / nr;
cost = -cost;
}
else {
normal = -normal;
}
float rootContent = 1 - nr * nr * (1 - cost * cost);
if (rootContent >= 0) {
color_stack[node] = instanceData[obj_index].kf * color_stack[node];
rootContent = sqrt(rootContent);
node += 1;
path_state[node] = 0;
from_stack[node] = to_stack[node - 1];
to_stack[node] = (nr * cost - rootContent) * normal + nr * ray_d;
light_stack[node] = glm::vec3(0, 0, 0);
continue;
}
else {
float kr = 1;
if (kr > 0 && node < PATH_DEPTH - 1) {
light_stack[node] = glm::vec3(0, 0, 0);
node += 1;
path_state[node] = 0;
from_stack[node] = to_stack[node - 1];
to_stack[node] = ray_d - 2 * glm::dot(ray_d, normal) * normal;
light_stack[node] = glm::vec3(0, 0, 0);
continue;
}
else {
g_odata[y*imgw + x] = 0;
return;
}
}
}
}
if (path_state[node] == 2) {
path_state[node] = 3;
obj_index = mat_stack[node];
}
if (path_state[node] == 3) {
if (node == 0)
break;
int obj_index = mat_stack[node - 1];
if (path_state[node - 1] == 1) {
light_stack[node - 1] = (1 - instanceData[obj_index].kr) * light_stack[node - 1]
+ color_stack[node - 1] * light_stack[node] / 255.0f;
}
else
if (path_state[node - 1] == 2) {
light_stack[node - 1] = (1 - instanceData[obj_index].kf) * light_stack[node - 1]
+ color_stack[node - 1] * light_stack[node] / 255.0f;
}
else {
hit_mat -= 1;
normal = normal_stack[node - 1];
ray_d = glm::normalize(to_stack[node - 1] - from_stack[node - 1]);
float alpha = instanceData[obj_index].alpha;
light_stack[node - 1] = (1 - instanceData[obj_index].ks) * light_stack[node - 1]
+ instanceData[obj_index].ks * color_stack[node - 1] * light_stack[node] * glm::dot(-ray_d, normal) / 255.0f;
}
node -= 1;
}
}
uchar4 c4 = make_uchar4(light_stack[0].r, light_stack[0].g, light_stack[0].b, 255);
g_odata[y*imgw + x] = rgbToInt(c4.x, c4.y, c4.z);
}
/* Filtering */
__global__ void
filter(unsigned int *g_odata, int imgw, int imgh) {
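	// Hole-filling pass: pixels equal to 0 (black, or the value written for discarded paths)
	// are replaced by the average of the non-zero pixels in an 11x11 neighborhood.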
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
int id = y * imgw + x;
if (g_odata[id] == 0) {
glm::vec3 rgb(0, 0, 0);
int count = 0;
for (int dx = -5; dx <= 5; ++dx) {
for (int dy = -5; dy <= 5; ++dy) {
int nx = x + dx;
int ny = y + dy;
if (nx >= 0 && nx < imgw && ny >= 0 && ny < imgh) {
int nid = ny * imgw + nx;
if (g_odata[nid] != 0) {
count += 1;
rgb += Inttorgb(g_odata[nid]);
}
}
}
}
if (count > 0)
g_odata[id] = rgbToInt(rgb.r / count, rgb.g / count, rgb.b / count);
else
g_odata[id] = rgbToInt(0, 0, 0);
}
}
__global__ void
FilterCaustic(glm::ivec3* causticMap, glm::vec3* causticBuffer, int imgw, int imgh) {
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
int id = y * imgw + x;
auto& pix = causticMap[id];
int temp[3][3] =
{
{ 1, 2, 1 },
{ 2, 4, 2 },
{ 1, 2, 1 }
};
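	// NOTE: the "|| true" below forces the 3x3 weighted blur for every texel,
	// so the pass-through else branch is effectively dead code.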
if (pix.x == 0 && pix.y == 0 && pix.z == 0 || true) {
glm::ivec4 pt;
for (int py = y - 1; py <= y + 1; ++py) {
if (py < 0 || py >= imgh)
continue;
for (int px = x - 1; px <= x + 1; ++px) {
if (px < 0 || px >= imgw)
continue;
int dy = py - y + 1;
int dx = px - x + 1;
auto& p = causticMap[py * imgw + px];
if (p.x != 0 || p.y != 0 || p.z != 0) {
pt += glm::ivec4(p, 1) * temp[dy][dx];
}
}
}
if (pt.w > 0)
causticBuffer[id] = glm::vec3((float)pt.x / pt.w, (float)pt.y / pt.w, (float)pt.z / pt.w);
else
causticBuffer[id] = glm::vec3(0, 0, 0);
}
else {
causticBuffer[id] = glm::vec3(pix.x, pix.y, pix.z);
}
}
/* Caustic Rendering */
__device__ __host__
glm::vec3 projectCaustic(glm::vec3& ray_o, glm::vec3& ray_t, glm::vec3 &color,
InstanceData* instanceData, glm::vec3* vertexBuffer, glm::vec3* normalBuffer, glm::vec2* texBuffer, int num_object,
glm::vec3& light, glm::vec2& coords, uchar3* texImages, glm::ivec3* imageOffsets, BVHData* bvh, glm::vec3& scatterPos, float& softShadow) {
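	// Traces one light ray through at most three reflective/refractive bounces.
	// softShadow records whether the un-bounced ray hits the floor (object 0) directly.
	// Translucent surfaces (kt > 0) store the hit in scatterPos and return the incoming energy;
	// rays that end on the floor write their caustic-map coordinates and color to coords/light.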
int tri_index, obj_index;
glm::vec3 hit_point, normal;
glm::vec2 uv;
float depth = tracing(ray_o, ray_t, -1, tri_index, obj_index, hit_point, uv, normal, instanceData, vertexBuffer, normalBuffer, texBuffer, num_object, bvh);
if (obj_index == 0)
softShadow = 1;
else
softShadow = 0;
glm::vec3 orig_color = fetchTex(uv, obj_index, texImages, imageOffsets) / 255.0f;
int steps = 0;
float intensity = 1;
while (depth < 1e20 && (instanceData[obj_index].kr > 1e-3 || instanceData[obj_index].kf > 1e-3 || instanceData[obj_index].kt > 1e-3)) {
if (instanceData[obj_index].kt > 1e-3) {
float x = (hit_point.x - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
float y = (hit_point.z - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
scatterPos = hit_point;
return color * -glm::dot(normal, ray_t);
}
if (instanceData[obj_index].kf != 0) {
float nr = instanceData[obj_index].nr;
float cost = glm::dot(normal, ray_t);
if (cost < 0) {
nr = 1 / nr;
cost = -cost;
}
else {
normal = -normal;
}
float rootContent = 1 - nr * nr * (1 - cost * cost);
if (rootContent >= 0) {
ray_o = glm::vec3(hit_point.x, hit_point.y, hit_point.z);
ray_t = (nr * cost - sqrt(rootContent)) * normal + nr * ray_t;
intensity *= instanceData[obj_index].kf * 0.6;
}
else {
ray_o = glm::vec3(hit_point.x, hit_point.y, hit_point.z);
ray_t = glm::reflect(ray_t, glm::vec3(normal.x, normal.y, normal.z));
}
}
else if (instanceData[obj_index].kr != 0) {
ray_o = glm::vec3(hit_point.x, hit_point.y, hit_point.z);
ray_t = glm::reflect(ray_t, glm::vec3(normal.x, normal.y, normal.z));
intensity *= instanceData[obj_index].kr;
}
steps++;
if (steps > 2)
break;
depth = tracing(ray_o, ray_t, -1, tri_index, obj_index, hit_point, uv, normal, instanceData, vertexBuffer, normalBuffer, texBuffer, num_object, bvh);
}
if (obj_index == 0 && steps > 0) {
float x = (hit_point.x - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
float y = (hit_point.z - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
coords = glm::vec2(x, y);
light = intensity * color * orig_color;
}
else {
coords = glm::vec2(-1, -1);
light = glm::vec3(0, 0, 0);
}
return glm::vec3(0, 0, 0);
}
__global__ void
ClearBuffer(glm::ivec3 *g_odata, glm::vec3* g_light, float* shadow, int imgw, int imgh) {
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
g_odata[y * imgw + x] = glm::ivec3(0, 0, 0);
g_light[y * imgw + x] = glm::vec3(0, 0, 0);
shadow[y * imgw + x] = 0;
}
__global__ void
FilterShadow(float* input, float* output, int imgw, int imgh) {
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
int total = 0;
float shad = 0;
for (int dy = y - 3; dy <= y + 3; ++dy) {
for (int dx = x - 3; dx <= x + 3; ++dx) {
if (dy >= 0 && dy < imgh && dx >= 0 && dx < imgw)
{
shad += input[dy * imgw + dx];
total += 1;
}
}
}
if (total != 0)
output[y * imgw + x] = shad / total;
else
output[y * imgw + x] = 0;
}
__global__ void
CausticRender(glm::vec3 *causticMap, glm::vec2* cuasticCoords, int imgw, int imgh,
InstanceData* instanceData, glm::vec3* vertexBuffer, glm::vec3* normalBuffer, glm::vec2* texBuffer, int num_object,
glm::vec3 dir, glm::vec3 color, uchar3* texImages, glm::ivec3* imageOffsets, BVHData* bvh, glm::vec3* scatterBuffer, glm::vec3* scatterPos, float* softShadow) {
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
glm::vec3 point(x * CAUSTIC_MAP_DIS + CAUSTIC_X_MIN, 0, y * CAUSTIC_MAP_DIS + CAUSTIC_X_MIN);
scatterBuffer[y * CAUSTIC_W + x] = projectCaustic(point - dir * 1000.0f, dir, color, instanceData, vertexBuffer, normalBuffer, texBuffer, num_object,
causticMap[y * CAUSTIC_W + x], cuasticCoords[y * imgw + x], texImages, imageOffsets, bvh, scatterPos[y * CAUSTIC_W + x], softShadow[y * CAUSTIC_W + x]);
}
__global__ void
combineCaustic(unsigned int *g_odata, glm::ivec3* causticMap, int imgw, int imgh,
glm::vec3 cam_up, glm::vec3 cam_forward, glm::vec3 right, glm::vec3 cam_pos, float dis_per_pix,
InstanceData* instanceData, glm::vec3* vertexBuffer, glm::vec3* normalBuffer, glm::vec2* texBuffer, int num_object, BVHData* bvh) {
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
glm::vec3 ray_p = cam_pos;
glm::vec3 ray_d = glm::normalize(cam_forward + (x - imgw / 2) * dis_per_pix * right + (y - imgh / 2) * dis_per_pix * cam_up);
glm::vec3 color(0, 0, 0);
int tri_index, obj_index;
glm::vec3 hit_point, normal;
glm::vec2 uv;
float depth;
depth = tracing(ray_p, ray_d, -1, tri_index, obj_index, hit_point, uv, normal, instanceData, vertexBuffer, normalBuffer, texBuffer, num_object, bvh);
if (obj_index == 0) {
int rx = (hit_point.x - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
int ry = (hit_point.z - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
if (rx < CAUSTIC_W && ry < CAUSTIC_W && rx >= 0 && ry >= 0) {
auto& p = causticMap[ry * CAUSTIC_W + rx];
glm::vec3 np = Inttorgb(g_odata[y * imgw + x]);
np += p;
np.x = clamp(np.x, 0.f, 255.f);
np.y = clamp(np.y, 0.f, 255.f);
np.z = clamp(np.z, 0.f, 255.f);
np = glm::vec3(255, 0, 0);
g_odata[y * imgw + x] = rgbToInt(p.x, p.y, p.z);
}
}
}
__global__ void
SplatCaustic(glm::vec3* caustics, glm::vec2* causticCoords, glm::ivec3* causticMaps, int imgw, int imgh) {
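	// Splat one quad of the caustic photon grid: bilinearly subdivide the four corner
	// samples into steps x steps sub-samples and accumulate them into the caustic map
	// with atomicAdd. The 255/(steps*steps) weight keeps the deposited energy roughly
	// independent of how much the quad was stretched; quads stretched beyond 15 texels are discarded.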
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
int ry = y + 1, rx = x + 1;
if (ry < imgh && rx < imgw) {
int id[4];
id[0] = y * imgw + x;
id[1] = id[0] + 1;
id[2] = id[0] + imgw;
id[3] = id[2] + 1;
float minX = 1e20f, maxX = -1e20f, minY = 1e20f, maxY = -1e20f;
for (int i = 0; i < 4; ++i) {
auto& p = causticCoords[id[i]];
if (causticCoords[id[i]].x < 0)
return;
if (p.x < minX)
minX = p.x;
if (p.x > maxX)
maxX = p.x;
if (p.y < minY)
minY = p.y;
if (p.y > maxY)
maxY = p.y;
}
if (maxX - minX > 15 || maxY - minY > 15)
return;
int stepX = (maxX - minX) + 1;
int stepY = (maxY - minY) + 1;
int steps;
if (stepX > stepY)
steps = stepX;
else
steps = stepY;
if (steps == 1)
steps += 1;
// steps *= 2;
float weight = 255.0 / (steps * steps);
float stepW = 1.0 / (steps - 1);
for (int i = 0; i < steps; ++i) {
for (int j = 0; j < steps; ++j) {
float wx = stepW * j;
float wy = stepW * i;
glm::vec3 interp = (caustics[id[0]] * (1 - wx) + caustics[id[1]] * wx) * (1 - wy)
+ (caustics[id[2]] * (1 - wx) + caustics[id[3]] * wx) * wy;
glm::vec2 interp_coords = (causticCoords[id[0]] * (1 - wx) + causticCoords[id[1]] * wx) * (1 - wy)
+ (causticCoords[id[2]] * (1 - wx) + causticCoords[id[3]] * wx) * wy;
int nx = interp_coords.x, ny = interp_coords.y;
if (nx >= 0 && nx < imgw && ny >= 0 && ny < imgh) {
atomicAdd(&causticMaps[ny * imgw + nx].x, interp.x * weight);
atomicAdd(&causticMaps[ny * imgw + nx].y, interp.y * weight);
atomicAdd(&causticMaps[ny * imgw + nx].z, interp.z * weight);
}
}
}
}
}
/* GPU Render Entry */
__global__ void
SplatCaustic(glm::vec3* buffer, glm::vec3* map, int imgw, int imgh) {
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
}
extern "C" void
cudaRender(dim3 grid, dim3 block, int sbytes, unsigned int *g_odata, int imgw, int imgh)
{
static float count = 1;
float dis_per_pix = tan(World::fov * 0.5 * 3.141592654 / 180.0) / (imgw / 2);
glm::vec3 right = glm::cross(World::camera_lookat, World::camera_up);
dim3 grid1(CAUSTIC_W / block.x, CAUSTIC_W / block.y, 1);
static float angle = 0.0;
static float angle_dir = 1.0f;
if (g_world.pause) {
angle += angle_dir;
}
if (angle > 30.0f || angle < -30.0f)
angle_dir = -angle_dir;
float rad = angle / 180.0 * CV_PI;
glm::mat3 rot(1.0f);
rot[0][0] = cos(rad);
rot[0][2] = sin(rad);
rot[2][0] = -sin(rad);
rot[2][2] = cos(rad);
glm::vec3 new_dir = rot * g_world.lights.direct_light_dir[0];
hipMemcpy(g_world.directLightsBuffer, &new_dir, sizeof(glm::vec3), hipMemcpyHostToDevice);
ClearBuffer << < grid1, block, sbytes >> >(g_world.causticMapBuffer, g_world.scatterBuffer, g_world.softShadowBuffer, CAUSTIC_W, CAUSTIC_W);
for (int i = 0; i < g_world.lights.direct_light_dir.size(); ++i) {
CausticRender << < grid1, block, sbytes >> > (g_world.causticBuffer, g_world.causticCoordsBuffer, CAUSTIC_W, CAUSTIC_W,
g_world.materialBuffer, g_world.vertexBuffer, g_world.normalBuffer, g_world.texBuffer, g_world.num_objects,
new_dir, g_world.lights.direct_light_color[i], g_world.texImagesBuffer, g_world.texOffsetBuffer, g_world.bvhDataBuffer,
g_world.scatterBuffer, g_world.scatterPosBuffer, g_world.softShadowBuffer);
SplatCaustic << < grid1, block, sbytes >> > (g_world.causticBuffer, g_world.causticCoordsBuffer, g_world.causticMapBuffer, CAUSTIC_W, CAUSTIC_W);
FilterCaustic << < grid1, block, sbytes >> > (g_world.causticMapBuffer, g_world.causticBuffer, CAUSTIC_W, CAUSTIC_W);
FilterShadow << < grid1, block, sbytes >> > (g_world.softShadowBuffer, g_world.softShadowMap, CAUSTIC_W, CAUSTIC_W);
}
render << < grid, block, sbytes >> >(g_odata, imgw, imgh,
World::camera_up, World::camera_lookat, right, World::camera, dis_per_pix,
g_world.materialBuffer, g_world.vertexBuffer, g_world.normalBuffer, g_world.texBuffer, g_world.num_objects,
g_world.lights.direct_light_dir.size(), g_world.directLightsBuffer, g_world.directLightsColorBuffer,
g_world.lights.point_light_pos.size(), g_world.pointLightsBuffer, g_world.pointLightsColorBuffer, g_world.lights.ambient * count,
g_world.texImagesBuffer, g_world.texOffsetBuffer,
g_world.causticBuffer, g_world.bvhDataBuffer, g_world.environmentBuffer, g_world.scatterBuffer, g_world.scatterPosBuffer,
g_world.softShadowMap, g_world.rendering_mode);
if (g_world.rendering_mode == 0 || g_world.rendering_mode == 3)
filter << < grid, block, sbytes >> >(g_odata, imgw, imgh);
printf("%d\n", g_world.rendering_mode);
/* combineCaustic << < grid, block, sbytes >> >(g_odata, g_world.causticMapBuffer, imgw, imgh,
World::camera_up, World::camera_lookat, right, World::camera, dis_per_pix,
g_world.materialBuffer, g_world.vertexBuffer, g_world.normalBuffer, g_world.texBuffer, g_world.num_objects);*/
}
| 756b38f8946b3fb0cfd1652cc430c2b872ef43b1.cu | #include <helper_cuda.h>
#include "../scene/world.h"
#include "../bvh/bvh.h"
/* BASIC OPERATIONS */
__device__ __host__ float clamp(float x, float a, float b)
{
return max(a, min(b, x));
}
__device__ int clamp(int x, int a, int b)
{
return max(a, min(b, x));
}
// pack a floating-point rgb color into one 32-bit int, 8 bits per channel (B<<16 | G<<8 | R)
__device__ int rgbToInt(float r, float g, float b)
{
r = clamp(r, 0.0f, 255.0f);
g = clamp(g, 0.0f, 255.0f);
b = clamp(b, 0.0f, 255.0f);
return (int(b) << 16) | (int(g) << 8) | int(r);
}
__device__ __host__ glm::vec3 Inttorgb(int x)
{
glm::vec3 rgb;
rgb.b = (x >> 16);
rgb.g = (x >> 8) & 0xff;
rgb.r = (x & 0xff);
return rgb;
}
__device__ __host__
glm::vec3 fetchTex(glm::vec2& uv, int objIndex, uchar3* imagesBuffer, glm::ivec3* imageOffsetBuffer)
{
glm::ivec3& info = imageOffsetBuffer[objIndex];
int offset = info.x;
int iy = info.y;
int ix = info.z;
float x = uv.x;
float y = uv.y;
if (x < 0)
x = x - (int)x + 1;
if (y < 0)
y = y - (int)y + 1;
if (x >= 1)
x = x - (int)x;
if (y >= 1)
y = y - (int)y;
x *= ix;
y *= iy;
int lx = x, ly = y;
int rx = lx + 1, ry = ly + 1;
float wx = x - lx, wy = y - ly;
if (lx < 0)
lx += wx;
if (ly < 0)
ly += wy;
if (rx >= ix)
rx -= ix;
if (ry >= iy)
ry -= iy;
int ind1 = offset + ly * ix + lx;
int ind2 = offset + ly * ix + rx;
int ind3 = offset + ry * ix + lx;
int ind4 = offset + ry * ix + rx;
uchar3& c1 = imagesBuffer[ind1];
uchar3& c2 = imagesBuffer[ind2];
uchar3& c3 = imagesBuffer[ind3];
uchar3& c4 = imagesBuffer[ind4];
float cx = (c1.x * (1 - wx) + c2.x * wx) * (1 - wy) + (c3.x * (1 - wx) + c4.x * wx) * wy;
float cy = (c1.y * (1 - wx) + c2.y * wx) * (1 - wy) + (c3.y * (1 - wx) + c4.y * wx) * wy;
float cz = (c1.z * (1 - wx) + c2.z * wx) * (1 - wy) + (c3.z * (1 - wx) + c4.z * wx) * wy;
return glm::vec3(cz, cy, cx);
}
__device__ __host__
glm::vec3 fetchEnvironment(glm::vec3 ray_d, int imgh, int imgw, uchar3* imagesBuffer)
{
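	// Equirectangular environment lookup: convert the ray direction to (theta, phi)
	// and bilinearly sample the imgh x imgw panorama.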
float theta = acos(ray_d.y) / CV_PI * imgh;
float phi = atan2(ray_d.z, ray_d.x) / CV_PI / 2.0;
if (phi < 0)
phi += 1;
phi += 0.25;
if (phi > 1)
phi -= 1;
phi *= imgw;
int lx = phi, ly = theta;
int rx = lx + 1, ry = ly + 1;
float wx = phi - lx, wy = theta - ly;
if (rx >= imgw)
rx -= imgw;
if (ry >= imgh)
ry -= imgh;
int ind1 = ly * imgw + lx;
int ind2 = ly * imgw + rx;
int ind3 = ry * imgw + lx;
int ind4 = ry * imgw + rx;
uchar3& c1 = imagesBuffer[ind1];
uchar3& c2 = imagesBuffer[ind2];
uchar3& c3 = imagesBuffer[ind3];
uchar3& c4 = imagesBuffer[ind4];
float cx = (c1.x * (1 - wx) + c2.x * wx) * (1 - wy) + (c3.x * (1 - wx) + c4.x * wx) * wy;
float cy = (c1.y * (1 - wx) + c2.y * wx) * (1 - wy) + (c3.y * (1 - wx) + c4.y * wx) * wy;
float cz = (c1.z * (1 - wx) + c2.z * wx) * (1 - wy) + (c3.z * (1 - wx) + c4.z * wx) * wy;
return glm::vec3(cz, cy, cx);
}
/* Intersections */
__device__ __host__
float rayIntersectsTriangle(glm::vec3& p, glm::vec3& d,
glm::vec3& v0, glm::vec3& v1, glm::vec3& v2, float& u, float& v) {
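	// Möller-Trumbore ray/triangle test: returns the hit distance t (or -1 on a miss)
	// and the barycentric coordinates (u, v) of the hit point.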
glm::vec3 e1 = v1 - v0;
glm::vec3 e2 = v2 - v0;
glm::vec3 h = glm::cross(d, e2);
float a = glm::dot(e1, h);
if (a > -0.00001 && a < 0.00001)
return -1;
float f = 1 / a;
glm::vec3 s = p - v0;
u = f * glm::dot(s, h);
if (u < -1e-3 || u > 1 + 1e-3)
return -1;
glm::vec3 q = glm::cross(s, e1);
v = f * glm::dot(d, q);
if (v < -1e-3 || u + v > 1 + 1e-3)
return -1;
// at this stage we can compute t to find out where
// the intersection point is on the line
float t = f * glm::dot(e2, q);
if (t > 0.00001) // ray intersection
return t;
else // this means that there is a line intersection
// but not a ray intersection
return -1;
}
__device__ __host__
void swaps(float& x, float& y) {
float t = x;
x = y, y = t;
}
__device__ __host__
int BoundingBoxIntersect(glm::vec3& ray_o, glm::vec3& ray_t, glm::vec3& minP, glm::vec3& maxP) {
auto r = ray_t + glm::vec3(1e-6, 1e-6, 1e-6);
auto rinv = glm::vec3(1 / r.x, 1 / r.y, 1 / r.z);
float tx1 = (minP.x - ray_o.x)*rinv.x;
float tx2 = (maxP.x - ray_o.x)*rinv.x;
float tmin, tmax;
if (rinv.x > 0)
tmin = tx1, tmax = tx2;
else
tmin = tx2, tmax = tx1;
float ty1 = (minP.y - ray_o.y)*rinv.y;
float ty2 = (maxP.y - ray_o.y)*rinv.y;
if (rinv.y > 0)
tmin = max(tmin, ty1),
tmax = min(tmax, ty2);
else
tmin = max(tmin, ty2),
tmax = min(tmax, ty1);
float tz1 = (minP.z - ray_o.z)*rinv.z;
float tz2 = (maxP.z - ray_o.z)*rinv.z;
if (rinv.z > 0)
tmin = max(tmin, tz1),
tmax = min(tmax, tz2);
else
tmin = max(tmin, tz2),
tmax = min(tmax, tz1);
return tmax >= tmin;
}
/* BVH Intersection */
__device__ __host__
int bvh_index(BVHData* bvh_node) {
return 3 * bvh_node->start_id;
}
__device__ __host__
int bvh_left(BVHData* bvh_node) {
return bvh_node->left_id;
}
__device__ __host__
int bvh_right(BVHData* bvh_node) {
return bvh_node->right_id;
}
__device__ __host__
int bvh_parent(BVHData* bvh_node) {
return bvh_node->parent_id;
}
__device__ __host__
int bvh_dir(BVHData* bvh_node, glm::vec3& ray) {
int axis = bvh_node->axis;
if (axis == 0)
return ray.x > 0;
if (axis == 1)
return ray.y > 0;
return ray.z <= 0;
}
__device__ __host__
float box_intersect(BVHData* bvh_node, glm::vec3& ray_o, glm::vec3& ray_t) {
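	// Slab test against the node's AABB; returns the entry distance along the ray
	// (clamped to 0 when the origin is inside the box) or -1 on a miss.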
float step = 1.0 / 11;
float tmp = 0;
float a1, a2, b1, b2, c1, c2;
a1 = (bvh_node->minCorner.x - ray_o.x);
a2 = (bvh_node->maxCorner.x - ray_o.x);
if (ray_t.x < 1e-6 && ray_t.x > -1e-6) {
if (a1 * a2 > 1e-4)
return -1;
a1 = -1e30; a2 = 1e30;
}
else {
a1 /= ray_t.x;
a2 /= ray_t.x;
}
if (a1 > a2) {
tmp = a1; a1 = a2; a2 = tmp;
}
b1 = (bvh_node->minCorner.y - ray_o.y);
b2 = (bvh_node->maxCorner.y - ray_o.y);
if (ray_t.y < 1e-6 && ray_t.y > -1e-6) {
if (b1 * b2 > 1e-4)
return -1;
b1 = -1e30; b2 = 1e30;
}
else {
b1 /= ray_t.y;
b2 /= ray_t.y;
}
if (b1 > b2) {
tmp = b1; b1 = b2; b2 = tmp;
}
c1 = (bvh_node->minCorner.z - ray_o.z);
c2 = (bvh_node->maxCorner.z - ray_o.z);
if (ray_t.z < 1e-6 && ray_t.z > -1e-6) {
if (c1 * c2 > 1e-4)
return -1;
c1 = -1e30; c2 = 1e30;
}
else {
c1 /= ray_t.z;
c2 /= ray_t.z;
}
if (c1 > c2) {
tmp = c1; c1 = c2; c2 = tmp;
}
float t1, t2;
t1 = max(a1, max(b1, c1));
t2 = min(a2, min(b2, c2));
if (t2 >= t1 && t2 >= 0)
return (t1 > 0) ? t1 : 0;
else
return -1;
}
__device__ __host__
float bvh_intersect(glm::vec3& ray_o, glm::vec3& ray_t, int& index, float& u, float& v,
glm::vec3* vertexBuffer, BVHData* bvh) {
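	// Stackless BVH traversal: descend first into the child chosen by bvh_dir
	// (the near side of the split axis) and climb back up through parent pointers;
	// last_node remembers which child we just returned from.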
float depth = 1e30;
index = -1;
BVHData* bvh_node = bvh;
BVHData* last_node = 0;
float u1, v1;
int t = 0;
while (bvh_node >= bvh) {
t += 1;
if (last_node == 0) {
float cur_depth = box_intersect(bvh_node, ray_o, ray_t);
if (cur_depth < 0 || cur_depth > depth) {
last_node = bvh_node;
bvh_node = bvh + bvh_parent(bvh_node);
continue;
}
if (bvh_left(bvh_node) < 0) {
int cur_index = bvh_index(bvh_node);
cur_depth = rayIntersectsTriangle(ray_o, ray_t, vertexBuffer[cur_index],
vertexBuffer[cur_index + 1], vertexBuffer[cur_index + 2], u1, v1);
if (cur_depth >= 0 && cur_depth < depth) {
index = cur_index;
u = u1;
v = v1;
depth = cur_depth;
}
last_node = bvh_node;
bvh_node = bvh + bvh_parent(bvh_node);
continue;
}
else {
last_node = 0;
if (bvh_dir(bvh_node, ray_t)) {
bvh_node = bvh + bvh_left(bvh_node);
}
else {
bvh_node = bvh + bvh_right(bvh_node);
}
}
}
else {
bool dir = bvh_dir(bvh_node, ray_t);
BVHData* left_node = bvh + bvh_left(bvh_node);
BVHData* right_node = bvh + bvh_right(bvh_node);
if (dir && left_node == last_node) {
last_node = 0;
bvh_node = bvh + bvh_right(bvh_node);
}
else
if (!dir && right_node == last_node) {
last_node = 0;
bvh_node = bvh + bvh_left(bvh_node);
}
else {
last_node = bvh_node;
bvh_node = bvh + bvh_parent(bvh_node);
}
}
}
return depth;
}
/* Tracing Algorithm */
//#define BVH_
__device__ __host__
float tracing(glm::vec3& ray_o, glm::vec3& ray_t, float shadow, int& tri, int& obj, glm::vec3& hit_point, glm::vec2& uv, glm::vec3& normal,
InstanceData* instanceData, glm::vec3* vertexBuffer, glm::vec3* normalBuffer, glm::vec2* texBuffer, int num_object,
BVHData* bvh) {
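	// Closest-hit query over the whole scene; the BVH_ macro selects between the
	// brute-force per-triangle loop and the BVH traversal. When shadow >= 0 the call
	// is an occlusion test that returns as soon as any hit closer than `shadow` is found.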
#ifndef BVH_
float depth = 1e30;
obj = -1;
tri = -1;
int j = 0;
for (int k = 0; k < num_object; ++k) {
int next_object = instanceData[k].s;
if ((k < 1 || k >= 7) && !BoundingBoxIntersect(ray_o, ray_t, instanceData[k].minPos, instanceData[k].maxPos)) {
j = next_object;
continue;
}
while (j < next_object) {
glm::vec3& v1 = vertexBuffer[j];
glm::vec3& v2 = vertexBuffer[j + 1];
glm::vec3& v3 = vertexBuffer[j + 2];
float u, v;
float t = rayIntersectsTriangle(ray_o, ray_t, v1, v2, v3, u, v);
if (t > 1e-2 && t < depth) {
depth = t;
hit_point = ((ray_o + ray_t * depth));
if (shadow >= 0) {
if (t < shadow) {
return t;
}
}
else {
obj = k;
tri = j;
glm::vec3& n1 = normalBuffer[j];
glm::vec3& n2 = normalBuffer[j + 1];
glm::vec3& n3 = normalBuffer[j + 2];
normal = u * (n2 - n1) + v * (n3 - n1) + n1;
glm::vec2& uv1 = texBuffer[j];
glm::vec2& uv2 = texBuffer[j + 1];
glm::vec2& uv3 = texBuffer[j + 2];
uv = uv1 + u * (uv2 - uv1) + v * (uv3 - uv1);
}
}
j += 3;
}
}
normal = normalize(normal);
return depth;
#else
float depth = 1e30;
obj = -1;
tri = -1;
for (int k = 0; k < num_object; ++k) {
int index;
float u, v;
float t = bvh_intersect(ray_o, ray_t, index, u, v, vertexBuffer, bvh + instanceData[k].bvh_offset);
if (t > 1e-2 && t < depth) {
depth = t;
hit_point = ((ray_o + ray_t * depth));
if (shadow >= 0) {
if (t < shadow) {
return t;
}
}
else {
obj = k;
tri = index;
glm::vec3& n1 = normalBuffer[tri];
glm::vec3& n2 = normalBuffer[tri + 1];
glm::vec3& n3 = normalBuffer[tri + 2];
normal = u * (n2 - n1) + v * (n3 - n1) + n1;
glm::vec2& uv1 = texBuffer[tri];
glm::vec2& uv2 = texBuffer[tri + 1];
glm::vec2& uv3 = texBuffer[tri + 2];
uv = uv1 + u * (uv2 - uv1) + v * (uv3 - uv1);
}
}
}
normal = normalize(normal);
return depth;
#endif
}
__device__ __host__
glm::vec3 lighting(glm::vec3& start_camera, glm::vec3& point, glm::vec3& normal, int tri_index, glm::vec2& uv, int obj_index,
InstanceData* instanceData, glm::vec3* vertexBuffer, glm::vec3* normalBuffer, glm::vec2* texBuffer, int num_object,
int num_direct_light, glm::vec3* direct_lights, glm::vec3* direct_lights_color,
int num_point_light, glm::vec3* point_lights, glm::vec3* point_lights_color, glm::vec3& ambient,
uchar3* imagesBuffer, glm::ivec3* imageOffsetBuffer, glm::vec3& orig_color, glm::vec3* causticMap, BVHData* bvh, float depth, uchar3* environment,
glm::vec3& ray_t, glm::vec3* scatterMap, glm::vec3* scatterPosMap, float* shadowMap, int render_mode) {
float kd = instanceData[obj_index].kd;
float ks = instanceData[obj_index].ks;//texture2D(materialSampler, vec2(1.5 / MATERIAL_LEN, (obj_index + 0.5) / num_object)).r;
float ka = instanceData[obj_index].ka;// texture2D(materialSampler, vec2(16.5 / MATERIAL_LEN, (obj_index + 0.5) / num_object)).r;
float alpha = instanceData[obj_index].alpha;// texture2D(materialSampler, vec2(20.5 / MATERIAL_LEN, (obj_index + 0.5) / num_object)).r;
if (depth > 1000)
orig_color = fetchEnvironment(ray_t, 3000, 6000, environment);
else
orig_color = fetchTex(uv, obj_index, imagesBuffer, imageOffsetBuffer);
// int tex = int(0.1 + texture2D(materialSampler, vec2(2.5 / MATERIAL_LEN, (obj_index + 0.5) / num_object)).r);
// orig_color = texture2D(renderSampler[tex], uv).rgb;
glm::vec3 color = ka * orig_color * ambient;
glm::vec3 eye_dir = normalize(start_camera - point);
int t1, t2;
glm::vec2 v1;
glm::vec3 v2, v3;
for (int i = 0; i < num_direct_light; ++i) {
float intensity = glm::dot(-direct_lights[i], normal) * glm::dot(eye_dir, normal);
if (intensity < 0)
continue;
float depth = tracing(point, -direct_lights[i], 100, t1, t2, v2, v1, v3, instanceData, vertexBuffer, normalBuffer, texBuffer, num_object, bvh);
if (obj_index == 0) {
float rx = (point.x - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
float ry = (point.z - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
if (rx >= 0 && ry >= 0 && rx < CAUSTIC_W && ry < CAUSTIC_W) {
int lx = rx, ly = ry;
int rrx = lx + 1, rry = ly + 1;
float wx = rx - lx, wy = ry - ly;
float shadow1 = shadowMap[ly * CAUSTIC_W + lx];
float shadow2 = shadowMap[ly * CAUSTIC_W + rrx];
float shadow3 = shadowMap[rry * CAUSTIC_W + lx];
float shadow4 = shadowMap[rry * CAUSTIC_W + rrx];
float shadow = (shadow1 * (1 - wx) + shadow2 * wx) * (1 - wy) + (shadow3 * (1 - wx) + shadow4 * wx) * wy;
intensity *= shadow;
} else
continue;
}
color += intensity * (orig_color * direct_lights_color[i] * kd
+ clamp((float)pow(glm::dot(glm::reflect(direct_lights[i], normal), eye_dir), alpha), 0.0f, 1.f) * ks * direct_lights_color[i]);
}
for (int i = 0; i < num_point_light; ++i) {
glm::vec3 dis = point - point_lights[i];
float len = glm::length(dis);
float l = 1 / (len * len);
dis = normalize(dis);
float intensity = glm::dot(-dis, normal) * glm::dot(eye_dir, normal);
if (intensity < 0)
continue;
float depth = tracing(point, -dis, len, t1, t2, v2, v1, v3, instanceData, vertexBuffer, normalBuffer, texBuffer, num_object, bvh);
if (depth < len)
continue;
glm::vec3 para = kd * l * point_lights_color[i];
color = color + intensity * (orig_color * para
+ clamp((float)pow(dot(reflect(dis, normal), eye_dir), alpha), 0.f, 1.f) * ks * point_lights_color[i]);
}
if (obj_index == 0) {
float rx = (point.x - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
float ry = (point.z - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
if (rx < CAUSTIC_W - 1 && ry < CAUSTIC_W-1 && rx >= 0 && ry >= 0) {
int lx = rx, ly = ry;
int rrx = lx + 1, rry = ly + 1;
float wx = rx - lx, wy = ry - ly;
glm::vec3& caustic1 = causticMap[ly * CAUSTIC_W + lx];
glm::vec3& caustic2 = causticMap[ly * CAUSTIC_W + rrx];
glm::vec3& caustic3 = causticMap[rry * CAUSTIC_W + lx];
glm::vec3& caustic4 = causticMap[rry * CAUSTIC_W + rrx];
glm::vec3 caustic = (caustic1 * (1 - wx) + caustic2 * wx) * (1 - wy) + (caustic3 * (1 - wx) + caustic4 * wx) * wy;
if (render_mode == 2) {
color = caustic;
return color;
}
color = color + glm::dot(eye_dir, normal) * kd * caustic;
float max_v = max(max(color.x, color.y), color.z) / 255.0f;
if (max_v > 1)
color /= max_v;
}
}
if (instanceData[obj_index].kt > 1e-3) {
float radius = 0.4f;
glm::vec3 lightp = point + -point.y / direct_lights[0].y * direct_lights[0];
float x = (lightp.x - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
float y = (lightp.z - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
int tx = x, ty = y;
if (tx < 0 || tx >= CAUSTIC_W || ty < 0 || ty >= CAUSTIC_W)
return glm::vec3(0,0,0);
int bandwidth = radius / abs(direct_lights[0].y) / CAUSTIC_MAP_DIS / 2;
glm::vec3 lights(0, 0, 0);
for (int ly = y - bandwidth; ly <= y + bandwidth; ++ly) {
for (int lx = x - bandwidth; lx <= x + bandwidth; ++lx) {
if (ly < 0 || ly >= CAUSTIC_W || lx < 0 || lx >= CAUSTIC_W)
continue;
float r = glm::length(point - scatterPosMap[ly * CAUSTIC_W + lx]);
float weight = exp(-(r*r) / (radius*radius * 2)) * 2.5e-3 / (radius * radius);
lights += weight * scatterMap[ly * CAUSTIC_W + lx];
}
}
if (render_mode == 5) {
return (lights * 255.0f);
}
lights.x = clamp(lights.x, 0.f, 1.f);
lights.y = clamp(lights.y, 0.f, 1.f);
lights.z = clamp(lights.z, 0.f, 1.f);
color += lights * orig_color;
color.x = clamp(color.x, 0.0f, 255.f);
color.y = clamp(color.y, 0.0f, 255.f);
color.z = clamp(color.z, 0.0f, 255.f);
}
return color;
}
__global__ void
render(unsigned int *g_odata, int imgw, int imgh,
glm::vec3 cam_up, glm::vec3 cam_forward, glm::vec3 right, glm::vec3 cam_pos, float dis_per_pix,
InstanceData* instanceData, glm::vec3* vertexBuffer, glm::vec3* normalBuffer, glm::vec2* texBuffer, int num_object,
int num_direct_lights, glm::vec3* direct_lights, glm::vec3* direct_lights_color,
int num_point_lights, glm::vec3* point_lights, glm::vec3* point_lights_color, glm::vec3 ambient,
uchar3* imagesBuffer, glm::ivec3* imageOffsetBuffer,
glm::vec3* causticMap, BVHData* bvh, uchar3* environment, glm::vec3* scatterMap, glm::vec3* scatterPos, float* shadowMap, int render_mode)
{
extern __shared__ uchar4 sdata[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
if (render_mode == 1) {
g_odata[y * imgw + x] = rgbToInt(causticMap[y * imgw + x].x, causticMap[y * imgw + x].y, causticMap[y * imgw + x].z);
return;
}
if (render_mode == 4) {
g_odata[y * imgw + x] = rgbToInt(scatterMap[y * imgw + x].x * 255, scatterMap[y * imgw + x].y * 255, scatterMap[y * imgw + x].z * 255);
return;
}
glm::vec3 ray_p = cam_pos;
glm::vec3 ray_d = glm::normalize(cam_forward + (x - imgw / 2) * dis_per_pix * right + (y - imgh / 2) * dis_per_pix * cam_up);
glm::vec3 color(0, 0, 0);
int tri_index, obj_index;
int path_state[PATH_DEPTH];
int mat_stack[PATH_DEPTH];
glm::vec3 light_stack[PATH_DEPTH];
glm::vec3 color_stack[PATH_DEPTH];
glm::vec3 from_stack[PATH_DEPTH];
glm::vec3 to_stack[PATH_DEPTH];
glm::vec3 normal_stack[PATH_DEPTH];
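	// The stacks above drive an iterative (non-recursive) path tracer. path_state per node:
	// 0 = trace the ray, shade the hit, and spawn a reflection child if kr > 0;
	// 1 = spawn a refraction child if kf > 0 (or a reflection child on total internal reflection);
	// 2/3 = finished: fold this node's light into its parent and pop.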
int node = 0;
path_state[node] = 0;
from_stack[node] = ray_p;
to_stack[node] = ray_d;
color_stack[node] = glm::vec3(0, 0, 0);
light_stack[node] = glm::vec3(0, 0, 0);
float nr;
int hit_mat = 0;
glm::vec3 hit_point;
glm::vec2 uv;
glm::vec3 normal;
while (node >= 0) {
if (path_state[node] == 0) {
path_state[node] = 1;
float depth;
depth = tracing(from_stack[node], to_stack[node], -1, tri_index, obj_index, hit_point, uv, normal, instanceData, vertexBuffer, normalBuffer, texBuffer, num_object, bvh);
if (depth < 1e20) {
glm::vec3 orig_color;
light_stack[node] = lighting(from_stack[node], hit_point, normal, tri_index, uv, obj_index, instanceData, vertexBuffer, normalBuffer, texBuffer, num_object,
num_direct_lights, direct_lights, direct_lights_color, num_point_lights, point_lights, point_lights_color, ambient,
imagesBuffer, imageOffsetBuffer, orig_color, causticMap, bvh, depth, environment, to_stack[node], scatterMap, scatterPos, shadowMap, render_mode);
color_stack[node] = orig_color;
normal_stack[node] = normal;
ray_d = to_stack[node];
to_stack[node] = hit_point;
mat_stack[node] = obj_index;
float kr = instanceData[obj_index].kr;
if (kr > 0 && node < PATH_DEPTH - 1) {
color_stack[node] = instanceData[obj_index].kr * color_stack[node];
node += 1;
path_state[node] = 0;
from_stack[node] = hit_point;
to_stack[node] = ray_d - 2 * glm::dot(ray_d, normal) * normal;
light_stack[node] = glm::vec3(0, 0, 0);
continue;
}
}
else {
path_state[node] = 3;
}
}
if (path_state[node] == 1) {
path_state[node] = 2;
obj_index = mat_stack[node];
float kf = instanceData[obj_index].kf;
if (kf > 0 && node < PATH_DEPTH - 1) {
nr = instanceData[obj_index].nr;
normal = normal_stack[node];
ray_d = glm::normalize(to_stack[node] - from_stack[node]);
float cost = glm::dot(normal, ray_d);
if (cost < 0) {
nr = 1 / nr;
cost = -cost;
}
else {
normal = -normal;
}
float rootContent = 1 - nr * nr * (1 - cost * cost);
if (rootContent >= 0) {
color_stack[node] = instanceData[obj_index].kf * color_stack[node];
rootContent = sqrt(rootContent);
node += 1;
path_state[node] = 0;
from_stack[node] = to_stack[node - 1];
to_stack[node] = (nr * cost - rootContent) * normal + nr * ray_d;
light_stack[node] = glm::vec3(0, 0, 0);
continue;
}
else {
float kr = 1;
if (kr > 0 && node < PATH_DEPTH - 1) {
light_stack[node] = glm::vec3(0, 0, 0);
node += 1;
path_state[node] = 0;
from_stack[node] = to_stack[node - 1];
to_stack[node] = ray_d - 2 * glm::dot(ray_d, normal) * normal;
light_stack[node] = glm::vec3(0, 0, 0);
continue;
}
else {
g_odata[y*imgw + x] = 0;
return;
}
}
}
}
if (path_state[node] == 2) {
path_state[node] = 3;
obj_index = mat_stack[node];
}
if (path_state[node] == 3) {
if (node == 0)
break;
int obj_index = mat_stack[node - 1];
if (path_state[node - 1] == 1) {
light_stack[node - 1] = (1 - instanceData[obj_index].kr) * light_stack[node - 1]
+ color_stack[node - 1] * light_stack[node] / 255.0f;
}
else
if (path_state[node - 1] == 2) {
light_stack[node - 1] = (1 - instanceData[obj_index].kf) * light_stack[node - 1]
+ color_stack[node - 1] * light_stack[node] / 255.0f;
}
else {
hit_mat -= 1;
normal = normal_stack[node - 1];
ray_d = glm::normalize(to_stack[node - 1] - from_stack[node - 1]);
float alpha = instanceData[obj_index].alpha;
light_stack[node - 1] = (1 - instanceData[obj_index].ks) * light_stack[node - 1]
+ instanceData[obj_index].ks * color_stack[node - 1] * light_stack[node] * glm::dot(-ray_d, normal) / 255.0f;
}
node -= 1;
}
}
uchar4 c4 = make_uchar4(light_stack[0].r, light_stack[0].g, light_stack[0].b, 255);
g_odata[y*imgw + x] = rgbToInt(c4.x, c4.y, c4.z);
}
/* Filtering */
__global__ void
filter(unsigned int *g_odata, int imgw, int imgh) {
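	// Hole-filling pass: pixels equal to 0 (black, or the value written for discarded paths)
	// are replaced by the average of the non-zero pixels in an 11x11 neighborhood.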
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
int id = y * imgw + x;
if (g_odata[id] == 0) {
glm::vec3 rgb(0, 0, 0);
int count = 0;
for (int dx = -5; dx <= 5; ++dx) {
for (int dy = -5; dy <= 5; ++dy) {
int nx = x + dx;
int ny = y + dy;
if (nx >= 0 && nx < imgw && ny >= 0 && ny < imgh) {
int nid = ny * imgw + nx;
if (g_odata[nid] != 0) {
count += 1;
rgb += Inttorgb(g_odata[nid]);
}
}
}
}
if (count > 0)
g_odata[id] = rgbToInt(rgb.r / count, rgb.g / count, rgb.b / count);
else
g_odata[id] = rgbToInt(0, 0, 0);
}
}
__global__ void
FilterCaustic(glm::ivec3* causticMap, glm::vec3* causticBuffer, int imgw, int imgh) {
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
int id = y * imgw + x;
auto& pix = causticMap[id];
int temp[3][3] =
{
{ 1, 2, 1 },
{ 2, 4, 2 },
{ 1, 2, 1 }
};
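	// NOTE: the "|| true" below forces the 3x3 weighted blur for every texel,
	// so the pass-through else branch is effectively dead code.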
if (pix.x == 0 && pix.y == 0 && pix.z == 0 || true) {
glm::ivec4 pt;
for (int py = y - 1; py <= y + 1; ++py) {
if (py < 0 || py >= imgh)
continue;
for (int px = x - 1; px <= x + 1; ++px) {
if (px < 0 || px >= imgw)
continue;
int dy = py - y + 1;
int dx = px - x + 1;
auto& p = causticMap[py * imgw + px];
if (p.x != 0 || p.y != 0 || p.z != 0) {
pt += glm::ivec4(p, 1) * temp[dy][dx];
}
}
}
if (pt.w > 0)
causticBuffer[id] = glm::vec3((float)pt.x / pt.w, (float)pt.y / pt.w, (float)pt.z / pt.w);
else
causticBuffer[id] = glm::vec3(0, 0, 0);
}
else {
causticBuffer[id] = glm::vec3(pix.x, pix.y, pix.z);
}
}
/* Caustic Rendering */
__device__ __host__
glm::vec3 projectCaustic(glm::vec3& ray_o, glm::vec3& ray_t, glm::vec3 &color,
InstanceData* instanceData, glm::vec3* vertexBuffer, glm::vec3* normalBuffer, glm::vec2* texBuffer, int num_object,
glm::vec3& light, glm::vec2& coords, uchar3* texImages, glm::ivec3* imageOffsets, BVHData* bvh, glm::vec3& scatterPos, float& softShadow) {
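	// Traces one light ray through at most three reflective/refractive bounces.
	// softShadow records whether the un-bounced ray hits the floor (object 0) directly.
	// Translucent surfaces (kt > 0) store the hit in scatterPos and return the incoming energy;
	// rays that end on the floor write their caustic-map coordinates and color to coords/light.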
int tri_index, obj_index;
glm::vec3 hit_point, normal;
glm::vec2 uv;
float depth = tracing(ray_o, ray_t, -1, tri_index, obj_index, hit_point, uv, normal, instanceData, vertexBuffer, normalBuffer, texBuffer, num_object, bvh);
if (obj_index == 0)
softShadow = 1;
else
softShadow = 0;
glm::vec3 orig_color = fetchTex(uv, obj_index, texImages, imageOffsets) / 255.0f;
int steps = 0;
float intensity = 1;
while (depth < 1e20 && (instanceData[obj_index].kr > 1e-3 || instanceData[obj_index].kf > 1e-3 || instanceData[obj_index].kt > 1e-3)) {
if (instanceData[obj_index].kt > 1e-3) {
float x = (hit_point.x - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
float y = (hit_point.z - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
scatterPos = hit_point;
return color * -glm::dot(normal, ray_t);
}
if (instanceData[obj_index].kf != 0) {
float nr = instanceData[obj_index].nr;
float cost = glm::dot(normal, ray_t);
if (cost < 0) {
nr = 1 / nr;
cost = -cost;
}
else {
normal = -normal;
}
float rootContent = 1 - nr * nr * (1 - cost * cost);
if (rootContent >= 0) {
ray_o = glm::vec3(hit_point.x, hit_point.y, hit_point.z);
ray_t = (nr * cost - sqrt(rootContent)) * normal + nr * ray_t;
intensity *= instanceData[obj_index].kf * 0.6;
}
else {
ray_o = glm::vec3(hit_point.x, hit_point.y, hit_point.z);
ray_t = glm::reflect(ray_t, glm::vec3(normal.x, normal.y, normal.z));
}
}
else if (instanceData[obj_index].kr != 0) {
ray_o = glm::vec3(hit_point.x, hit_point.y, hit_point.z);
ray_t = glm::reflect(ray_t, glm::vec3(normal.x, normal.y, normal.z));
intensity *= instanceData[obj_index].kr;
}
steps++;
if (steps > 2)
break;
depth = tracing(ray_o, ray_t, -1, tri_index, obj_index, hit_point, uv, normal, instanceData, vertexBuffer, normalBuffer, texBuffer, num_object, bvh);
}
if (obj_index == 0 && steps > 0) {
float x = (hit_point.x - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
float y = (hit_point.z - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
coords = glm::vec2(x, y);
light = intensity * color * orig_color;
}
else {
coords = glm::vec2(-1, -1);
light = glm::vec3(0, 0, 0);
}
return glm::vec3(0, 0, 0);
}
__global__ void
ClearBuffer(glm::ivec3 *g_odata, glm::vec3* g_light, float* shadow, int imgw, int imgh) {
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
g_odata[y * imgw + x] = glm::ivec3(0, 0, 0);
g_light[y * imgw + x] = glm::vec3(0, 0, 0);
shadow[y * imgw + x] = 0;
}
__global__ void
FilterShadow(float* input, float* output, int imgw, int imgh) {
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
int total = 0;
float shad = 0;
for (int dy = y - 3; dy <= y + 3; ++dy) {
for (int dx = x - 3; dx <= x + 3; ++dx) {
if (dy >= 0 && dy < imgh && dx >= 0 && dx < imgw)
{
shad += input[dy * imgw + dx];
total += 1;
}
}
}
if (total != 0)
output[y * imgw + x] = shad / total;
else
output[y * imgw + x] = 0;
}
__global__ void
CausticRender(glm::vec3 *causticMap, glm::vec2* cuasticCoords, int imgw, int imgh,
InstanceData* instanceData, glm::vec3* vertexBuffer, glm::vec3* normalBuffer, glm::vec2* texBuffer, int num_object,
glm::vec3 dir, glm::vec3 color, uchar3* texImages, glm::ivec3* imageOffsets, BVHData* bvh, glm::vec3* scatterBuffer, glm::vec3* scatterPos, float* softShadow) {
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
glm::vec3 point(x * CAUSTIC_MAP_DIS + CAUSTIC_X_MIN, 0, y * CAUSTIC_MAP_DIS + CAUSTIC_X_MIN);
scatterBuffer[y * CAUSTIC_W + x] = projectCaustic(point - dir * 1000.0f, dir, color, instanceData, vertexBuffer, normalBuffer, texBuffer, num_object,
causticMap[y * CAUSTIC_W + x], cuasticCoords[y * imgw + x], texImages, imageOffsets, bvh, scatterPos[y * CAUSTIC_W + x], softShadow[y * CAUSTIC_W + x]);
}
__global__ void
combineCaustic(unsigned int *g_odata, glm::ivec3* causticMap, int imgw, int imgh,
glm::vec3 cam_up, glm::vec3 cam_forward, glm::vec3 right, glm::vec3 cam_pos, float dis_per_pix,
InstanceData* instanceData, glm::vec3* vertexBuffer, glm::vec3* normalBuffer, glm::vec2* texBuffer, int num_object, BVHData* bvh) {
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
glm::vec3 ray_p = cam_pos;
glm::vec3 ray_d = glm::normalize(cam_forward + (x - imgw / 2) * dis_per_pix * right + (y - imgh / 2) * dis_per_pix * cam_up);
glm::vec3 color(0, 0, 0);
int tri_index, obj_index;
glm::vec3 hit_point, normal;
glm::vec2 uv;
float depth;
depth = tracing(ray_p, ray_d, -1, tri_index, obj_index, hit_point, uv, normal, instanceData, vertexBuffer, normalBuffer, texBuffer, num_object, bvh);
if (obj_index == 0) {
int rx = (hit_point.x - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
int ry = (hit_point.z - CAUSTIC_X_MIN) / CAUSTIC_MAP_DIS;
if (rx < CAUSTIC_W && ry < CAUSTIC_W && rx >= 0 && ry >= 0) {
auto& p = causticMap[ry * CAUSTIC_W + rx];
glm::vec3 np = Inttorgb(g_odata[y * imgw + x]);
np += p;
np.x = clamp(np.x, 0.f, 255.f);
np.y = clamp(np.y, 0.f, 255.f);
np.z = clamp(np.z, 0.f, 255.f);
np = glm::vec3(255, 0, 0);
g_odata[y * imgw + x] = rgbToInt(p.x, p.y, p.z);
}
}
}
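// SplatCaustic: for each 2x2 quad of neighbouring photon samples, bilinearly
// interpolates their colors and caustic-map coordinates and accumulates the
// result into causticMaps via atomicAdd. Quads with an invalid coordinate or
// a projected footprint wider than 15 texels are discarded.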
__global__ void
SplatCaustic(glm::vec3* caustics, glm::vec2* causticCoords, glm::ivec3* causticMaps, int imgw, int imgh) {
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
int ry = y + 1, rx = x + 1;
if (ry < imgh && rx < imgw) {
int id[4];
id[0] = y * imgw + x;
id[1] = id[0] + 1;
id[2] = id[0] + imgw;
id[3] = id[2] + 1;
float minX = 1e20f, maxX = -1e20f, minY = 1e20f, maxY = -1e20f;
for (int i = 0; i < 4; ++i) {
auto& p = causticCoords[id[i]];
if (causticCoords[id[i]].x < 0)
return;
if (p.x < minX)
minX = p.x;
if (p.x > maxX)
maxX = p.x;
if (p.y < minY)
minY = p.y;
if (p.y > maxY)
maxY = p.y;
}
if (maxX - minX > 15 || maxY - minY > 15)
return;
int stepX = (maxX - minX) + 1;
int stepY = (maxY - minY) + 1;
int steps;
if (stepX > stepY)
steps = stepX;
else
steps = stepY;
if (steps == 1)
steps += 1;
// steps *= 2;
float weight = 255.0 / (steps * steps);
float stepW = 1.0 / (steps - 1);
for (int i = 0; i < steps; ++i) {
for (int j = 0; j < steps; ++j) {
float wx = stepW * j;
float wy = stepW * i;
glm::vec3 interp = (caustics[id[0]] * (1 - wx) + caustics[id[1]] * wx) * (1 - wy)
+ (caustics[id[2]] * (1 - wx) + caustics[id[3]] * wx) * wy;
glm::vec2 interp_coords = (causticCoords[id[0]] * (1 - wx) + causticCoords[id[1]] * wx) * (1 - wy)
+ (causticCoords[id[2]] * (1 - wx) + causticCoords[id[3]] * wx) * wy;
int nx = interp_coords.x, ny = interp_coords.y;
if (nx >= 0 && nx < imgw && ny >= 0 && ny < imgh) {
atomicAdd(&causticMaps[ny * imgw + nx].x, interp.x * weight);
atomicAdd(&causticMaps[ny * imgw + nx].y, interp.y * weight);
atomicAdd(&causticMaps[ny * imgw + nx].z, interp.z * weight);
}
}
}
}
}
/* GPU Render Entry */
__global__ void
SplatCaustic(glm::vec3* buffer, glm::vec3* map, int imgw, int imgh) {
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
}
extern "C" void
cudaRender(dim3 grid, dim3 block, int sbytes, unsigned int *g_odata, int imgw, int imgh)
{
static float count = 1;
float dis_per_pix = tan(World::fov * 0.5 * 3.141592654 / 180.0) / (imgw / 2);
glm::vec3 right = glm::cross(World::camera_lookat, World::camera_up);
dim3 grid1(CAUSTIC_W / block.x, CAUSTIC_W / block.y, 1);
static float angle = 0.0;
static float angle_dir = 1.0f;
if (g_world.pause) {
angle += angle_dir;
}
if (angle > 30.0f || angle < -30.0f)
angle_dir = -angle_dir;
float rad = angle / 180.0 * CV_PI;
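// Build a rotation about the Y axis by 'rad' so the directional light can
// sweep back and forth between roughly -30 and +30 degrees across frames.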
glm::mat3 rot(1.0f);
rot[0][0] = cos(rad);
rot[0][2] = sin(rad);
rot[2][0] = -sin(rad);
rot[2][2] = cos(rad);
glm::vec3 new_dir = rot * g_world.lights.direct_light_dir[0];
cudaMemcpy(g_world.directLightsBuffer, &new_dir, sizeof(glm::vec3), cudaMemcpyHostToDevice);
ClearBuffer << < grid1, block, sbytes >> >(g_world.causticMapBuffer, g_world.scatterBuffer, g_world.softShadowBuffer, CAUSTIC_W, CAUSTIC_W);
for (int i = 0; i < g_world.lights.direct_light_dir.size(); ++i) {
CausticRender << < grid1, block, sbytes >> > (g_world.causticBuffer, g_world.causticCoordsBuffer, CAUSTIC_W, CAUSTIC_W,
g_world.materialBuffer, g_world.vertexBuffer, g_world.normalBuffer, g_world.texBuffer, g_world.num_objects,
new_dir, g_world.lights.direct_light_color[i], g_world.texImagesBuffer, g_world.texOffsetBuffer, g_world.bvhDataBuffer,
g_world.scatterBuffer, g_world.scatterPosBuffer, g_world.softShadowBuffer);
SplatCaustic << < grid1, block, sbytes >> > (g_world.causticBuffer, g_world.causticCoordsBuffer, g_world.causticMapBuffer, CAUSTIC_W, CAUSTIC_W);
FilterCaustic << < grid1, block, sbytes >> > (g_world.causticMapBuffer, g_world.causticBuffer, CAUSTIC_W, CAUSTIC_W);
FilterShadow << < grid1, block, sbytes >> > (g_world.softShadowBuffer, g_world.softShadowMap, CAUSTIC_W, CAUSTIC_W);
}
render << < grid, block, sbytes >> >(g_odata, imgw, imgh,
World::camera_up, World::camera_lookat, right, World::camera, dis_per_pix,
g_world.materialBuffer, g_world.vertexBuffer, g_world.normalBuffer, g_world.texBuffer, g_world.num_objects,
g_world.lights.direct_light_dir.size(), g_world.directLightsBuffer, g_world.directLightsColorBuffer,
g_world.lights.point_light_pos.size(), g_world.pointLightsBuffer, g_world.pointLightsColorBuffer, g_world.lights.ambient * count,
g_world.texImagesBuffer, g_world.texOffsetBuffer,
g_world.causticBuffer, g_world.bvhDataBuffer, g_world.environmentBuffer, g_world.scatterBuffer, g_world.scatterPosBuffer,
g_world.softShadowMap, g_world.rendering_mode);
if (g_world.rendering_mode == 0 || g_world.rendering_mode == 3)
filter << < grid, block, sbytes >> >(g_odata, imgw, imgh);
printf("%d\n", g_world.rendering_mode);
/* combineCaustic << < grid, block, sbytes >> >(g_odata, g_world.causticMapBuffer, imgw, imgh,
World::camera_up, World::camera_lookat, right, World::camera, dis_per_pix,
g_world.materialBuffer, g_world.vertexBuffer, g_world.normalBuffer, g_world.texBuffer, g_world.num_objects);*/
}
|
91aa8fce8233bf4e6c432c5c4f3a61eede48ee9a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <cstdio>
#include <sstream>
#include <iomanip>
#include <math.h>
#include <algorithm>
#include <string>
#include <hip/hip_runtime.h>
using namespace std;
#define CSC(call) \
do { \
hipError_t res = call; \
if (res != hipSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, hipGetErrorString(res)); \
exit(0); \
} \
} while (0) \
\
typedef uchar4 pixels;
typedef double pixFloat[3];
typedef pixFloat matrix3[3];
__constant__ pixFloat dev_cAvg[32];
__constant__ matrix3 dev_cMatrInv[32];
typedef unsigned char bytes;
struct image {
int width;
int height;
pixels* pixs;
};
struct pixel {
int x;
int y;
};
image newImage(int w, int h) {
image nIMG;
nIMG.width = w;
nIMG.height = h;
nIMG.pixs = new pixels[w * h];
return nIMG;
}
image newImage(string filename) {
FILE* file;
image thisImg;
if ((file = fopen(filename.c_str(), "rb")) == NULL) {
std::cout << "Can't load image from file" << std::endl;
exit(1);
}
fread(&thisImg.width, sizeof(thisImg.width), 1, file);
fread(&thisImg.height, sizeof(thisImg.height), 1, file);
thisImg.pixs = new pixels[thisImg.width * thisImg.height];
fread(thisImg.pixs, sizeof(pixels), thisImg.width * thisImg.height, file);
fclose(file);
return thisImg;
}
void writeToFile(image img, string filename) {
FILE* file = fopen(filename.c_str(), "wb");
fwrite(&img.width, sizeof(img.width), 1, file);
fwrite(&img.height, sizeof(img.height), 1, file);
fwrite(img.pixs, sizeof(pixels), img.width * img.height, file);
fclose(file);
}
string imgToString(image img) {
std::stringstream stream;
stream << img.width << " " << img.height << "\n";
for (int i = 0; i < img.height; i++) {
for (int j = 0; j < img.width; j++) {
int k = i * img.width + j;
stream << hex << setfill('0') << setw(2) << (int)img.pixs[k].x << setfill('0') << setw(2) << (int)img.pixs[k].y << setfill('0') << setw(2) << (int)img.pixs[k].z << setfill('0') << setw(2) << (int)img.pixs[k].w << " ";
}
stream << "\n";
}
return stream.str();
}
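// Mahalanobisse: per-pixel minimum-Mahalanobis-distance classifier. For each
// class c it evaluates -(p - avg_c)^T * inv(cov_c) * (p - avg_c) using the
// per-class means (dev_cAvg) and inverse covariances (dev_cMatrInv) stored in
// constant memory, and writes the index of the best class into the pixel's
// alpha channel.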
__global__ void Mahalanobisse(pixels* pixelsOut, int w, int h, int classes)
{
int tX = blockIdx.x * blockDim.x + threadIdx.x;
int tY = blockIdx.y * blockDim.y + threadIdx.y;
int offsetX = gridDim.x * blockDim.x;
int offsetY = gridDim.y * blockDim.y;
for (int i = tY; i < h; i += offsetY)
{
for (int j = tX; j < w; j += offsetX)
{
pixels thisPixel = pixelsOut[j + i * w];
double thisRed = (double)thisPixel.x;
double thisGreen = (double)thisPixel.y;
double thisBlue = (double)thisPixel.z;
double maxAm = 0;
int argMax = -1;
for (int c = 0; c < classes; ++c) {
double ans = 0;
pixFloat vec1;
pixFloat vec2;
pixFloat vec3;
vec1[0] = -(thisRed - dev_cAvg[c][0]);
vec2[0] = thisRed - dev_cAvg[c][0];
vec1[1] = -(thisGreen - dev_cAvg[c][1]);
vec2[1] = thisGreen - dev_cAvg[c][1];
vec1[2] = -(thisBlue - dev_cAvg[c][2]);
vec2[2] = thisBlue - dev_cAvg[c][2];
vec3[0] = vec1[0] * dev_cMatrInv[c][0][0] + vec1[1] * dev_cMatrInv[c][1][0] + vec1[2] * dev_cMatrInv[c][2][0];
vec3[1] = vec1[0] * dev_cMatrInv[c][0][1] + vec1[1] * dev_cMatrInv[c][1][1] + vec1[2] * dev_cMatrInv[c][2][1];
vec3[2] = vec1[0] * dev_cMatrInv[c][0][2] + vec1[1] * dev_cMatrInv[c][1][2] + vec1[2] * dev_cMatrInv[c][2][2];
ans = vec3[0] * vec2[0] + vec3[1] * vec2[1] + vec3[2] * vec2[2];
if (ans > maxAm || argMax == -1) {
maxAm = ans;
argMax = c;
}
}
pixelsOut[j + i * w].w = argMax;
//coloring 3 for report
//if (argMax == 0) {
// pixelsOut[j + i * w].x = 255;
// pixelsOut[j + i * w].y = 0;
// pixelsOut[j + i * w].z = 0;
//}
//else if (argMax == 1) {
// pixelsOut[j + i * w].x = 0;
// pixelsOut[j + i * w].y = 255;
// pixelsOut[j + i * w].z = 0;
//}
//else if (argMax == 2) {
// pixelsOut[j + i * w].x = 0;
// pixelsOut[j + i * w].y = 0;
// pixelsOut[j + i * w].z = 255;
//}
}
}
}
void begin(image* image1, int classes) {
pixels* oldPixels;
int size1 = sizeof(pixels) * image1->width * image1->height;
CSC(hipMalloc((void**)& oldPixels, size1));
int SIZE = 32;
dim3 gridSz(SIZE, SIZE);
dim3 blockSz(SIZE, SIZE);
CSC(hipMemcpy(oldPixels, image1->pixs, size1, hipMemcpyHostToDevice));
hipEvent_t start, end;
CSC(hipEventCreate(&start));
CSC(hipEventCreate(&end));
CSC(hipEventRecord(start));
Mahalanobisse << < gridSz, blockSz >> > (oldPixels, image1->width, image1->height, classes);
CSC(hipGetLastError()); // check for errors after the kernel launch
CSC(hipEventRecord(end));
CSC(hipEventSynchronize(end));
float t;
CSC(hipEventElapsedTime(&t, start, end));
CSC(hipEventDestroy(start));
CSC(hipEventDestroy(end));
// end of timing
printf("time = %f\n", t);
CSC(hipMemcpy(image1->pixs, oldPixels, size1, hipMemcpyDeviceToHost));
CSC(hipFree(oldPixels));
}
int main()
{
string input;
string output;
int w;
cin >> input >> output;
image myImage = newImage(input);
w = myImage.width;
int classes;
cin >> classes;
double curRed = 0;
double curGreen = 0;
double curBlue = 0;
pixFloat* cAvg = new pixFloat[classes];
matrix3* cMatr = new matrix3[classes];
matrix3* cMatrInv = new matrix3[classes];
// build the per-class mean vectors and covariance matrices
for (int i = 0; i < classes; ++i) {
long long pixs_am = 0;
curRed = 0;
curGreen = 0;
curBlue = 0;
cin >> pixs_am;
pixel* pixPairs = new pixel[pixs_am];
for (long long j = 0; j < pixs_am; ++j) {
int X, Y;
cin >> X >> Y;
curRed += (double)myImage.pixs[X + w * Y].x;
curGreen += (double)myImage.pixs[X + w * Y].y;
curBlue += (double)myImage.pixs[X + w * Y].z;
pixPairs[j].x = X;
pixPairs[j].y = Y;
}
curRed /= pixs_am;
curGreen /= pixs_am;
curBlue /= pixs_am;
cAvg[i][0] = curRed;
cAvg[i][1] = curGreen;
cAvg[i][2] = curBlue;
matrix3 totalMatrix;
for (int Ti = 0; Ti < 3; ++Ti) {
for (int Tj = 0; Tj < 3; ++Tj) {
totalMatrix[Ti][Tj] = 0;
}
}
for (int j = 0; j < pixs_am; ++j) {
pixFloat vec;
vec[0] = (double)myImage.pixs[pixPairs[j].x + w * pixPairs[j].y].x - cAvg[i][0];
vec[1] = (double)myImage.pixs[pixPairs[j].x + w * pixPairs[j].y].y - cAvg[i][1];
vec[2] = (double)myImage.pixs[pixPairs[j].x + w * pixPairs[j].y].z - cAvg[i][2];
for (int Ti = 0; Ti < 3; ++Ti) {
for (int Tj = 0; Tj < 3; ++Tj) {
totalMatrix[Ti][Tj] += vec[Ti] * vec[Tj];
}
}
}
for (int Ti = 0; Ti < 3; ++Ti) {
for (int Tj = 0; Tj < 3; ++Tj) {
totalMatrix[Ti][Tj] /= max(0.000001, (double)pixs_am - 1);
cMatr[i][Ti][Tj] = totalMatrix[Ti][Tj];
}
}
delete[] pixPairs;
}
for (int i = 0; i < classes; ++i) {
double det = 0;
det = cMatr[i][0][0] * cMatr[i][1][1] * cMatr[i][2][2] + cMatr[i][0][2] * cMatr[i][1][0] * cMatr[i][2][1] +
cMatr[i][0][1] * cMatr[i][1][2] * cMatr[i][2][0] - cMatr[i][2][0] * cMatr[i][1][1] * cMatr[i][0][2] -
cMatr[i][0][1] * cMatr[i][1][0] * cMatr[i][2][2] - cMatr[i][0][0] * cMatr[i][1][2] * cMatr[i][2][1];
if (det == 0) det = 0.0000001; // avoid a zero determinant so the program does not crash
matrix3 transp;
for (int x = 0; x < 3; ++x) {
for (int y = 0; y < 3; ++y) {
transp[x][y] = cMatr[i][y][x];
}
}
double dop1 = transp[1][1] * transp[2][2] - transp[1][2] * transp[2][1];
double dop4 = transp[1][2] * transp[2][0] - transp[1][0] * transp[2][2];
double dop7 = transp[1][0] * transp[2][1] - transp[1][1] * transp[2][0];
double dop2 = transp[0][2] * transp[2][1] - transp[0][1] * transp[2][2];
double dop5 = transp[0][0] * transp[2][2] - transp[0][2] * transp[2][0];
double dop8 = transp[0][1] * transp[2][0] - transp[0][0] * transp[2][1];
double dop3 = transp[0][1] * transp[1][2] - transp[0][2] * transp[1][1];
double dop6 = transp[0][2] * transp[1][0] - transp[0][0] * transp[1][2];
double dop9 = transp[0][0] * transp[1][1] - transp[0][1] * transp[1][0];
cMatrInv[i][0][0] = dop1 / det;
cMatrInv[i][0][1] = dop2 / det;
cMatrInv[i][0][2] = dop3 / det;
cMatrInv[i][1][0] = dop4 / det;
cMatrInv[i][1][1] = dop5 / det;
cMatrInv[i][1][2] = dop6 / det;
cMatrInv[i][2][0] = dop7 / det;
cMatrInv[i][2][1] = dop8 / det;
cMatrInv[i][2][2] = dop9 / det;
}
hipMemcpyToSymbol(dev_cAvg, cAvg, sizeof(pixFloat) * classes, 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(dev_cMatrInv, cMatrInv, sizeof(matrix3) * classes, 0, hipMemcpyHostToDevice);
delete[] cAvg;
delete[] cMatr;
delete[] cMatrInv;
begin(&myImage, classes);
writeToFile(myImage, output);
return 0;
} | 91aa8fce8233bf4e6c432c5c4f3a61eede48ee9a.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <cstdio>
#include <sstream>
#include <iomanip>
#include <math.h>
#include <algorithm>
#include <string>
#include <cuda.h>
using namespace std;
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while (0) \
\
typedef uchar4 pixels;
typedef double pixFloat[3];
typedef pixFloat matrix3[3];
__constant__ pixFloat dev_cAvg[32];
__constant__ matrix3 dev_cMatrInv[32];
typedef unsigned char bytes;
struct image {
int width;
int height;
pixels* pixs;
};
struct pixel {
int x;
int y;
};
image newImage(int w, int h) {
image nIMG;
nIMG.width = w;
nIMG.height = h;
nIMG.pixs = new pixels[w * h];
return nIMG;
}
image newImage(string filename) {
FILE* file;
image thisImg;
if ((file = fopen(filename.c_str(), "rb")) == NULL) {
std::cout << "Can't load image from file" << std::endl;
exit(1);
}
fread(&thisImg.width, sizeof(thisImg.width), 1, file);
fread(&thisImg.height, sizeof(thisImg.height), 1, file);
thisImg.pixs = new pixels[thisImg.width * thisImg.height];
fread(thisImg.pixs, sizeof(pixels), thisImg.width * thisImg.height, file);
fclose(file);
return thisImg;
}
void writeToFile(image img, string filename) {
FILE* file = fopen(filename.c_str(), "wb");
fwrite(&img.width, sizeof(img.width), 1, file);
fwrite(&img.height, sizeof(img.height), 1, file);
fwrite(img.pixs, sizeof(pixels), img.width * img.height, file);
fclose(file);
}
string imgToString(image img) {
std::stringstream stream;
stream << img.width << " " << img.height << "\n";
for (int i = 0; i < img.height; i++) {
for (int j = 0; j < img.width; j++) {
int k = i * img.width + j;
stream << hex << setfill('0') << setw(2) << (int)img.pixs[k].x << setfill('0') << setw(2) << (int)img.pixs[k].y << setfill('0') << setw(2) << (int)img.pixs[k].z << setfill('0') << setw(2) << (int)img.pixs[k].w << " ";
}
stream << "\n";
}
return stream.str();
}
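// Mahalanobisse: per-pixel minimum-Mahalanobis-distance classifier. For each
// class c it evaluates -(p - avg_c)^T * inv(cov_c) * (p - avg_c) using the
// per-class means (dev_cAvg) and inverse covariances (dev_cMatrInv) stored in
// constant memory, and writes the index of the best class into the pixel's
// alpha channel.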
__global__ void Mahalanobisse(pixels* pixelsOut, int w, int h, int classes)
{
int tX = blockIdx.x * blockDim.x + threadIdx.x;
int tY = blockIdx.y * blockDim.y + threadIdx.y;
int offsetX = gridDim.x * blockDim.x;
int offsetY = gridDim.y * blockDim.y;
for (int i = tY; i < h; i += offsetY)
{
for (int j = tX; j < w; j += offsetX)
{
pixels thisPixel = pixelsOut[j + i * w];
double thisRed = (double)thisPixel.x;
double thisGreen = (double)thisPixel.y;
double thisBlue = (double)thisPixel.z;
double maxAm = 0;
int argMax = -1;
for (int c = 0; c < classes; ++c) {
double ans = 0;
pixFloat vec1;
pixFloat vec2;
pixFloat vec3;
vec1[0] = -(thisRed - dev_cAvg[c][0]);
vec2[0] = thisRed - dev_cAvg[c][0];
vec1[1] = -(thisGreen - dev_cAvg[c][1]);
vec2[1] = thisGreen - dev_cAvg[c][1];
vec1[2] = -(thisBlue - dev_cAvg[c][2]);
vec2[2] = thisBlue - dev_cAvg[c][2];
vec3[0] = vec1[0] * dev_cMatrInv[c][0][0] + vec1[1] * dev_cMatrInv[c][1][0] + vec1[2] * dev_cMatrInv[c][2][0];
vec3[1] = vec1[0] * dev_cMatrInv[c][0][1] + vec1[1] * dev_cMatrInv[c][1][1] + vec1[2] * dev_cMatrInv[c][2][1];
vec3[2] = vec1[0] * dev_cMatrInv[c][0][2] + vec1[1] * dev_cMatrInv[c][1][2] + vec1[2] * dev_cMatrInv[c][2][2];
ans = vec3[0] * vec2[0] + vec3[1] * vec2[1] + vec3[2] * vec2[2];
if (ans > maxAm || argMax == -1) {
maxAm = ans;
argMax = c;
}
}
pixelsOut[j + i * w].w = argMax;
//coloring 3 for report
//if (argMax == 0) {
// pixelsOut[j + i * w].x = 255;
// pixelsOut[j + i * w].y = 0;
// pixelsOut[j + i * w].z = 0;
//}
//else if (argMax == 1) {
// pixelsOut[j + i * w].x = 0;
// pixelsOut[j + i * w].y = 255;
// pixelsOut[j + i * w].z = 0;
//}
//else if (argMax == 2) {
// pixelsOut[j + i * w].x = 0;
// pixelsOut[j + i * w].y = 0;
// pixelsOut[j + i * w].z = 255;
//}
}
}
}
void begin(image* image1, int classes) {
pixels* oldPixels;
int size1 = sizeof(pixels) * image1->width * image1->height;
CSC(cudaMalloc((void**)& oldPixels, size1));
int SIZE = 32;
dim3 gridSz(SIZE, SIZE);
dim3 blockSz(SIZE, SIZE);
CSC(cudaMemcpy(oldPixels, image1->pixs, size1, cudaMemcpyHostToDevice));
cudaEvent_t start, end;
CSC(cudaEventCreate(&start));
CSC(cudaEventCreate(&end));
CSC(cudaEventRecord(start));
Mahalanobisse << < gridSz, blockSz >> > (oldPixels, image1->width, image1->height, classes);
CSC(cudaGetLastError()); // check for errors after the kernel launch
CSC(cudaEventRecord(end));
CSC(cudaEventSynchronize(end));
float t;
CSC(cudaEventElapsedTime(&t, start, end));
CSC(cudaEventDestroy(start));
CSC(cudaEventDestroy(end));
// end of timing
printf("time = %f\n", t);
CSC(cudaMemcpy(image1->pixs, oldPixels, size1, cudaMemcpyDeviceToHost));
CSC(cudaFree(oldPixels));
}
int main()
{
string input;
string output;
int w;
cin >> input >> output;
image myImage = newImage(input);
w = myImage.width;
int classes;
cin >> classes;
double curRed = 0;
double curGreen = 0;
double curBlue = 0;
pixFloat* cAvg = new pixFloat[classes];
matrix3* cMatr = new matrix3[classes];
matrix3* cMatrInv = new matrix3[classes];
// build the per-class mean vectors and covariance matrices
for (int i = 0; i < classes; ++i) {
long long pixs_am = 0;
curRed = 0;
curGreen = 0;
curBlue = 0;
cin >> pixs_am;
pixel* pixPairs = new pixel[pixs_am];
for (long long j = 0; j < pixs_am; ++j) {
int X, Y;
cin >> X >> Y;
curRed += (double)myImage.pixs[X + w * Y].x;
curGreen += (double)myImage.pixs[X + w * Y].y;
curBlue += (double)myImage.pixs[X + w * Y].z;
pixPairs[j].x = X;
pixPairs[j].y = Y;
}
curRed /= pixs_am;
curGreen /= pixs_am;
curBlue /= pixs_am;
cAvg[i][0] = curRed;
cAvg[i][1] = curGreen;
cAvg[i][2] = curBlue;
matrix3 totalMatrix;
for (int Ti = 0; Ti < 3; ++Ti) {
for (int Tj = 0; Tj < 3; ++Tj) {
totalMatrix[Ti][Tj] = 0;
}
}
for (int j = 0; j < pixs_am; ++j) {
pixFloat vec;
vec[0] = (double)myImage.pixs[pixPairs[j].x + w * pixPairs[j].y].x - cAvg[i][0];
vec[1] = (double)myImage.pixs[pixPairs[j].x + w * pixPairs[j].y].y - cAvg[i][1];
vec[2] = (double)myImage.pixs[pixPairs[j].x + w * pixPairs[j].y].z - cAvg[i][2];
for (int Ti = 0; Ti < 3; ++Ti) {
for (int Tj = 0; Tj < 3; ++Tj) {
totalMatrix[Ti][Tj] += vec[Ti] * vec[Tj];
}
}
}
for (int Ti = 0; Ti < 3; ++Ti) {
for (int Tj = 0; Tj < 3; ++Tj) {
totalMatrix[Ti][Tj] /= max(0.000001, (double)pixs_am - 1);
cMatr[i][Ti][Tj] = totalMatrix[Ti][Tj];
}
}
delete[] pixPairs;
}
for (int i = 0; i < classes; ++i) {
double det = 0;
det = cMatr[i][0][0] * cMatr[i][1][1] * cMatr[i][2][2] + cMatr[i][0][2] * cMatr[i][1][0] * cMatr[i][2][1] +
cMatr[i][0][1] * cMatr[i][1][2] * cMatr[i][2][0] - cMatr[i][2][0] * cMatr[i][1][1] * cMatr[i][0][2] -
cMatr[i][0][1] * cMatr[i][1][0] * cMatr[i][2][2] - cMatr[i][0][0] * cMatr[i][1][2] * cMatr[i][2][1];
if (det == 0) det = 0.0000001; // avoid a zero determinant so the program does not crash
matrix3 transp;
for (int x = 0; x < 3; ++x) {
for (int y = 0; y < 3; ++y) {
transp[x][y] = cMatr[i][y][x];
}
}
double dop1 = transp[1][1] * transp[2][2] - transp[1][2] * transp[2][1];
double dop4 = transp[1][2] * transp[2][0] - transp[1][0] * transp[2][2];
double dop7 = transp[1][0] * transp[2][1] - transp[1][1] * transp[2][0];
double dop2 = transp[0][2] * transp[2][1] - transp[0][1] * transp[2][2];
double dop5 = transp[0][0] * transp[2][2] - transp[0][2] * transp[2][0];
double dop8 = transp[0][1] * transp[2][0] - transp[0][0] * transp[2][1];
double dop3 = transp[0][1] * transp[1][2] - transp[0][2] * transp[1][1];
double dop6 = transp[0][2] * transp[1][0] - transp[0][0] * transp[1][2];
double dop9 = transp[0][0] * transp[1][1] - transp[0][1] * transp[1][0];
cMatrInv[i][0][0] = dop1 / det;
cMatrInv[i][0][1] = dop2 / det;
cMatrInv[i][0][2] = dop3 / det;
cMatrInv[i][1][0] = dop4 / det;
cMatrInv[i][1][1] = dop5 / det;
cMatrInv[i][1][2] = dop6 / det;
cMatrInv[i][2][0] = dop7 / det;
cMatrInv[i][2][1] = dop8 / det;
cMatrInv[i][2][2] = dop9 / det;
}
cudaMemcpyToSymbol(dev_cAvg, cAvg, sizeof(pixFloat) * classes, 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(dev_cMatrInv, cMatrInv, sizeof(matrix3) * classes, 0, cudaMemcpyHostToDevice);
delete[] cAvg;
delete[] cMatr;
delete[] cMatrInv;
begin(&myImage, classes);
writeToFile(myImage, output);
return 0;
} |
5e208cac72f7d72ab6576c43f7a8bc3ee37b95b1.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <random>
#include <chrono>
#include <math.h>
#include "stdio.h"
#include "bitonic.cuh"
using namespace std;
#define MEMORY_ERROR -1
#define MIN_VALUE -1000.0
#define MAX_VALUE 1000.0
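// Test driver: fills an array of n random floats (n is expected to be a power
// of two), sorts it on the GPU with the bitonic sort declared in bitonic.cuh,
// verifies the ordering for the requested direction, and compares the GPU
// time against the C library qsort on the CPU.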
int cmpfunc (const void * a, const void * b) {
float fa = *(const float*) a;
float fb = *(const float*) b;
return (fa > fb) - (fa < fb);
}
void initArray(float *data, int n){
if(data == NULL){
cout << "Not enough memory. Aborting ..." << endl;
free(data);
exit(MEMORY_ERROR);
}else{
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
default_random_engine generator(seed);
uniform_real_distribution<float> distribution(MIN_VALUE, MAX_VALUE);
for(int i = 0; i < n; i++){
data[i] = distribution(generator);
}
}
}
int main(int argc, char *argv[]){
if(argc != 3){
cout << "Wrong number of arguments. Requires number of points(power of 2) & direction" << endl;
exit(-1);
}
int n = atoi(argv[1]);
int dir = atoi(argv[2]);
float *h_data;
h_data = (float *)malloc(n * sizeof(float));
initArray(h_data, n);
// sort using the cuda bitonic
float *d_data;
hipMalloc(&d_data, n * sizeof(float));
hipMemcpy(d_data, h_data, n * sizeof(float), hipMemcpyHostToDevice);
long gpu_time = bitonic(d_data, n, dir);
cout << "Parallel Bitonic GPU time: " << gpu_time << endl;
hipMemcpy(h_data, d_data, n * sizeof(float), hipMemcpyDeviceToHost);
// check according to the given direction
bool success = 1;
for(int i = 1; i < n; i++){
if(dir == 1){
if(h_data[i - 1] > h_data[i]){
success = 0;
cout << "error in i: " << i << ", " << h_data[i - 1] - h_data[i] << endl;
break;
}
}else{
if(h_data[i - 1] < h_data[i]){
success = 0;
cout << "error in i: " << i << ", " << h_data[i - 1] - h_data[i] << endl;
break;
}
}
}
// re-shuffle the array so quicksort is not handed already-sorted (best-case) input
initArray(h_data, n);
// compare with std's quicksort
auto start = std::chrono::high_resolution_clock::now();
qsort(h_data, n, sizeof(float), cmpfunc);
auto finish = std::chrono::high_resolution_clock::now();
long cpu_time = std::chrono::duration_cast<std::chrono::nanoseconds>(finish - start).count();
cout << "Quicksort CPU time: " << cpu_time << endl;
cout << "The result is: " << success << endl;
cout << "Speed up: " << (double)cpu_time / gpu_time << endl;
// free memory
free(h_data);
return 0;
}
| 5e208cac72f7d72ab6576c43f7a8bc3ee37b95b1.cu | #include <iostream>
#include <stdlib.h>
#include <random>
#include <chrono>
#include <math.h>
#include "stdio.h"
#include "bitonic.cuh"
using namespace std;
#define MEMORY_ERROR -1
#define MIN_VALUE -1000.0
#define MAX_VALUE 1000.0
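// Test driver: fills an array of n random floats (n is expected to be a power
// of two), sorts it on the GPU with the bitonic sort declared in bitonic.cuh,
// verifies the ordering for the requested direction, and compares the GPU
// time against the C library qsort on the CPU.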
int cmpfunc (const void * a, const void * b) {
float fa = *(const float*) a;
float fb = *(const float*) b;
return (fa > fb) - (fa < fb);
}
void initArray(float *data, int n){
if(data == NULL){
cout << "Not enough memory. Aborting ..." << endl;
free(data);
exit(MEMORY_ERROR);
}else{
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
default_random_engine generator(seed);
uniform_real_distribution<float> distribution(MIN_VALUE, MAX_VALUE);
for(int i = 0; i < n; i++){
data[i] = distribution(generator);
}
}
}
int main(int argc, char *argv[]){
if(argc != 3){
cout << "Wrong number of arguments. Requires number of points(power of 2) & direction" << endl;
exit(-1);
}
int n = atoi(argv[1]);
int dir = atoi(argv[2]);
float *h_data;
h_data = (float *)malloc(n * sizeof(float));
initArray(h_data, n);
// sort using the cuda bitonic
float *d_data;
cudaMalloc(&d_data, n * sizeof(float));
cudaMemcpy(d_data, h_data, n * sizeof(float), cudaMemcpyHostToDevice);
long gpu_time = bitonic(d_data, n, dir);
cout << "Parallel Bitonic GPU time: " << gpu_time << endl;
cudaMemcpy(h_data, d_data, n * sizeof(float), cudaMemcpyDeviceToHost);
// check according to the given direction
bool success = 1;
for(int i = 1; i < n; i++){
if(dir == 1){
if(h_data[i - 1] > h_data[i]){
success = 0;
cout << "error in i: " << i << ", " << h_data[i - 1] - h_data[i] << endl;
break;
}
}else{
if(h_data[i - 1] < h_data[i]){
success = 0;
cout << "error in i: " << i << ", " << h_data[i - 1] - h_data[i] << endl;
break;
}
}
}
// re-shuffle the array so quicksort is not handed already-sorted (best-case) input
initArray(h_data, n);
// compare with std's quicksort
auto start = std::chrono::high_resolution_clock::now();
qsort(h_data, n, sizeof(float), cmpfunc);
auto finish = std::chrono::high_resolution_clock::now();
long cpu_time = std::chrono::duration_cast<std::chrono::nanoseconds>(finish - start).count();
cout << "Quicksort CPU time: " << cpu_time << endl;
cout << "The result is: " << success << endl;
cout << "Speed up: " << (double)cpu_time / gpu_time << endl;
// free memory
free(h_data);
return 0;
}
|
50afb462d0b1e92cf7d3c6f39a63ec2befe58338.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <chrono>
#include <math.h>
using namespace std::chrono;
using namespace std;
#define TPB 256
#define NUM_PARTICLES 100000
#define NUM_ITERATIONS 1000
#define N (NUM_PARTICLES/TPB + 1)
struct particle {
float position[3];
float velocity[3];
};
struct seed {
int x;
int y;
int z;
};
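// gen_random: cheap deterministic pseudo-random value derived from the seed,
// particle id and iteration number, so the CPU and GPU update paths produce
// identical trajectories that main() can compare element by element.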
__host__ __device__ float gen_random(int seed, int particle_id, int iteration)
{
float rand_num = (seed * particle_id + iteration) % NUM_PARTICLES;
// printf("seed = %d, particle_id = %d, iteration = %d, rand_num = %e\n",
// seed,
// particle_id,
// iteration,
// rand_num);
return rand_num;
}
__host__ __device__ void updateVelAndPos(particle *particles, seed seed, int iteration, int particle_id)
{
// Velocity update:
particles[particle_id].velocity[0] = gen_random(seed.x, particle_id, iteration);
particles[particle_id].velocity[1] = gen_random(seed.y, particle_id, iteration);
particles[particle_id].velocity[2] = gen_random(seed.z, particle_id, iteration);
// Position update:
particles[particle_id].position[0] = particles[particle_id].position[0] + particles[particle_id].velocity[0];
particles[particle_id].position[1] = particles[particle_id].position[1] + particles[particle_id].velocity[1];
particles[particle_id].position[2] = particles[particle_id].position[2] + particles[particle_id].velocity[2];
}
__global__ void timestepGPU(particle *particles, seed seed, int iteration) {
const int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i<NUM_PARTICLES) {
// printf("Old threadId = %d, velocity.x = %e, position.x = %e\n",
// threadIdx.x, particles[i].velocity[0], particles[i].position[0]);
updateVelAndPos(particles, seed, iteration, i);
// printf("New threadId = %d, velocity.x = %e\n", threadIdx.x, particles[i].velocity[0]);
}
}
void timestepCPU(particle *particles, seed seed, int iteration) {
for (int i = 0; i < NUM_PARTICLES; i++) {
updateVelAndPos(particles, seed, iteration, i);
}
}
int main()
{
seed seed = {5,6,7};
particle *particlesCPU = new particle[NUM_PARTICLES];
particle *particlesGPU = nullptr; // device buffer, allocated below with hipMalloc
// particle *particlesGPU2CPU = new particle[NUM_PARTICLES];
particle *particlesGPU2CPU = NULL;
hipHostMalloc(&particlesGPU2CPU, sizeof(particle) * NUM_PARTICLES);
//////// CPU calculations ////////
auto startCPU = high_resolution_clock::now();
for (int i = 0; i < NUM_ITERATIONS; i++) {
// cout << "iteration: " << i <<"\n";
timestepCPU(particlesCPU, seed, i);
}
// Print output:
// for (int ii = 0; ii < 10; ii++) {
// cout << particlesCPU[ii].position[0] << "\n";
// }
auto stopCPU = high_resolution_clock::now();
auto durationCPU = duration_cast<milliseconds>(stopCPU - startCPU);
cout << "---------------\n";
//////////////////////////////////
//////// GPU calculations ////////
auto startGPU = high_resolution_clock::now();
hipMalloc(&particlesGPU, sizeof(particle) * NUM_PARTICLES);
for (int i = 0; i < NUM_ITERATIONS; i++) {
// cout << "iteration: " << i <<"\n";
hipLaunchKernelGGL(( timestepGPU), dim3(N), dim3(TPB), 0, 0, particlesGPU, seed, i);
// hipDeviceSynchronize();
}
hipDeviceSynchronize();
hipMemcpy(particlesGPU2CPU, particlesGPU, sizeof(particle) * NUM_PARTICLES, hipMemcpyDeviceToHost);
// Print output:
// for (int ii = 0; ii < 10; ii++) {
// cout << particlesGPU2CPU[ii].position[0] << "\n";
// }
auto stopGPU = high_resolution_clock::now();
auto durationGPU = duration_cast<milliseconds>(stopGPU - startGPU);
//////////////////////////////////
//////// Compare calculations ////////
float maxError = 0.0f;
for (int particle_i = 0; particle_i < NUM_PARTICLES; particle_i++) {
for (int dim = 0; dim < 3; dim++) {
maxError = fmax(maxError, fabs(
particlesGPU2CPU[particle_i].position[dim] - particlesCPU[particle_i].position[dim]
));
}
}
std::cout << "Max error: " << maxError << std::endl;
// delete[] particlesGPU2CPU;
hipFree(particlesGPU2CPU);
hipFree(particlesGPU);
delete[] particlesCPU;
//////////////////////////////////
cout << "CPU duration in milliseconds: " << durationCPU.count() << endl;
cout << "GPU duration in milliseconds: " << durationGPU.count() << endl;
return 0;
}
| 50afb462d0b1e92cf7d3c6f39a63ec2befe58338.cu | #include <stdio.h>
#include <iostream>
#include <chrono>
#include <math.h>
using namespace std::chrono;
using namespace std;
#define TPB 256
#define NUM_PARTICLES 100000
#define NUM_ITERATIONS 1000
#define N (NUM_PARTICLES/TPB + 1)
struct particle {
float position[3];
float velocity[3];
};
struct seed {
int x;
int y;
int z;
};
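// gen_random: cheap deterministic pseudo-random value derived from the seed,
// particle id and iteration number, so the CPU and GPU update paths produce
// identical trajectories that main() can compare element by element.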
__host__ __device__ float gen_random(int seed, int particle_id, int iteration)
{
float rand_num = (seed * particle_id + iteration) % NUM_PARTICLES;
// printf("seed = %d, particle_id = %d, iteration = %d, rand_num = %e\n",
// seed,
// particle_id,
// iteration,
// rand_num);
return rand_num;
}
__host__ __device__ void updateVelAndPos(particle *particles, seed seed, int iteration, int particle_id)
{
// Velocity update:
particles[particle_id].velocity[0] = gen_random(seed.x, particle_id, iteration);
particles[particle_id].velocity[1] = gen_random(seed.y, particle_id, iteration);
particles[particle_id].velocity[2] = gen_random(seed.z, particle_id, iteration);
// Position update:
particles[particle_id].position[0] = particles[particle_id].position[0] + particles[particle_id].velocity[0];
particles[particle_id].position[1] = particles[particle_id].position[1] + particles[particle_id].velocity[1];
particles[particle_id].position[2] = particles[particle_id].position[2] + particles[particle_id].velocity[2];
}
__global__ void timestepGPU(particle *particles, seed seed, int iteration) {
const int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i<NUM_PARTICLES) {
// printf("Old threadId = %d, velocity.x = %e, position.x = %e\n",
// threadIdx.x, particles[i].velocity[0], particles[i].position[0]);
updateVelAndPos(particles, seed, iteration, i);
// printf("New threadId = %d, velocity.x = %e\n", threadIdx.x, particles[i].velocity[0]);
}
}
void timestepCPU(particle *particles, seed seed, int iteration) {
for (int i = 0; i < NUM_PARTICLES; i++) {
updateVelAndPos(particles, seed, iteration, i);
}
}
int main()
{
seed seed = {5,6,7};
particle *particlesCPU = new particle[NUM_PARTICLES];
particle *particlesGPU = nullptr; // device buffer, allocated below with cudaMalloc
// particle *particlesGPU2CPU = new particle[NUM_PARTICLES];
particle *particlesGPU2CPU = NULL;
cudaMallocHost(&particlesGPU2CPU, sizeof(particle) * NUM_PARTICLES);
//////// CPU calculations ////////
auto startCPU = high_resolution_clock::now();
for (int i = 0; i < NUM_ITERATIONS; i++) {
// cout << "iteration: " << i <<"\n";
timestepCPU(particlesCPU, seed, i);
}
// Print output:
// for (int ii = 0; ii < 10; ii++) {
// cout << particlesCPU[ii].position[0] << "\n";
// }
auto stopCPU = high_resolution_clock::now();
auto durationCPU = duration_cast<milliseconds>(stopCPU - startCPU);
cout << "---------------\n";
//////////////////////////////////
//////// GPU calculations ////////
auto startGPU = high_resolution_clock::now();
cudaMalloc(&particlesGPU, sizeof(particle) * NUM_PARTICLES);
for (int i = 0; i < NUM_ITERATIONS; i++) {
// cout << "iteration: " << i <<"\n";
timestepGPU<<<N, TPB>>>(particlesGPU, seed, i);
// cudaDeviceSynchronize();
}
cudaDeviceSynchronize();
cudaMemcpy(particlesGPU2CPU, particlesGPU, sizeof(particle) * NUM_PARTICLES, cudaMemcpyDeviceToHost);
// Print output:
// for (int ii = 0; ii < 10; ii++) {
// cout << particlesGPU2CPU[ii].position[0] << "\n";
// }
auto stopGPU = high_resolution_clock::now();
auto durationGPU = duration_cast<milliseconds>(stopGPU - startGPU);
//////////////////////////////////
//////// Compare calculations ////////
float maxError = 0.0f;
for (int particle_i = 0; particle_i < NUM_PARTICLES; particle_i++) {
for (int dim = 0; dim < 3; dim++) {
maxError = fmax(maxError, fabs(
particlesGPU2CPU[particle_i].position[dim] - particlesCPU[particle_i].position[dim]
));
}
}
std::cout << "Max error: " << maxError << std::endl;
// delete[] particlesGPU2CPU;
cudaFree(particlesGPU2CPU);
cudaFree(particlesGPU);
delete[] particlesCPU;
//////////////////////////////////
cout << "CPU duration in milliseconds: " << durationCPU.count() << endl;
cout << "GPU duration in milliseconds: " << durationGPU.count() << endl;
return 0;
}
|
f24c8650b3b8096d51775c20818ba85d5a7c4dd4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 28.11.2018
//
#include <ops/specials_cuda.h>
//////////////////////////////////////////////////////////////////////////
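// oesTadKernel: sorts every TAD (tensor slice along the given dimension) with
// an odd-even transposition sort. When a TAD fits into the 32KB shared-memory
// budget it is staged there and written back afterwards; otherwise elements
// are addressed through getDevicePosition on every comparison.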
template<typename T>
__device__
void oesTadKernel(void *vx, Nd4jLong *xShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
bool descending) {
auto x = static_cast<T*>(vx);
const int sharedSize = 32768;
__shared__ int xLength;
__shared__ int xTadLength;
__shared__ int numTads;
__shared__ T *shmem;
__shared__ bool cached;
if (threadIdx.x == 0) {
xLength = shape::length(xShapeInfo);
xTadLength = shape::length(tadShapeInfo);
numTads = xLength / xTadLength;
extern __shared__ unsigned char shrd[];
shmem = (T *) shrd;
cached = xTadLength <= (sharedSize / sizeof(T));
}
__syncthreads();
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto dx = x + tadOffsets[r];
// this is general loop, we go uncached
int iterations = xTadLength;
if (cached) {
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto t0 = getDevicePosition(tadShapeInfo, tid, xTadLength);
shmem[tid] = dx[t0];
}
__syncthreads();
dx = shmem;
}
for (int i = 0; i < iterations; i++) {
if (i % 2 == 0) {
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto top = 2 * tid + 1;
if (top < xTadLength) {
auto t0 = cached ? top - 1 : getDevicePosition(tadShapeInfo, top - 1, xTadLength);
auto t1 = cached ? top : getDevicePosition(tadShapeInfo, top, xTadLength);
if (!descending == (dx[t0] > dx[t1])) {
T dt0 = dx[t0];
dx[t0] = dx[t1];
dx[t1] = dt0;
}
}
}
} else {
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto top = 2 * tid + 2;
if (top < xTadLength) {
auto t0 = cached ? top - 1 : getDevicePosition(tadShapeInfo, top - 1, xTadLength);
auto t1 = cached ? top : getDevicePosition(tadShapeInfo, top, xTadLength);
if (!descending == (dx[t0] > dx[t1])) {
T dt0 = dx[t0];
dx[t0] = dx[t1];
dx[t1] = dt0;
}
}
}
}
__syncthreads();
}
if (cached) {
dx = x + tadOffsets[r];
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto t0 = getDevicePosition(tadShapeInfo, tid, xTadLength);
dx[t0] = shmem[tid];
}
}
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ void execOesTadKernel(void *vx, Nd4jLong *xShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
bool descending) {
oesTadKernel<T>(vx, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void oesTadGeneric(dim3 &launchDims, hipStream_t *stream,
void *vx, Nd4jLong *xShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
bool descending) {
hipLaunchKernelGGL(( execOesTadKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT oesTadGeneric, (dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending), LIBND4J_TYPES);
| f24c8650b3b8096d51775c20818ba85d5a7c4dd4.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 28.11.2018
//
#include <ops/specials_cuda.h>
//////////////////////////////////////////////////////////////////////////
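// oesTadKernel: sorts every TAD (tensor slice along the given dimension) with
// an odd-even transposition sort. When a TAD fits into the 32KB shared-memory
// budget it is staged there and written back afterwards; otherwise elements
// are addressed through getDevicePosition on every comparison.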
template<typename T>
__device__
void oesTadKernel(void *vx, Nd4jLong *xShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
bool descending) {
auto x = static_cast<T*>(vx);
const int sharedSize = 32768;
__shared__ int xLength;
__shared__ int xTadLength;
__shared__ int numTads;
__shared__ T *shmem;
__shared__ bool cached;
if (threadIdx.x == 0) {
xLength = shape::length(xShapeInfo);
xTadLength = shape::length(tadShapeInfo);
numTads = xLength / xTadLength;
extern __shared__ unsigned char shrd[];
shmem = (T *) shrd;
cached = xTadLength <= (sharedSize / sizeof(T));
}
__syncthreads();
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto dx = x + tadOffsets[r];
// this is general loop, we go uncached
int iterations = xTadLength;
if (cached) {
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto t0 = getDevicePosition(tadShapeInfo, tid, xTadLength);
shmem[tid] = dx[t0];
}
__syncthreads();
dx = shmem;
}
for (int i = 0; i < iterations; i++) {
if (i % 2 == 0) {
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto top = 2 * tid + 1;
if (top < xTadLength) {
auto t0 = cached ? top - 1 : getDevicePosition(tadShapeInfo, top - 1, xTadLength);
auto t1 = cached ? top : getDevicePosition(tadShapeInfo, top, xTadLength);
if (!descending == (dx[t0] > dx[t1])) {
T dt0 = dx[t0];
dx[t0] = dx[t1];
dx[t1] = dt0;
}
}
}
} else {
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto top = 2 * tid + 2;
if (top < xTadLength) {
auto t0 = cached ? top - 1 : getDevicePosition(tadShapeInfo, top - 1, xTadLength);
auto t1 = cached ? top : getDevicePosition(tadShapeInfo, top, xTadLength);
if (!descending == (dx[t0] > dx[t1])) {
T dt0 = dx[t0];
dx[t0] = dx[t1];
dx[t1] = dt0;
}
}
}
}
__syncthreads();
}
if (cached) {
dx = x + tadOffsets[r];
for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x) {
auto t0 = getDevicePosition(tadShapeInfo, tid, xTadLength);
dx[t0] = shmem[tid];
}
}
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ void execOesTadKernel(void *vx, Nd4jLong *xShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
bool descending) {
oesTadKernel<T>(vx, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void oesTadGeneric(dim3 &launchDims, cudaStream_t *stream,
void *vx, Nd4jLong *xShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets,
bool descending) {
execOesTadKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT oesTadGeneric, (dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending), LIBND4J_TYPES);
|
f9dd720575051225628da14881ad2f88c6577dfb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <cstdio>
#include <vector>
#include <helper_cuda.h>
#include <helper_timer.h>
using namespace std;
const char *sSampleName = "P2P (Peer-to-Peer) GPU Bandwidth Latency Test";
typedef enum {
P2P_WRITE = 0,
P2P_READ = 1,
} P2PDataTransfer;
typedef enum {
CE = 0,
SM = 1,
} P2PEngine;
P2PEngine p2p_mechanism = CE; // By default use Copy Engine
// Macro for checking cuda errors following a cuda launch or api call
#define cudaCheckError() \
{ \
hipError_t e = hipGetLastError(); \
if (e != hipSuccess) { \
printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, \
hipGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
__global__ void delay(volatile int *flag,
unsigned long long timeout_clocks = 10000000) {
// Wait until the application notifies us that it has completed queuing up the
// experiment, or timeout and exit, allowing the application to make progress
long long int start_clock, sample_clock;
start_clock = clock64();
while (!*flag) {
sample_clock = clock64();
if (sample_clock - start_clock > timeout_clocks) {
break;
}
}
}
// This kernel is for demonstration purposes only, not a performant kernel for
// p2p transfers.
__global__ void copyp2p(int4 *__restrict__ dest, int4 const *__restrict__ src,
size_t num_elems) {
size_t globalId = blockIdx.x * blockDim.x + threadIdx.x;
size_t gridSize = blockDim.x * gridDim.x;
#pragma unroll(5)
for (size_t i = globalId; i < num_elems; i += gridSize) {
dest[i] = src[i];
}
}
///////////////////////////////////////////////////////////////////////////
// Print help screen
///////////////////////////////////////////////////////////////////////////
void printHelp(void) {
printf("Usage: p2pBandwidthLatencyTest [OPTION]...\n");
printf("Tests bandwidth/latency of GPU pairs using P2P and without P2P\n");
printf("\n");
printf("Options:\n");
printf("--help\t\tDisplay this help menu\n");
printf(
"--p2p_read\tUse P2P reads for data transfers between GPU pairs and show "
"corresponding results.\n \t\tDefault used is P2P write operation.\n");
printf("--sm_copy Use SM intiated p2p transfers instead of Copy Engine\n");
printf("--numElems=<NUM_OF_INT_ELEMS> Number of integer elements to be used in p2p copy.\n");
}
void checkP2Paccess(int numGPUs) {
for (int i = 0; i < numGPUs; i++) {
hipSetDevice(i);
cudaCheckError();
for (int j = 0; j < numGPUs; j++) {
int access;
if (i != j) {
hipDeviceCanAccessPeer(&access, i, j);
cudaCheckError();
printf("Device=%d %s Access Peer Device=%d\n", i,
access ? "CAN" : "CANNOT", j);
}
}
}
printf(
"\n***NOTE: In case a device doesn't have P2P access to other one, it "
"falls back to normal memcopy procedure.\nSo you can see lesser "
"Bandwidth (GB/s) and unstable Latency (us) in those cases.\n\n");
}
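// performP2PCopy: copies num_elems ints from src (on srcDevice) to dest (on
// destDevice) 'repeat' times, either with the SM-initiated copyp2p kernel
// (when --sm_copy is selected and P2P access is available) or with a
// peer-to-peer async memcpy otherwise.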
void performP2PCopy(int *dest, int destDevice, int *src, int srcDevice,
int num_elems, int repeat, bool p2paccess,
hipStream_t streamToRun) {
int blockSize = 0;
int numBlocks = 0;
hipOccupancyMaxPotentialBlockSize(&numBlocks, &blockSize, copyp2p);
cudaCheckError();
if (p2p_mechanism == SM && p2paccess) {
for (int r = 0; r < repeat; r++) {
hipLaunchKernelGGL(( copyp2p), dim3(numBlocks), dim3(blockSize), 0, streamToRun,
(int4 *)dest, (int4 *)src, num_elems / 4);
}
} else {
for (int r = 0; r < repeat; r++) {
hipMemcpyPeerAsync(dest, destDevice, src, srcDevice,
sizeof(int) * num_elems, streamToRun);
}
}
}
void outputBandwidthMatrix(int numElems, int numGPUs, bool p2p, P2PDataTransfer p2p_method) {
int repeat = 5;
volatile int *flag = NULL;
vector<int *> buffers(numGPUs);
vector<int *> buffersD2D(numGPUs); // buffer for D2D, that is, intra-GPU copy
vector<hipEvent_t> start(numGPUs);
vector<hipEvent_t> stop(numGPUs);
vector<hipStream_t> stream(numGPUs);
hipHostMalloc((void **)&flag, sizeof(*flag), hipHostMallocPortable);
cudaCheckError();
for (int d = 0; d < numGPUs; d++) {
hipSetDevice(d);
hipStreamCreateWithFlags(&stream[d], hipStreamNonBlocking);
hipMalloc(&buffers[d], numElems * sizeof(int));
cudaCheckError();
hipMemset(buffers[d], 0, numElems * sizeof(int));
cudaCheckError();
hipMalloc(&buffersD2D[d], numElems * sizeof(int));
cudaCheckError();
hipMemset(buffersD2D[d], 0, numElems * sizeof(int));
cudaCheckError();
hipEventCreate(&start[d]);
cudaCheckError();
hipEventCreate(&stop[d]);
cudaCheckError();
}
vector<double> bandwidthMatrix(numGPUs * numGPUs);
for (int i = 0; i < numGPUs; i++) {
hipSetDevice(i);
for (int j = 0; j < numGPUs; j++) {
int access = 0;
if (p2p) {
hipDeviceCanAccessPeer(&access, i, j);
if (access) {
hipDeviceEnablePeerAccess(j, 0);
cudaCheckError();
hipSetDevice(j);
cudaCheckError();
hipDeviceEnablePeerAccess(i, 0);
cudaCheckError();
hipSetDevice(i);
cudaCheckError();
}
}
hipStreamSynchronize(stream[i]);
cudaCheckError();
// Block the stream until all the work is queued up
// DANGER! - hipMemcpy*Async may infinitely block waiting for
// room to push the operation, so keep the number of repetitions
// relatively low. Higher repetition counts will cause the delay kernel
// to timeout and lead to unstable results.
*flag = 0;
hipLaunchKernelGGL(( delay), dim3(1), dim3(1), 0, stream[i], flag);
cudaCheckError();
hipEventRecord(start[i], stream[i]);
cudaCheckError();
if (i == j) {
// Perform intra-GPU, D2D copies
performP2PCopy(buffers[i], i, buffersD2D[i], i, numElems, repeat,
access, stream[i]);
} else {
if (p2p_method == P2P_WRITE) {
performP2PCopy(buffers[j], j, buffers[i], i, numElems, repeat, access,
stream[i]);
} else {
performP2PCopy(buffers[i], i, buffers[j], j, numElems, repeat, access,
stream[i]);
}
}
hipEventRecord(stop[i], stream[i]);
cudaCheckError();
// Release the queued events
*flag = 1;
hipStreamSynchronize(stream[i]);
cudaCheckError();
float time_ms;
hipEventElapsedTime(&time_ms, start[i], stop[i]);
double time_s = time_ms / 1e3;
double gb = numElems * sizeof(int) * repeat / (double)1e9;
if (i == j) {
gb *= 2; // must count both the read and the write here
}
bandwidthMatrix[i * numGPUs + j] = gb / time_s;
if (p2p && access) {
hipDeviceDisablePeerAccess(j);
hipSetDevice(j);
hipDeviceDisablePeerAccess(i);
hipSetDevice(i);
cudaCheckError();
}
}
}
printf(" D\\D");
for (int j = 0; j < numGPUs; j++) {
printf("%6d ", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d ", i);
for (int j = 0; j < numGPUs; j++) {
printf("%6.02f ", bandwidthMatrix[i * numGPUs + j]);
}
printf("\n");
}
for (int d = 0; d < numGPUs; d++) {
hipSetDevice(d);
hipFree(buffers[d]);
hipFree(buffersD2D[d]);
cudaCheckError();
hipEventDestroy(start[d]);
cudaCheckError();
hipEventDestroy(stop[d]);
cudaCheckError();
hipStreamDestroy(stream[d]);
cudaCheckError();
}
hipHostFree((void *)flag);
cudaCheckError();
}
void outputBidirectionalBandwidthMatrix(int numElems, int numGPUs, bool p2p) {
int repeat = 5;
volatile int *flag = NULL;
vector<int *> buffers(numGPUs);
vector<int *> buffersD2D(numGPUs);
vector<hipEvent_t> start(numGPUs);
vector<hipEvent_t> stop(numGPUs);
vector<hipStream_t> stream0(numGPUs);
vector<hipStream_t> stream1(numGPUs);
hipHostMalloc((void **)&flag, sizeof(*flag), hipHostMallocPortable);
cudaCheckError();
for (int d = 0; d < numGPUs; d++) {
hipSetDevice(d);
hipMalloc(&buffers[d], numElems * sizeof(int));
hipMemset(buffers[d], 0, numElems * sizeof(int));
hipMalloc(&buffersD2D[d], numElems * sizeof(int));
hipMemset(buffersD2D[d], 0, numElems * sizeof(int));
cudaCheckError();
hipEventCreate(&start[d]);
cudaCheckError();
hipEventCreate(&stop[d]);
cudaCheckError();
hipStreamCreateWithFlags(&stream0[d], hipStreamNonBlocking);
cudaCheckError();
hipStreamCreateWithFlags(&stream1[d], hipStreamNonBlocking);
cudaCheckError();
}
vector<double> bandwidthMatrix(numGPUs * numGPUs);
for (int i = 0; i < numGPUs; i++) {
hipSetDevice(i);
for (int j = 0; j < numGPUs; j++) {
int access = 0;
if (p2p) {
hipDeviceCanAccessPeer(&access, i, j);
if (access) {
hipSetDevice(i);
hipDeviceEnablePeerAccess(j, 0);
cudaCheckError();
hipSetDevice(j);
hipDeviceEnablePeerAccess(i, 0);
cudaCheckError();
}
}
hipSetDevice(i);
hipStreamSynchronize(stream0[i]);
hipStreamSynchronize(stream1[j]);
cudaCheckError();
// Block the stream until all the work is queued up
// DANGER! - hipMemcpy*Async may infinitely block waiting for
// room to push the operation, so keep the number of repetitions
// relatively low. Higher repetition counts will cause the delay kernel
// to timeout and lead to unstable results.
*flag = 0;
hipSetDevice(i);
// No need to block stream1 since it'll be blocked on stream0's event
hipLaunchKernelGGL(( delay), dim3(1), dim3(1), 0, stream0[i], flag);
cudaCheckError();
// Force stream1 not to start until stream0 does, in order to ensure
// the events on stream0 fully encompass the time needed for all
// operations
hipEventRecord(start[i], stream0[i]);
hipStreamWaitEvent(stream1[j], start[i], 0);
if (i == j) {
// For intra-GPU perform 2 memcopies buffersD2D <-> buffers
performP2PCopy(buffers[i], i, buffersD2D[i], i, numElems, repeat,
access, stream0[i]);
performP2PCopy(buffersD2D[i], i, buffers[i], i, numElems, repeat,
access, stream1[i]);
} else {
if (access && p2p_mechanism == SM) {
hipSetDevice(j);
}
performP2PCopy(buffers[i], i, buffers[j], j, numElems, repeat, access,
stream1[j]);
if (access && p2p_mechanism == SM) {
hipSetDevice(i);
}
performP2PCopy(buffers[j], j, buffers[i], i, numElems, repeat, access,
stream0[i]);
}
// Notify stream0 that stream1 is complete and record the time of
// the total transaction
hipEventRecord(stop[j], stream1[j]);
hipStreamWaitEvent(stream0[i], stop[j], 0);
hipEventRecord(stop[i], stream0[i]);
// Release the queued operations
*flag = 1;
hipStreamSynchronize(stream0[i]);
hipStreamSynchronize(stream1[j]);
cudaCheckError();
float time_ms;
hipEventElapsedTime(&time_ms, start[i], stop[i]);
double time_s = time_ms / 1e3;
double gb = 2.0 * numElems * sizeof(int) * repeat / (double)1e9;
if (i == j) {
gb *= 2; // must count both the read and the write here
}
bandwidthMatrix[i * numGPUs + j] = gb / time_s;
if (p2p && access) {
hipSetDevice(i);
hipDeviceDisablePeerAccess(j);
hipSetDevice(j);
hipDeviceDisablePeerAccess(i);
}
}
}
printf(" D\\D");
for (int j = 0; j < numGPUs; j++) {
printf("%6d ", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d ", i);
for (int j = 0; j < numGPUs; j++) {
printf("%6.02f ", bandwidthMatrix[i * numGPUs + j]);
}
printf("\n");
}
for (int d = 0; d < numGPUs; d++) {
hipSetDevice(d);
hipFree(buffers[d]);
hipFree(buffersD2D[d]);
cudaCheckError();
hipEventDestroy(start[d]);
cudaCheckError();
hipEventDestroy(stop[d]);
cudaCheckError();
hipStreamDestroy(stream0[d]);
cudaCheckError();
hipStreamDestroy(stream1[d]);
cudaCheckError();
}
hipHostFree((void *)flag);
cudaCheckError();
}
void outputLatencyMatrix(int numGPUs, bool p2p, P2PDataTransfer p2p_method) {
int repeat = 100;
int numElems = 4; // perform 1-int4 transfer.
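// The payload is deliberately tiny (a single int4), so the measurement is dominated by
// per-copy launch/queueing latency rather than by transfer bandwidth.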
volatile int *flag = NULL;
StopWatchInterface *stopWatch = NULL;
vector<int *> buffers(numGPUs);
vector<int *> buffersD2D(numGPUs); // buffer for D2D, that is, intra-GPU copy
vector<hipStream_t> stream(numGPUs);
vector<hipEvent_t> start(numGPUs);
vector<hipEvent_t> stop(numGPUs);
hipHostMalloc((void **)&flag, sizeof(*flag), hipHostMallocPortable);
cudaCheckError();
if (!sdkCreateTimer(&stopWatch)) {
printf("Failed to create stop watch\n");
exit(EXIT_FAILURE);
}
sdkStartTimer(&stopWatch);
for (int d = 0; d < numGPUs; d++) {
hipSetDevice(d);
hipStreamCreateWithFlags(&stream[d], hipStreamNonBlocking);
hipMalloc(&buffers[d], sizeof(int) * numElems);
hipMemset(buffers[d], 0, sizeof(int) * numElems);
hipMalloc(&buffersD2D[d], sizeof(int) * numElems);
hipMemset(buffersD2D[d], 0, sizeof(int) * numElems);
cudaCheckError();
hipEventCreate(&start[d]);
cudaCheckError();
hipEventCreate(&stop[d]);
cudaCheckError();
}
vector<double> gpuLatencyMatrix(numGPUs * numGPUs);
vector<double> cpuLatencyMatrix(numGPUs * numGPUs);
for (int i = 0; i < numGPUs; i++) {
hipSetDevice(i);
for (int j = 0; j < numGPUs; j++) {
int access = 0;
if (p2p) {
hipDeviceCanAccessPeer(&access, i, j);
if (access) {
hipDeviceEnablePeerAccess(j, 0);
cudaCheckError();
hipSetDevice(j);
hipDeviceEnablePeerAccess(i, 0);
hipSetDevice(i);
cudaCheckError();
}
}
hipStreamSynchronize(stream[i]);
cudaCheckError();
// Block the stream until all the work is queued up
// DANGER! - hipMemcpy*Async may infinitely block waiting for
// room to push the operation, so keep the number of repetitions
// relatively low. Higher repetition counts will cause the delay kernel
// to timeout and lead to unstable results.
*flag = 0;
hipLaunchKernelGGL(( delay), dim3(1), dim3(1), 0, stream[i], flag);
cudaCheckError();
hipEventRecord(start[i], stream[i]);
sdkResetTimer(&stopWatch);
if (i == j) {
// Perform intra-GPU, D2D copies
performP2PCopy(buffers[i], i, buffersD2D[i], i, numElems, repeat,
access, stream[i]);
} else {
if (p2p_method == P2P_WRITE) {
performP2PCopy(buffers[j], j, buffers[i], i, numElems, repeat, access,
stream[i]);
} else {
performP2PCopy(buffers[i], i, buffers[j], j, numElems, repeat, access,
stream[i]);
}
}
float cpu_time_ms = sdkGetTimerValue(&stopWatch);
hipEventRecord(stop[i], stream[i]);
// Now that the work has been queued up, release the stream
*flag = 1;
hipStreamSynchronize(stream[i]);
cudaCheckError();
float gpu_time_ms;
hipEventElapsedTime(&gpu_time_ms, start[i], stop[i]);
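// Convert milliseconds to microseconds and average over the repetitions to get per-copy latency.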
gpuLatencyMatrix[i * numGPUs + j] = gpu_time_ms * 1e3 / repeat;
cpuLatencyMatrix[i * numGPUs + j] = cpu_time_ms * 1e3 / repeat;
if (p2p && access) {
hipDeviceDisablePeerAccess(j);
hipSetDevice(j);
hipDeviceDisablePeerAccess(i);
hipSetDevice(i);
cudaCheckError();
}
}
}
printf(" GPU");
for (int j = 0; j < numGPUs; j++) {
printf("%6d ", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d ", i);
for (int j = 0; j < numGPUs; j++) {
printf("%6.02f ", gpuLatencyMatrix[i * numGPUs + j]);
}
printf("\n");
}
printf("\n CPU");
for (int j = 0; j < numGPUs; j++) {
printf("%6d ", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d ", i);
for (int j = 0; j < numGPUs; j++) {
printf("%6.02f ", cpuLatencyMatrix[i * numGPUs + j]);
}
printf("\n");
}
for (int d = 0; d < numGPUs; d++) {
hipSetDevice(d);
hipFree(buffers[d]);
hipFree(buffersD2D[d]);
cudaCheckError();
hipEventDestroy(start[d]);
cudaCheckError();
hipEventDestroy(stop[d]);
cudaCheckError();
hipStreamDestroy(stream[d]);
cudaCheckError();
}
sdkDeleteTimer(&stopWatch);
hipHostFree((void *)flag);
cudaCheckError();
}
int main(int argc, char **argv) {
int numGPUs, numElems = 40000000;
P2PDataTransfer p2p_method = P2P_WRITE;
hipGetDeviceCount(&numGPUs);
cudaCheckError();
// process command line args
if (checkCmdLineFlag(argc, (const char **)argv, "help")) {
printHelp();
return 0;
}
if (checkCmdLineFlag(argc, (const char **)argv, "p2p_read")) {
p2p_method = P2P_READ;
}
if (checkCmdLineFlag(argc, (const char **)argv, "sm_copy")) {
p2p_mechanism = SM;
}
// number of elements of int to be used in copy.
if (checkCmdLineFlag(argc, (const char **)argv, "numElems")) {
numElems = getCmdLineArgumentInt(argc, (const char **)argv, "numElems");
}
printf("[%s]\n", sSampleName);
// output devices
for (int i = 0; i < numGPUs; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
cudaCheckError();
printf("Device: %d, %s, pciBusID: %x, pciDeviceID: %x, pciDomainID:%x\n", i,
prop.name, prop.pciBusID, prop.pciDeviceID, prop.pciDomainID);
}
checkP2Paccess(numGPUs);
// Check peer-to-peer connectivity
printf("P2P Connectivity Matrix\n");
printf(" D\\D");
for (int j = 0; j < numGPUs; j++) {
printf("%6d", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d\t", i);
for (int j = 0; j < numGPUs; j++) {
if (i != j) {
int access;
hipDeviceCanAccessPeer(&access, i, j);
cudaCheckError();
printf("%6d", (access) ? 1 : 0);
} else {
printf("%6d", 1);
}
}
printf("\n");
}
printf("Unidirectional P2P=Disabled Bandwidth Matrix (GB/s)\n");
outputBandwidthMatrix(numElems, numGPUs, false, P2P_WRITE);
printf("Unidirectional P2P=Enabled Bandwidth (P2P Writes) Matrix (GB/s)\n");
outputBandwidthMatrix(numElems, numGPUs, true, P2P_WRITE);
if (p2p_method == P2P_READ) {
printf("Unidirectional P2P=Enabled Bandwidth (P2P Reads) Matrix (GB/s)\n");
outputBandwidthMatrix(numElems, numGPUs, true, p2p_method);
}
printf("Bidirectional P2P=Disabled Bandwidth Matrix (GB/s)\n");
outputBidirectionalBandwidthMatrix(numElems, numGPUs, false);
printf("Bidirectional P2P=Enabled Bandwidth Matrix (GB/s)\n");
outputBidirectionalBandwidthMatrix(numElems, numGPUs, true);
printf("P2P=Disabled Latency Matrix (us)\n");
outputLatencyMatrix(numGPUs, false, P2P_WRITE);
printf("P2P=Enabled Latency (P2P Writes) Matrix (us)\n");
outputLatencyMatrix(numGPUs, true, P2P_WRITE);
if (p2p_method == P2P_READ) {
printf("P2P=Enabled Latency (P2P Reads) Matrix (us)\n");
outputLatencyMatrix(numGPUs, true, p2p_method);
}
printf(
"\nNOTE: The CUDA Samples are not meant for performance measurements. "
"Results may vary when GPU Boost is enabled.\n");
exit(EXIT_SUCCESS);
}
| f9dd720575051225628da14881ad2f88c6577dfb.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <cstdio>
#include <vector>
#include <helper_cuda.h>
#include <helper_timer.h>
using namespace std;
const char *sSampleName = "P2P (Peer-to-Peer) GPU Bandwidth Latency Test";
typedef enum {
P2P_WRITE = 0,
P2P_READ = 1,
} P2PDataTransfer;
typedef enum {
CE = 0,
SM = 1,
} P2PEngine;
P2PEngine p2p_mechanism = CE; // By default use Copy Engine
// Macro for checking cuda errors following a cuda launch or api call
#define cudaCheckError() \
{ \
cudaError_t e = cudaGetLastError(); \
if (e != cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, \
cudaGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
__global__ void delay(volatile int *flag,
unsigned long long timeout_clocks = 10000000) {
// Wait until the application notifies us that it has completed queuing up the
// experiment, or timeout and exit, allowing the application to make progress
long long int start_clock, sample_clock;
start_clock = clock64();
while (!*flag) {
sample_clock = clock64();
if (sample_clock - start_clock > timeout_clocks) {
break;
}
}
}
// This kernel is for demonstration purposes only, not a performant kernel for
// p2p transfers.
__global__ void copyp2p(int4 *__restrict__ dest, int4 const *__restrict__ src,
size_t num_elems) {
size_t globalId = blockIdx.x * blockDim.x + threadIdx.x;
size_t gridSize = blockDim.x * gridDim.x;
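// Grid-stride loop: each thread starts at its global index and advances by the total thread
// count, so the kernel covers num_elems regardless of launch size; int4 accesses move 16 bytes
// per iteration.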
#pragma unroll(5)
for (size_t i = globalId; i < num_elems; i += gridSize) {
dest[i] = src[i];
}
}
///////////////////////////////////////////////////////////////////////////
// Print help screen
///////////////////////////////////////////////////////////////////////////
void printHelp(void) {
printf("Usage: p2pBandwidthLatencyTest [OPTION]...\n");
printf("Tests bandwidth/latency of GPU pairs using P2P and without P2P\n");
printf("\n");
printf("Options:\n");
printf("--help\t\tDisplay this help menu\n");
printf(
"--p2p_read\tUse P2P reads for data transfers between GPU pairs and show "
"corresponding results.\n \t\tDefault used is P2P write operation.\n");
printf("--sm_copy Use SM intiated p2p transfers instead of Copy Engine\n");
printf("--numElems=<NUM_OF_INT_ELEMS> Number of integer elements to be used in p2p copy.\n");
}
void checkP2Paccess(int numGPUs) {
for (int i = 0; i < numGPUs; i++) {
cudaSetDevice(i);
cudaCheckError();
for (int j = 0; j < numGPUs; j++) {
int access;
if (i != j) {
cudaDeviceCanAccessPeer(&access, i, j);
cudaCheckError();
printf("Device=%d %s Access Peer Device=%d\n", i,
access ? "CAN" : "CANNOT", j);
}
}
}
printf(
"\n***NOTE: In case a device doesn't have P2P access to other one, it "
"falls back to normal memcopy procedure.\nSo you can see lesser "
"Bandwidth (GB/s) and unstable Latency (us) in those cases.\n\n");
}
void performP2PCopy(int *dest, int destDevice, int *src, int srcDevice,
int num_elems, int repeat, bool p2paccess,
cudaStream_t streamToRun) {
int blockSize = 0;
int numBlocks = 0;
cudaOccupancyMaxPotentialBlockSize(&numBlocks, &blockSize, copyp2p);
cudaCheckError();
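// numBlocks receives the minimum grid size for full occupancy of copyp2p. With P2P access and
// --sm_copy the copy is driven by that kernel; otherwise it falls back to the copy engine via
// cudaMemcpyPeerAsync.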
if (p2p_mechanism == SM && p2paccess) {
for (int r = 0; r < repeat; r++) {
copyp2p<<<numBlocks, blockSize, 0, streamToRun>>>(
(int4 *)dest, (int4 *)src, num_elems / 4);
}
} else {
for (int r = 0; r < repeat; r++) {
cudaMemcpyPeerAsync(dest, destDevice, src, srcDevice,
sizeof(int) * num_elems, streamToRun);
}
}
}
void outputBandwidthMatrix(int numElems, int numGPUs, bool p2p, P2PDataTransfer p2p_method) {
int repeat = 5;
volatile int *flag = NULL;
vector<int *> buffers(numGPUs);
vector<int *> buffersD2D(numGPUs); // buffer for D2D, that is, intra-GPU copy
vector<cudaEvent_t> start(numGPUs);
vector<cudaEvent_t> stop(numGPUs);
vector<cudaStream_t> stream(numGPUs);
cudaHostAlloc((void **)&flag, sizeof(*flag), cudaHostAllocPortable);
cudaCheckError();
for (int d = 0; d < numGPUs; d++) {
cudaSetDevice(d);
cudaStreamCreateWithFlags(&stream[d], cudaStreamNonBlocking);
cudaMalloc(&buffers[d], numElems * sizeof(int));
cudaCheckError();
cudaMemset(buffers[d], 0, numElems * sizeof(int));
cudaCheckError();
cudaMalloc(&buffersD2D[d], numElems * sizeof(int));
cudaCheckError();
cudaMemset(buffersD2D[d], 0, numElems * sizeof(int));
cudaCheckError();
cudaEventCreate(&start[d]);
cudaCheckError();
cudaEventCreate(&stop[d]);
cudaCheckError();
}
vector<double> bandwidthMatrix(numGPUs * numGPUs);
for (int i = 0; i < numGPUs; i++) {
cudaSetDevice(i);
for (int j = 0; j < numGPUs; j++) {
int access = 0;
if (p2p) {
cudaDeviceCanAccessPeer(&access, i, j);
if (access) {
cudaDeviceEnablePeerAccess(j, 0);
cudaCheckError();
cudaSetDevice(j);
cudaCheckError();
cudaDeviceEnablePeerAccess(i, 0);
cudaCheckError();
cudaSetDevice(i);
cudaCheckError();
}
}
cudaStreamSynchronize(stream[i]);
cudaCheckError();
// Block the stream until all the work is queued up
// DANGER! - cudaMemcpy*Async may infinitely block waiting for
// room to push the operation, so keep the number of repetitions
// relatively low. Higher repetition counts will cause the delay kernel
// to timeout and lead to unstable results.
*flag = 0;
delay<<<1, 1, 0, stream[i]>>>(flag);
cudaCheckError();
cudaEventRecord(start[i], stream[i]);
cudaCheckError();
if (i == j) {
// Perform intra-GPU, D2D copies
performP2PCopy(buffers[i], i, buffersD2D[i], i, numElems, repeat,
access, stream[i]);
} else {
if (p2p_method == P2P_WRITE) {
performP2PCopy(buffers[j], j, buffers[i], i, numElems, repeat, access,
stream[i]);
} else {
performP2PCopy(buffers[i], i, buffers[j], j, numElems, repeat, access,
stream[i]);
}
}
cudaEventRecord(stop[i], stream[i]);
cudaCheckError();
// Release the queued events
*flag = 1;
cudaStreamSynchronize(stream[i]);
cudaCheckError();
float time_ms;
cudaEventElapsedTime(&time_ms, start[i], stop[i]);
double time_s = time_ms / 1e3;
double gb = numElems * sizeof(int) * repeat / (double)1e9;
if (i == j) {
gb *= 2; // must count both the read and the write here
}
bandwidthMatrix[i * numGPUs + j] = gb / time_s;
if (p2p && access) {
cudaDeviceDisablePeerAccess(j);
cudaSetDevice(j);
cudaDeviceDisablePeerAccess(i);
cudaSetDevice(i);
cudaCheckError();
}
}
}
printf(" D\\D");
for (int j = 0; j < numGPUs; j++) {
printf("%6d ", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d ", i);
for (int j = 0; j < numGPUs; j++) {
printf("%6.02f ", bandwidthMatrix[i * numGPUs + j]);
}
printf("\n");
}
for (int d = 0; d < numGPUs; d++) {
cudaSetDevice(d);
cudaFree(buffers[d]);
cudaFree(buffersD2D[d]);
cudaCheckError();
cudaEventDestroy(start[d]);
cudaCheckError();
cudaEventDestroy(stop[d]);
cudaCheckError();
cudaStreamDestroy(stream[d]);
cudaCheckError();
}
cudaFreeHost((void *)flag);
cudaCheckError();
}
void outputBidirectionalBandwidthMatrix(int numElems, int numGPUs, bool p2p) {
int repeat = 5;
volatile int *flag = NULL;
vector<int *> buffers(numGPUs);
vector<int *> buffersD2D(numGPUs);
vector<cudaEvent_t> start(numGPUs);
vector<cudaEvent_t> stop(numGPUs);
vector<cudaStream_t> stream0(numGPUs);
vector<cudaStream_t> stream1(numGPUs);
cudaHostAlloc((void **)&flag, sizeof(*flag), cudaHostAllocPortable);
cudaCheckError();
for (int d = 0; d < numGPUs; d++) {
cudaSetDevice(d);
cudaMalloc(&buffers[d], numElems * sizeof(int));
cudaMemset(buffers[d], 0, numElems * sizeof(int));
cudaMalloc(&buffersD2D[d], numElems * sizeof(int));
cudaMemset(buffersD2D[d], 0, numElems * sizeof(int));
cudaCheckError();
cudaEventCreate(&start[d]);
cudaCheckError();
cudaEventCreate(&stop[d]);
cudaCheckError();
cudaStreamCreateWithFlags(&stream0[d], cudaStreamNonBlocking);
cudaCheckError();
cudaStreamCreateWithFlags(&stream1[d], cudaStreamNonBlocking);
cudaCheckError();
}
vector<double> bandwidthMatrix(numGPUs * numGPUs);
for (int i = 0; i < numGPUs; i++) {
cudaSetDevice(i);
for (int j = 0; j < numGPUs; j++) {
int access = 0;
if (p2p) {
cudaDeviceCanAccessPeer(&access, i, j);
if (access) {
cudaSetDevice(i);
cudaDeviceEnablePeerAccess(j, 0);
cudaCheckError();
cudaSetDevice(j);
cudaDeviceEnablePeerAccess(i, 0);
cudaCheckError();
}
}
cudaSetDevice(i);
cudaStreamSynchronize(stream0[i]);
cudaStreamSynchronize(stream1[j]);
cudaCheckError();
// Block the stream until all the work is queued up
// DANGER! - cudaMemcpy*Async may infinitely block waiting for
// room to push the operation, so keep the number of repeatitions
// relatively low. Higher repeatitions will cause the delay kernel
// to timeout and lead to unstable results.
*flag = 0;
cudaSetDevice(i);
// No need to block stream1 since it'll be blocked on stream0's event
delay<<<1, 1, 0, stream0[i]>>>(flag);
cudaCheckError();
// Force stream1 not to start until stream0 does, in order to ensure
// the events on stream0 fully encompass the time needed for all
// operations
cudaEventRecord(start[i], stream0[i]);
cudaStreamWaitEvent(stream1[j], start[i], 0);
if (i == j) {
// For intra-GPU perform 2 memcopies buffersD2D <-> buffers
performP2PCopy(buffers[i], i, buffersD2D[i], i, numElems, repeat,
access, stream0[i]);
performP2PCopy(buffersD2D[i], i, buffers[i], i, numElems, repeat,
access, stream1[i]);
} else {
if (access && p2p_mechanism == SM) {
cudaSetDevice(j);
}
performP2PCopy(buffers[i], i, buffers[j], j, numElems, repeat, access,
stream1[j]);
if (access && p2p_mechanism == SM) {
cudaSetDevice(i);
}
performP2PCopy(buffers[j], j, buffers[i], i, numElems, repeat, access,
stream0[i]);
}
// Notify stream0 that stream1 is complete and record the time of
// the total transaction
cudaEventRecord(stop[j], stream1[j]);
cudaStreamWaitEvent(stream0[i], stop[j], 0);
cudaEventRecord(stop[i], stream0[i]);
// Release the queued operations
*flag = 1;
cudaStreamSynchronize(stream0[i]);
cudaStreamSynchronize(stream1[j]);
cudaCheckError();
float time_ms;
cudaEventElapsedTime(&time_ms, start[i], stop[i]);
double time_s = time_ms / 1e3;
double gb = 2.0 * numElems * sizeof(int) * repeat / (double)1e9;
if (i == j) {
gb *= 2; // must count both the read and the write here
}
bandwidthMatrix[i * numGPUs + j] = gb / time_s;
if (p2p && access) {
cudaSetDevice(i);
cudaDeviceDisablePeerAccess(j);
cudaSetDevice(j);
cudaDeviceDisablePeerAccess(i);
}
}
}
printf(" D\\D");
for (int j = 0; j < numGPUs; j++) {
printf("%6d ", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d ", i);
for (int j = 0; j < numGPUs; j++) {
printf("%6.02f ", bandwidthMatrix[i * numGPUs + j]);
}
printf("\n");
}
for (int d = 0; d < numGPUs; d++) {
cudaSetDevice(d);
cudaFree(buffers[d]);
cudaFree(buffersD2D[d]);
cudaCheckError();
cudaEventDestroy(start[d]);
cudaCheckError();
cudaEventDestroy(stop[d]);
cudaCheckError();
cudaStreamDestroy(stream0[d]);
cudaCheckError();
cudaStreamDestroy(stream1[d]);
cudaCheckError();
}
cudaFreeHost((void *)flag);
cudaCheckError();
}
void outputLatencyMatrix(int numGPUs, bool p2p, P2PDataTransfer p2p_method) {
int repeat = 100;
int numElems = 4; // perform 1-int4 transfer.
volatile int *flag = NULL;
StopWatchInterface *stopWatch = NULL;
vector<int *> buffers(numGPUs);
vector<int *> buffersD2D(numGPUs); // buffer for D2D, that is, intra-GPU copy
vector<cudaStream_t> stream(numGPUs);
vector<cudaEvent_t> start(numGPUs);
vector<cudaEvent_t> stop(numGPUs);
cudaHostAlloc((void **)&flag, sizeof(*flag), cudaHostAllocPortable);
cudaCheckError();
if (!sdkCreateTimer(&stopWatch)) {
printf("Failed to create stop watch\n");
exit(EXIT_FAILURE);
}
sdkStartTimer(&stopWatch);
for (int d = 0; d < numGPUs; d++) {
cudaSetDevice(d);
cudaStreamCreateWithFlags(&stream[d], cudaStreamNonBlocking);
cudaMalloc(&buffers[d], sizeof(int) * numElems);
cudaMemset(buffers[d], 0, sizeof(int) * numElems);
cudaMalloc(&buffersD2D[d], sizeof(int) * numElems);
cudaMemset(buffersD2D[d], 0, sizeof(int) * numElems);
cudaCheckError();
cudaEventCreate(&start[d]);
cudaCheckError();
cudaEventCreate(&stop[d]);
cudaCheckError();
}
vector<double> gpuLatencyMatrix(numGPUs * numGPUs);
vector<double> cpuLatencyMatrix(numGPUs * numGPUs);
for (int i = 0; i < numGPUs; i++) {
cudaSetDevice(i);
for (int j = 0; j < numGPUs; j++) {
int access = 0;
if (p2p) {
cudaDeviceCanAccessPeer(&access, i, j);
if (access) {
cudaDeviceEnablePeerAccess(j, 0);
cudaCheckError();
cudaSetDevice(j);
cudaDeviceEnablePeerAccess(i, 0);
cudaSetDevice(i);
cudaCheckError();
}
}
cudaStreamSynchronize(stream[i]);
cudaCheckError();
// Block the stream until all the work is queued up
// DANGER! - cudaMemcpy*Async may infinitely block waiting for
// room to push the operation, so keep the number of repetitions
// relatively low. Higher repetition counts will cause the delay kernel
// to timeout and lead to unstable results.
*flag = 0;
delay<<<1, 1, 0, stream[i]>>>(flag);
cudaCheckError();
cudaEventRecord(start[i], stream[i]);
sdkResetTimer(&stopWatch);
if (i == j) {
// Perform intra-GPU, D2D copies
performP2PCopy(buffers[i], i, buffersD2D[i], i, numElems, repeat,
access, stream[i]);
} else {
if (p2p_method == P2P_WRITE) {
performP2PCopy(buffers[j], j, buffers[i], i, numElems, repeat, access,
stream[i]);
} else {
performP2PCopy(buffers[i], i, buffers[j], j, numElems, repeat, access,
stream[i]);
}
}
float cpu_time_ms = sdkGetTimerValue(&stopWatch);
cudaEventRecord(stop[i], stream[i]);
// Now that the work has been queued up, release the stream
*flag = 1;
cudaStreamSynchronize(stream[i]);
cudaCheckError();
float gpu_time_ms;
cudaEventElapsedTime(&gpu_time_ms, start[i], stop[i]);
gpuLatencyMatrix[i * numGPUs + j] = gpu_time_ms * 1e3 / repeat;
cpuLatencyMatrix[i * numGPUs + j] = cpu_time_ms * 1e3 / repeat;
if (p2p && access) {
cudaDeviceDisablePeerAccess(j);
cudaSetDevice(j);
cudaDeviceDisablePeerAccess(i);
cudaSetDevice(i);
cudaCheckError();
}
}
}
printf(" GPU");
for (int j = 0; j < numGPUs; j++) {
printf("%6d ", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d ", i);
for (int j = 0; j < numGPUs; j++) {
printf("%6.02f ", gpuLatencyMatrix[i * numGPUs + j]);
}
printf("\n");
}
printf("\n CPU");
for (int j = 0; j < numGPUs; j++) {
printf("%6d ", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d ", i);
for (int j = 0; j < numGPUs; j++) {
printf("%6.02f ", cpuLatencyMatrix[i * numGPUs + j]);
}
printf("\n");
}
for (int d = 0; d < numGPUs; d++) {
cudaSetDevice(d);
cudaFree(buffers[d]);
cudaFree(buffersD2D[d]);
cudaCheckError();
cudaEventDestroy(start[d]);
cudaCheckError();
cudaEventDestroy(stop[d]);
cudaCheckError();
cudaStreamDestroy(stream[d]);
cudaCheckError();
}
sdkDeleteTimer(&stopWatch);
cudaFreeHost((void *)flag);
cudaCheckError();
}
int main(int argc, char **argv) {
int numGPUs, numElems = 40000000;
P2PDataTransfer p2p_method = P2P_WRITE;
cudaGetDeviceCount(&numGPUs);
cudaCheckError();
// process command line args
if (checkCmdLineFlag(argc, (const char **)argv, "help")) {
printHelp();
return 0;
}
if (checkCmdLineFlag(argc, (const char **)argv, "p2p_read")) {
p2p_method = P2P_READ;
}
if (checkCmdLineFlag(argc, (const char **)argv, "sm_copy")) {
p2p_mechanism = SM;
}
// number of elements of int to be used in copy.
if (checkCmdLineFlag(argc, (const char **)argv, "numElems")) {
numElems = getCmdLineArgumentInt(argc, (const char **)argv, "numElems");
}
printf("[%s]\n", sSampleName);
// output devices
for (int i = 0; i < numGPUs; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
cudaCheckError();
printf("Device: %d, %s, pciBusID: %x, pciDeviceID: %x, pciDomainID:%x\n", i,
prop.name, prop.pciBusID, prop.pciDeviceID, prop.pciDomainID);
}
checkP2Paccess(numGPUs);
// Check peer-to-peer connectivity
printf("P2P Connectivity Matrix\n");
printf(" D\\D");
for (int j = 0; j < numGPUs; j++) {
printf("%6d", j);
}
printf("\n");
for (int i = 0; i < numGPUs; i++) {
printf("%6d\t", i);
for (int j = 0; j < numGPUs; j++) {
if (i != j) {
int access;
cudaDeviceCanAccessPeer(&access, i, j);
cudaCheckError();
printf("%6d", (access) ? 1 : 0);
} else {
printf("%6d", 1);
}
}
printf("\n");
}
printf("Unidirectional P2P=Disabled Bandwidth Matrix (GB/s)\n");
outputBandwidthMatrix(numElems, numGPUs, false, P2P_WRITE);
printf("Unidirectional P2P=Enabled Bandwidth (P2P Writes) Matrix (GB/s)\n");
outputBandwidthMatrix(numElems, numGPUs, true, P2P_WRITE);
if (p2p_method == P2P_READ) {
printf("Unidirectional P2P=Enabled Bandwidth (P2P Reads) Matrix (GB/s)\n");
outputBandwidthMatrix(numElems, numGPUs, true, p2p_method);
}
printf("Bidirectional P2P=Disabled Bandwidth Matrix (GB/s)\n");
outputBidirectionalBandwidthMatrix(numElems, numGPUs, false);
printf("Bidirectional P2P=Enabled Bandwidth Matrix (GB/s)\n");
outputBidirectionalBandwidthMatrix(numElems, numGPUs, true);
printf("P2P=Disabled Latency Matrix (us)\n");
outputLatencyMatrix(numGPUs, false, P2P_WRITE);
printf("P2P=Enabled Latency (P2P Writes) Matrix (us)\n");
outputLatencyMatrix(numGPUs, true, P2P_WRITE);
if (p2p_method == P2P_READ) {
printf("P2P=Enabled Latency (P2P Reads) Matrix (us)\n");
outputLatencyMatrix(numGPUs, true, p2p_method);
}
printf(
"\nNOTE: The CUDA Samples are not meant for performance measurements. "
"Results may vary when GPU Boost is enabled.\n");
exit(EXIT_SUCCESS);
}
|
bcc238047fd6fc7c36b0b277544400b0c0be5696.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
#include <math.h>
#include <string.h>
#include <iostream>
#include <fstream>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include "Profiler.h"
#include "Printers.h"
#include "Iterations.h"
///
/// \brief main
/// \param argc
/// \param argv
/// \return
///
int main(int argc, char** argv)
{
for (unsigned int N = INITIAL_SIZE_3D; N <= LIMIT_3D; INCREMENT)
{
for (unsigned int itr = 0; itr < NUM_ITERATIONS; itr++)
{
const unsigned int NX = N;
const unsigned int NY = N;
const unsigned int NZ = N;
// Data size
const size_t dataSizeCUFFT = sizeof(float)* NX * NY * NZ;
// CPU allocation
float *dataCPU;
dataCPU = (float*) malloc(dataSizeCUFFT);
// GPU allocation
float* dataGPU;
hipMalloc((void**)&dataGPU, dataSizeCUFFT);
for (unsigned int i = 0; i < NX * NY * NZ; i++)
{
dataCPU[i] = float(i % 256) + i * 0.00001;
}
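// Deterministic fill pattern (byte ramp plus a small offset) so every iteration uploads identical data.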
char profileName [2048] = "dataUpload";
char N_char[1024];
sprintf(N_char, "__%dx%dx%d__%d", NX, NY, NZ, itr);
strcat(profileName, N_char);
// Upload the host array to the GPU
START_PROFILING(profileName);
hipMemcpy(dataGPU, dataCPU, dataSizeCUFFT, hipMemcpyHostToDevice);
END_PROFILING();
// Download the results to the CPU array
hipMemcpy(dataCPU, dataGPU, dataSizeCUFFT, hipMemcpyDeviceToHost);
// Release the data on the GPU
hipFree(dataGPU);
// Release the data on the CPU
free(dataCPU);
}
}
return 0;
}
| bcc238047fd6fc7c36b0b277544400b0c0be5696.cu | #include <stdio.h>
#include <stdlib.h>
#include <cmath>
#include <math.h>
#include <string.h>
#include <iostream>
#include <fstream>
#include <cuda.h>
#include <cufft.h>
#include "Profiler.h"
#include "Printers.h"
#include "Iterations.h"
///
/// \brief main
/// \param argc
/// \param argv
/// \return
///
int main(int argc, char** argv)
{
for (unsigned int N = INITIAL_SIZE_3D; N <= LIMIT_3D; INCREMENT)
{
for (unsigned int itr = 0; itr < NUM_ITERATIONS; itr++)
{
const unsigned int NX = N;
const unsigned int NY = N;
const unsigned int NZ = N;
// Data size
const size_t dataSizeCUFFT = sizeof(float)* NX * NY * NZ;
// CPU allocation
float *dataCPU;
dataCPU = (float*) malloc(dataSizeCUFFT);
// GPU allocation
float* dataGPU;
cudaMalloc((void**)&dataGPU, dataSizeCUFFT);
for (unsigned int i = 0; i < NX * NY * NZ; i++)
{
dataCPU[i] = float(i % 256) + i * 0.00001;
}
char profileName [2048] = "dataUpload";
char N_char[1024];
sprintf(N_char, "__%dx%dx%d__%d", NX, NY, NZ, itr);
strcat(profileName, N_char);
// Upload the host array to the GPU
START_PROFILING(profileName);
cudaMemcpy(dataGPU, dataCPU, dataSizeCUFFT, cudaMemcpyHostToDevice);
END_PROFILING();
// Download the results to the CPU array
cudaMemcpy(dataCPU, dataGPU, dataSizeCUFFT, cudaMemcpyDeviceToHost);
// Release the data on the GPU
cudaFree(dataGPU);
// Release the data on the CPU
free(dataCPU);
}
}
return 0;
}
|
c01bb8a1705041edb5d6759f4b208466f09e4074.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "knn_hip.cuh"
#include <iostream>
__global__ void compute_nearest_neighbors(const int* histo_tab,
const size_t histo_pitch,
const float* clusters,
const size_t cluster_pitch,
int* results,
const size_t tiles_number)
{
size_t x = threadIdx.x;
size_t tile_index = blockIdx.x;
if (x >= 256 || tile_index >= tiles_number)
return;
__shared__ float cluster_distances[16];
for (auto y = 0; x == 0 && y < 16; y++)
cluster_distances[y] = 0;
__syncthreads();
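// histo_pitch is given in bytes; dividing by sizeof(int) converts it to an element stride for row tile_index.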
int value = *(histo_tab + tile_index * histo_pitch / sizeof(int) + x);
for (auto y = 0; y < 16; y++)
{
float cluster_value = *(clusters + y * cluster_pitch / sizeof(float) + x);
// Euclidean distance
float local_distance = (cluster_value - value) * (cluster_value - value);
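// Accumulate each bin's squared difference into the per-cluster totals in shared memory;
// atomicAdd serializes the 256 concurrent updates. The total is the squared Euclidean
// distance, which suffices for picking the nearest cluster.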
atomicAdd(cluster_distances + y, local_distance);
}
__syncthreads();
if (x != 0)
return;
auto result_ptr = results + tile_index;
*result_ptr = 0;
for (int i = 1; i < 16; i++)
{
if (cluster_distances[*result_ptr] > cluster_distances[i])
*result_ptr = i;
}
}
int* k_nearest_neighbors(const int* histo_tab, const float* clusters, const size_t tiles_number)
{
hipError_t rc = hipSuccess;
int* cuda_histo_tab;
size_t cuda_histo_tab_pitch;
auto tile_byte_size = 256 * sizeof(int);
rc = hipMallocPitch(&cuda_histo_tab, &cuda_histo_tab_pitch, tile_byte_size, tiles_number);
if (rc)
{
std::cout << "Could not allocate memory for tiles histogram"
<< "on the device when computing nearest_neighbors\n";
exit(EXIT_FAILURE);
}
rc = hipMemcpy2D(cuda_histo_tab, cuda_histo_tab_pitch, histo_tab, tile_byte_size,
tile_byte_size, tiles_number, hipMemcpyHostToDevice);
if (rc)
{
std::cout << "Could not copy memory for tiles histogram"
<< "on the device when computing nearest_neighbors\n";
exit(EXIT_FAILURE);
}
float* cuda_clusters;
size_t cuda_clusters_pitch;
auto cluster_byte_size = 256 * sizeof(float);
rc = hipMallocPitch(&cuda_clusters, &cuda_clusters_pitch, cluster_byte_size, 16);
if (rc)
{
std::cout << "Could not allocate memory for clusters"
<< "on the device when computing nearest neighbors\n";
exit(EXIT_FAILURE);
}
rc = hipMemcpy2D(cuda_clusters, cuda_clusters_pitch, clusters, cluster_byte_size,
cluster_byte_size, 16, hipMemcpyHostToDevice);
if (rc)
{
std::cout << "Could not copy memory for clusters"
<< "on the device when computing nearest neighbors\n";
exit(EXIT_FAILURE);
}
int* result;
rc = hipMalloc(&result, sizeof(int) * tiles_number);
if (rc)
{
std::cout << "Could not allocate memory for nearest neighbors result"
<< "on the device when computing nearest neighbors\n";
exit(EXIT_FAILURE);
}
dim3 block_dim(256, 16);
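// Note: block_dim is declared but unused; the launch below runs one 256-thread block per tile.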
hipLaunchKernelGGL(( compute_nearest_neighbors), dim3(tiles_number), dim3(256), 0, 0, cuda_histo_tab, cuda_histo_tab_pitch,
cuda_clusters, cuda_clusters_pitch, result, tiles_number);
int* output = (int*) malloc(sizeof(int) * tiles_number);
rc = hipMemcpy(output, result, sizeof(int) * tiles_number, hipMemcpyDeviceToHost);
hipFree(cuda_histo_tab);
hipFree(cuda_clusters);
hipFree(result);
return output;
}
| c01bb8a1705041edb5d6759f4b208466f09e4074.cu | #include "knn.cuh"
#include <iostream>
__global__ void compute_nearest_neighbors(const int* histo_tab,
const size_t histo_pitch,
const float* clusters,
const size_t cluster_pitch,
int* results,
const size_t tiles_number)
{
size_t x = threadIdx.x;
size_t tile_index = blockIdx.x;
if (x >= 256 || tile_index >= tiles_number)
return;
__shared__ float cluster_distances[16];
for (auto y = 0; x == 0 && y < 16; y++)
cluster_distances[y] = 0;
__syncthreads();
int value = *(histo_tab + tile_index * histo_pitch / sizeof(int) + x);
for (auto y = 0; y < 16; y++)
{
float cluster_value = *(clusters + y * cluster_pitch / sizeof(float) + x);
// Euclidean distance
float local_distance = (cluster_value - value) * (cluster_value - value);
atomicAdd(cluster_distances + y, local_distance);
}
__syncthreads();
if (x != 0)
return;
auto result_ptr = results + tile_index;
*result_ptr = 0;
for (int i = 1; i < 16; i++)
{
if (cluster_distances[*result_ptr] > cluster_distances[i])
*result_ptr = i;
}
}
int* k_nearest_neighbors(const int* histo_tab, const float* clusters, const size_t tiles_number)
{
cudaError_t rc = cudaSuccess;
int* cuda_histo_tab;
size_t cuda_histo_tab_pitch;
auto tile_byte_size = 256 * sizeof(int);
rc = cudaMallocPitch(&cuda_histo_tab, &cuda_histo_tab_pitch, tile_byte_size, tiles_number);
if (rc)
{
std::cout << "Could not allocate memory for tiles histogram"
<< "on the device when computing nearest_neighbors\n";
exit(EXIT_FAILURE);
}
rc = cudaMemcpy2D(cuda_histo_tab, cuda_histo_tab_pitch, histo_tab, tile_byte_size,
tile_byte_size, tiles_number, cudaMemcpyHostToDevice);
if (rc)
{
std::cout << "Could not copy memory for tiles histogram"
<< "on the device when computing nearest_neighbors\n";
exit(EXIT_FAILURE);
}
float* cuda_clusters;
size_t cuda_clusters_pitch;
auto cluster_byte_size = 256 * sizeof(float);
rc = cudaMallocPitch(&cuda_clusters, &cuda_clusters_pitch, cluster_byte_size, 16);
if (rc)
{
std::cout << "Could not allocate memory for clusters"
<< "on the device when computing nearest neighbors\n";
exit(EXIT_FAILURE);
}
rc = cudaMemcpy2D(cuda_clusters, cuda_clusters_pitch, clusters, cluster_byte_size,
cluster_byte_size, 16, cudaMemcpyHostToDevice);
if (rc)
{
std::cout << "Could not copy memory for clusters"
<< "on the device when computing nearest neighbors\n";
exit(EXIT_FAILURE);
}
int* result;
rc = cudaMalloc(&result, sizeof(int) * tiles_number);
if (rc)
{
std::cout << "Could not allocate memory for nearest neighbors result"
<< "on the device when computing nearest neighbors\n";
exit(EXIT_FAILURE);
}
dim3 block_dim(256, 16);
compute_nearest_neighbors<<<tiles_number, 256>>>(cuda_histo_tab, cuda_histo_tab_pitch,
cuda_clusters, cuda_clusters_pitch, result, tiles_number);
int* output = (int*) malloc(sizeof(int) * tiles_number);
rc = cudaMemcpy(output, result, sizeof(int) * tiles_number, cudaMemcpyDeviceToHost);
cudaFree(cuda_histo_tab);
cudaFree(cuda_clusters);
cudaFree(result);
return output;
}
|
f90a8a8cb6838d3baa95e163f7f18ea3be30274d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_zvel_plus_4_left [3][2];
static int dims_update_halo_kernel2_zvel_plus_4_left_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_zvel_plus_4_left_gpu(ACC<double> &zvel0,
ACC<double> &zvel1,
const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0(0,0,0) = zvel0(4,0,0);
if(fields[FIELD_ZVEL1] == 1) zvel1(0,0,0) = zvel1(4,0,0);
}
__global__ void ops_update_halo_kernel2_zvel_plus_4_left(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
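// Offset each argument pointer by the flattened (x,y,z) index, using the per-dataset pitches
// stored in the constant-memory dims_update_halo_kernel2_zvel_plus_4_left array.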
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_plus_4_left[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_plus_4_left[0][0] * dims_update_halo_kernel2_zvel_plus_4_left[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_plus_4_left[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_plus_4_left[1][0] * dims_update_halo_kernel2_zvel_plus_4_left[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_zvel_plus_4_left[0][0], dims_update_halo_kernel2_zvel_plus_4_left[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_zvel_plus_4_left[1][0], dims_update_halo_kernel2_zvel_plus_4_left[1][1], arg1);
update_halo_kernel2_zvel_plus_4_left_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_plus_4_left_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,52)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(52,"update_halo_kernel2_zvel_plus_4_left");
OPS_kernels[52].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_zvel_plus_4_left_h[0][0] || ydim0 != dims_update_halo_kernel2_zvel_plus_4_left_h[0][1] || xdim1 != dims_update_halo_kernel2_zvel_plus_4_left_h[1][0] || ydim1 != dims_update_halo_kernel2_zvel_plus_4_left_h[1][1]) {
dims_update_halo_kernel2_zvel_plus_4_left_h[0][0] = xdim0;
dims_update_halo_kernel2_zvel_plus_4_left_h[0][1] = ydim0;
dims_update_halo_kernel2_zvel_plus_4_left_h[1][0] = xdim1;
dims_update_halo_kernel2_zvel_plus_4_left_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel2_zvel_plus_4_left, dims_update_halo_kernel2_zvel_plus_4_left_h, sizeof(dims_update_halo_kernel2_zvel_plus_4_left)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
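// The fields flag array (arg2) is staged in the OPS constant buffer on the host and mirrored
// to the device so the kernel can read it through arg2.data_d.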
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[52].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_4_left), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[52].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[52].mpi_time += t2-t1;
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 52;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 52;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_4_left_execute;
if (OPS_diags > 1) {
ops_timing_realloc(52,"update_halo_kernel2_zvel_plus_4_left");
}
ops_enqueue_kernel(desc);
}
#endif
| f90a8a8cb6838d3baa95e163f7f18ea3be30274d.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_zvel_plus_4_left [3][2];
static int dims_update_halo_kernel2_zvel_plus_4_left_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_zvel_plus_4_left_gpu(ACC<double> &zvel0,
ACC<double> &zvel1,
const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0(0,0,0) = zvel0(4,0,0);
if(fields[FIELD_ZVEL1] == 1) zvel1(0,0,0) = zvel1(4,0,0);
}
__global__ void ops_update_halo_kernel2_zvel_plus_4_left(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_plus_4_left[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_plus_4_left[0][0] * dims_update_halo_kernel2_zvel_plus_4_left[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_plus_4_left[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_plus_4_left[1][0] * dims_update_halo_kernel2_zvel_plus_4_left[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_zvel_plus_4_left[0][0], dims_update_halo_kernel2_zvel_plus_4_left[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_zvel_plus_4_left[1][0], dims_update_halo_kernel2_zvel_plus_4_left[1][1], arg1);
update_halo_kernel2_zvel_plus_4_left_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_plus_4_left_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,52)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(52,"update_halo_kernel2_zvel_plus_4_left");
OPS_kernels[52].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_zvel_plus_4_left_h[0][0] || ydim0 != dims_update_halo_kernel2_zvel_plus_4_left_h[0][1] || xdim1 != dims_update_halo_kernel2_zvel_plus_4_left_h[1][0] || ydim1 != dims_update_halo_kernel2_zvel_plus_4_left_h[1][1]) {
dims_update_halo_kernel2_zvel_plus_4_left_h[0][0] = xdim0;
dims_update_halo_kernel2_zvel_plus_4_left_h[0][1] = ydim0;
dims_update_halo_kernel2_zvel_plus_4_left_h[1][0] = xdim1;
dims_update_halo_kernel2_zvel_plus_4_left_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel2_zvel_plus_4_left, dims_update_halo_kernel2_zvel_plus_4_left_h, sizeof(dims_update_halo_kernel2_zvel_plus_4_left)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[52].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_zvel_plus_4_left<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[52].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[52].mpi_time += t2-t1;
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 52;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 52;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_4_left_execute;
if (OPS_diags > 1) {
ops_timing_realloc(52,"update_halo_kernel2_zvel_plus_4_left");
}
ops_enqueue_kernel(desc);
}
#endif
|
9cfff7a416444773b5b63c36236402111ea23723.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: P = M * N.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixmul.h"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
//Multiply the two matrices
float Pvalue = 0;
int Width = M.width;
for (int k = 0; k < Width; ++k) {
float Melement = M.elements[threadIdx.y*Width+k];
float Nelement = N.elements[k*Width+threadIdx.x];
Pvalue += Melement * Nelement;
}
P.elements[threadIdx.y*Width+threadIdx.x] = Pvalue;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
| 9cfff7a416444773b5b63c36236402111ea23723.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: P = M * N.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixmul.h"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
//Multiply the two matrices
float Pvalue = 0;
int Width = M.width;
for (int k = 0; k < Width; ++k) {
float Melement = M.elements[threadIdx.y*Width+k];
float Nelement = N.elements[k*Width+threadIdx.x];
Pvalue += Melement * Nelement;
}
P.elements[threadIdx.y*Width+threadIdx.x] = Pvalue;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|