hip_filename (stringlengths 5–84) | hip_content (stringlengths 79–9.69M) | cuda_filename (stringlengths 4–83) | cuda_content (stringlengths 19–9.69M) |
---|---|---|---|
6a98d4ca0be67bb5caf22d95ef0bfafa36f68f61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// -------------------------------------------------------------
// CUDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision$
// $Date$
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
#include "cudpp_radixsort.h"
#include <cudpp_globals.h>
#include "sharedmem.h"
#include "cta/radixsort_cta.cu"
#ifdef __DEVICE_EMULATION__
#define __EMUSYNC __syncthreads()
#else
#define __EMUSYNC
#endif
/**
* @file
* radixsort_kernel.cu
*
* @brief CUDPP kernel-level radix sorting routines
*/
/** \addtogroup cudpp_kernel
* @{
*/
/** @name RadixSort Functions
* @{
*/
typedef unsigned int uint;
/** @brief An empty kernel used to reset CTA issue hardware
**/
__global__ void emptyKernel() {}
/** @brief Does special binary arithmetic before sorting floats
*
* Uses floatFlip function to flip bits.
* @param[in,out] values Values to be manipulated
* @param[in] numValues Number of values to be flipped
**/
__global__ void flipFloats(uint *values, uint numValues)
{
uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
}
/** @brief Undoes the flips from flipFloats
*
* Uses floatUnflip function to unflip bits.
* @param[in,out] values Values to be manipulated
* @param[in] numValues Number of values to be unflipped
**/
__global__ void unflipFloats(uint *values, uint numValues)
{
uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
}
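// A minimal sketch, ours rather than CUDPP's, of the float-flip trick the kernels above
// rely on: IEEE-754 float bit patterns are mapped to unsigned ints whose ascending order
// matches the floats' order, so a plain unsigned radix sort also sorts the floats.
// Positive values get their sign bit set; negative values get all bits inverted. The names
// exampleFloatFlip/exampleFloatUnflip are illustrative; the real floatFlip<flip> and
// floatUnflip<flip> templates come from the included CTA headers.
__device__ uint exampleFloatFlip(uint f)
{
    uint mask = (f & 0x80000000u) ? 0xFFFFFFFFu : 0x80000000u; // negative: invert all bits
    return f ^ mask;                                           // positive: set the sign bit
}
__device__ uint exampleFloatUnflip(uint f)
{
    uint mask = (f & 0x80000000u) ? 0x80000000u : 0xFFFFFFFFu; // undo the mapping above
    return f ^ mask;
}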
/** @brief Optimization for sorts of WARP_SIZE or fewer elements
*
* @param[in,out] keys Keys to be sorted.
* @param[in,out] values Associated values to be sorted (through keys).
* @param[in] numElements Number of elements in the sort.
*/
template <bool flip>
__global__
void radixSortSingleWarp(uint *keys,
uint *values,
uint numElements)
{
volatile __shared__ uint sKeys[WARP_SIZE]; //remove class distinctions
volatile __shared__ uint sValues[WARP_SIZE];
volatile __shared__ uint sFlags[WARP_SIZE];
sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]);
sValues[threadIdx.x] = values[threadIdx.x];
__EMUSYNC; // emulation only
for(uint i = 1; i < numElements; i++)
{
uint key_i = sKeys[i];
uint val_i = sValues[i];
sFlags[threadIdx.x] = 0;
uint temp, tempval;
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
temp = sKeys[threadIdx.x];
tempval = sValues[threadIdx.x];
sFlags[threadIdx.x] = 1;
#ifdef __DEVICE_EMULATION__
}
__EMUSYNC;
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
#endif
sKeys[threadIdx.x + 1] = temp;
sValues[threadIdx.x + 1] = tempval;
sFlags[threadIdx.x + 1] = 0;
}
if(sFlags[threadIdx.x] == 1 )
{
sKeys[threadIdx.x] = key_i;
sValues[threadIdx.x] = val_i;
}
__EMUSYNC; // emulation only
}
keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]);
values[threadIdx.x] = sValues[threadIdx.x];
}
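// A hedged host-side sketch (not CUDPP code) of how the single-warp path above is meant to
// be driven: a single block whose thread count equals the element count (at most WARP_SIZE),
// with flip=true when the uint keys are really float bit patterns. d_keys, d_values and
// numElements are illustrative names for buffers and sizes the caller already owns.
inline void exampleLaunchSingleWarpSort(uint *d_keys, uint *d_values, uint numElements)
{
    hipLaunchKernelGGL(HIP_KERNEL_NAME(radixSortSingleWarp<true>),
                       dim3(1), dim3(numElements), 0, 0,
                       d_keys, d_values, numElements);
}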
/** @brief Optimization for sorts of WARP_SIZE or fewer elements. Keys-Only version.
*
* @param[in,out] keys Keys to be sorted
* @param[in] numElements Total number of elements to be sorted
**/
template <bool flip>
__global__
void radixSortSingleWarpKeysOnly(uint *keys,
uint numElements)
{
volatile __shared__ uint sKeys[WARP_SIZE];
volatile __shared__ uint sFlags[WARP_SIZE];
sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]);
__EMUSYNC; // emulation only
for(uint i = 1; i < numElements; i++)
{
uint key_i = sKeys[i];
sFlags[threadIdx.x] = 0;
uint temp;
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
temp = sKeys[threadIdx.x];
sFlags[threadIdx.x] = 1;
#ifdef __DEVICE_EMULATION__
}
__EMUSYNC;
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
#endif
sKeys[threadIdx.x + 1] = temp;
sFlags[threadIdx.x + 1] = 0;
}
if(sFlags[threadIdx.x] == 1 )
{
sKeys[threadIdx.x] = key_i;
}
__EMUSYNC; // emulation only
}
keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]);
}
/** @brief Sorts all blocks of data independently in shared memory.
* Each thread block (CTA) sorts one block of 4*CTA_SIZE elements
*
* The radix sort is done in two stages. This stage calls radixSortBlock on each
* block independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
*
* Template parameters are used to generate efficient code for various special cases
* For example, we have to handle arrays that are a multiple of the block size (fullBlocks)
* differently than arrays that are not. "flip" is used to only compile in the
* float flip code when float keys are used. "loop" is used when persistent CTAs
* are used.
*
* By persistent CTAs we mean that we launch only as many thread blocks as can
* be resident in the GPU and no more, rather than launching as many threads as
* we have elements. Persistent CTAs loop over blocks of elements until all work
* is complete. This can be faster in some cases. In our tests it is faster
* for large sorts (and the threshold is higher on compute version 1.1 and earlier
* GPUs than it is on compute version 1.2 GPUs).
*
* @param[out] keysOut Output of sorted keys
* @param[out] valuesOut Output of associated values
* @param[in] keysIn Input of unsorted keys in GPU
* @param[in] valuesIn Input of associated input values
* @param[in] numElements Total number of elements to sort
* @param[in] totalBlocks The number of blocks of data to sort
*/
template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop>
__global__ void radixSortBlocks(uint4* keysOut, uint4* valuesOut,
uint4* keysIn, uint4* valuesIn,
uint numElements, uint totalBlocks)
{
extern __shared__ uint4 sMem[];
uint4 key, value;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
uint idx = i << 2;
// handle non-full last block if array is not multiple of 1024 numElements
if (!fullBlocks && idx+3 >= numElements)
{
if (idx >= numElements)
{
key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
value = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
}
else
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysIn;
uint *values1 = (uint*)valuesIn;
key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX;
key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
key.w = UINT_MAX;
value.x = (idx < numElements) ? values1[idx] : UINT_MAX;
value.y = (idx+1 < numElements) ? values1[idx+1] : UINT_MAX;
value.z = (idx+2 < numElements) ? values1[idx+2] : UINT_MAX;
value.w = UINT_MAX;
}
}
else
{
key = keysIn[i];
value = valuesIn[i];
if (flip)
{
key.x = floatFlip<flip>(key.x);
key.y = floatFlip<flip>(key.y);
key.z = floatFlip<flip>(key.z);
key.w = floatFlip<flip>(key.w);
}
}
__syncthreads();
radixSortBlock<nbits, startbit>(key, value);
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && idx+3 >= numElements)
{
if (idx < numElements)
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysOut;
uint *values1 = (uint*)valuesOut;
keys1[idx] = key.x;
values1[idx] = value.x;
if (idx + 1 < numElements)
{
keys1[idx + 1] = key.y;
values1[idx + 1] = value.y;
if (idx + 2 < numElements)
{
keys1[idx + 2] = key.z;
values1[idx + 2] = value.z;
}
}
}
}
else
{
keysOut[i] = key;
valuesOut[i] = value;
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
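// A hedged host-side sketch (ours, not CUDPP's) of the two launch shapes the "loop" template
// flag distinguishes. With loop=false one CTA is launched per data block; with loop=true
// ("persistent CTAs") only numCTAs resident blocks are launched and each strides by gridDim.x
// over all totalBlocks. numCTAs, sharedBytes and the d_* buffers are illustrative assumptions;
// the real shared-memory requirement is dictated by radixSortBlock in the CTA header.
inline void exampleLaunchRadixSortBlocks(uint4 *d_keysOut, uint4 *d_valuesOut,
                                         uint4 *d_keysIn, uint4 *d_valuesIn,
                                         uint numElements, uint totalBlocks,
                                         uint numCTAs, size_t sharedBytes, bool persistent)
{
    if (persistent)
        hipLaunchKernelGGL(HIP_KERNEL_NAME(radixSortBlocks<4, 0, true, false, true>),
                           dim3(numCTAs), dim3(SORT_CTA_SIZE), sharedBytes, 0,
                           d_keysOut, d_valuesOut, d_keysIn, d_valuesIn, numElements, totalBlocks);
    else
        hipLaunchKernelGGL(HIP_KERNEL_NAME(radixSortBlocks<4, 0, true, false, false>),
                           dim3(totalBlocks), dim3(SORT_CTA_SIZE), sharedBytes, 0,
                           d_keysOut, d_valuesOut, d_keysIn, d_valuesIn, numElements, totalBlocks);
}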
/** @brief Computes the number of keys of each radix in each block and stores the offsets.
*
* Given an array with blocks sorted according to a 4-bit radix group, each
* block counts the number of keys that fall into each radix in the group, and
* finds the starting offset of each radix in the block. It then writes the radix
* counts to the counters array, and the starting offsets to the blockOffsets array.
*
* Template parameters are used to generate efficient code for various special cases
* For example, we have to handle arrays that are a multiple of the block size
* (fullBlocks) differently than arrays that are not. "loop" is used when persistent
* CTAs are used.
*
* By persistent CTAs we mean that we launch only as many thread blocks as can
* be resident in the GPU and no more, rather than launching as many threads as
* we have elements. Persistent CTAs loop over blocks of elements until all work
* is complete. This can be faster in some cases. In our tests it is faster
* for large sorts (and the threshold is higher on compute version 1.1 and earlier
* GPUs than it is on compute version 1.2 GPUs).
*
* @param[in] keys Input keys
* @param[out] counters Radix count for each block
* @param[out] blockOffsets The offset address for each block
* @param[in] numElements Total number of elements
* @param[in] totalBlocks Total number of blocks
**/
template<uint startbit, bool fullBlocks, bool loop>
__global__ void findRadixOffsets(uint2 *keys,
uint *counters,
uint *blockOffsets,
uint numElements,
uint totalBlocks)
{
extern __shared__ uint sRadix1[];
__shared__ uint sStartPointers[16];
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint2 radix2;
uint i = blockId * blockDim.x + threadIdx.x;
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && ((i + 1) << 1 ) > numElements )
{
// handle uint1 rather than uint2 for non-full blocks
uint *keys1 = (uint*)keys;
uint j = i << 1;
radix2.x = (j < numElements) ? keys1[j] : UINT_MAX;
j++;
radix2.y = (j < numElements) ? keys1[j] : UINT_MAX;
}
else
{
radix2 = keys[i];
}
sRadix1[2 * threadIdx.x] = (radix2.x >> startbit) & 0xF;
sRadix1[2 * threadIdx.x + 1] = (radix2.y >> startbit) & 0xF;
// Finds the position where the sRadix1 entries differ and stores start
// index for each radix.
if(threadIdx.x < 16)
{
sStartPointers[threadIdx.x] = 0;
}
__syncthreads();
if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) )
{
sStartPointers[sRadix1[threadIdx.x]] = threadIdx.x;
}
if(sRadix1[threadIdx.x + SORT_CTA_SIZE] != sRadix1[threadIdx.x + SORT_CTA_SIZE - 1])
{
sStartPointers[sRadix1[threadIdx.x + SORT_CTA_SIZE]] = threadIdx.x + SORT_CTA_SIZE;
}
__syncthreads();
if(threadIdx.x < 16)
{
blockOffsets[blockId*16 + threadIdx.x] = sStartPointers[threadIdx.x];
}
__syncthreads();
// Compute the sizes of each block.
if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) )
{
sStartPointers[sRadix1[threadIdx.x - 1]] =
threadIdx.x - sStartPointers[sRadix1[threadIdx.x - 1]];
}
if(sRadix1[threadIdx.x + SORT_CTA_SIZE] != sRadix1[threadIdx.x + SORT_CTA_SIZE - 1] )
{
sStartPointers[sRadix1[threadIdx.x + SORT_CTA_SIZE - 1]] =
threadIdx.x + SORT_CTA_SIZE - sStartPointers[sRadix1[threadIdx.x + SORT_CTA_SIZE - 1]];
}
if(threadIdx.x == SORT_CTA_SIZE - 1)
{
sStartPointers[sRadix1[2 * SORT_CTA_SIZE - 1]] =
2 * SORT_CTA_SIZE - sStartPointers[sRadix1[2 * SORT_CTA_SIZE - 1]];
}
__syncthreads();
if(threadIdx.x < 16)
{
counters[threadIdx.x * totalBlocks + blockId] =
sStartPointers[threadIdx.x];
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
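// A hedged host-side illustration (not CUDPP code; the real pipeline scans on the GPU) of why
// counters is written radix-major as counters[radix * totalBlocks + blockId]: one exclusive
// prefix sum over that flat array yields, for every (radix, block) pair, the global position
// at which reorderData must start writing that block's keys of that radix. The function name
// and the h_* pointers are illustrative.
inline void exampleScanCounters(const uint *h_counters, uint *h_countersSum, uint totalBlocks)
{
    uint running = 0;
    for (uint i = 0; i < 16 * totalBlocks; ++i) // 16 radices per 4-bit digit pass
    {
        h_countersSum[i] = running;
        running += h_counters[i];
    }
}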
/** @brief Reorders data in the global array.
*
* reorderData shuffles data in the array globally after the radix
* offsets have been found. On compute version 1.1 and earlier GPUs, this code depends
* on SORT_CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits).
*
* On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures
* that all writes are coalesced using extra work in the kernel. On later
* GPUs coalescing rules have been relaxed, so this extra overhead hurts
* performance. On these GPUs we set manualCoalesce=false and directly store
* the results.
*
* Template parameters are used to generate efficient code for various special cases
* For example, we have to handle arrays that are a multiple of the block size
* (fullBlocks) differently than arrays that are not. "loop" is used when persistent
* CTAs are used.
*
* By persistent CTAs we mean that we launch only as many thread blocks as can
* be resident in the GPU and no more, rather than launching as many threads as
* we have elements. Persistent CTAs loop over blocks of elements until all work
* is complete. This can be faster in some cases. In our tests it is faster
* for large sorts (and the threshold is higher on compute version 1.1 and earlier
* GPUs than it is on compute version 1.2 GPUs).
*
* @param[out] outKeys Output of sorted keys
* @param[out] outValues Output of associated values
* @param[in] keys Input of unsorted keys in GPU
* @param[in] values Input of associated input values
* @param[in] blockOffsets The offset address for each block
* @param[in] offsets Address of each radix within each block
* @param[in] sizes Number of elements in a block
* @param[in] numElements Total number of elements
* @param[in] totalBlocks Total number of data blocks to process
*
* @todo Args that are const below should be prototyped as const
**/
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop>
__global__ void reorderData(uint *outKeys,
uint *outValues,
uint2 *keys,
uint2 *values,
uint *blockOffsets,
uint *offsets,
uint *sizes,
uint numElements,
uint totalBlocks)
{
__shared__ uint2 sKeys2[SORT_CTA_SIZE];
__shared__ uint2 sValues2[SORT_CTA_SIZE];
__shared__ uint sOffsets[16];
__shared__ uint sBlockOffsets[16];
uint *sKeys1 = (uint*)sKeys2;
uint *sValues1 = (uint*)sValues2;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && (((i + 1) << 1) > numElements))
{
uint *keys1 = (uint*)keys;
uint *values1 = (uint*)values;
uint j = i << 1;
sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX;
sValues1[threadIdx.x << 1] = (j < numElements) ? values1[j] : UINT_MAX;
j++;
sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX;
sValues1[(threadIdx.x << 1) + 1] = (j < numElements) ? values1[j] : UINT_MAX;
}
else
{
sKeys2[threadIdx.x] = keys[i];
sValues2[threadIdx.x] = values[i];
}
if (!manualCoalesce)
{
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
}
__syncthreads();
uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]);
outValues[globalOffset] = sValues1[threadIdx.x];
}
radix = (sKeys1[threadIdx.x + SORT_CTA_SIZE] >> startbit) & 0xF;
globalOffset = sOffsets[radix] + threadIdx.x + SORT_CTA_SIZE - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + SORT_CTA_SIZE]);
outValues[globalOffset] = sValues1[threadIdx.x + SORT_CTA_SIZE];
}
}
else
{
__shared__ uint sSizes[16];
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId];
}
__syncthreads();
// 1 half-warp is responsible for writing out all values for 1 radix.
// Loops if there are more than 16 values to be written out.
// All start indices are rounded down to the nearest multiple of 16, and
// all end indices are rounded up to the nearest multiple of 16.
// Thus it can do extra work if the start and end indices are not multiples of 16
// This is bounded by a factor of 2 (it can do 2X more work at most).
const uint halfWarpID = threadIdx.x >> 4;
const uint halfWarpOffset = threadIdx.x & 0xF;
const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;
uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;
uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);
uint numIterations = endPos - startPos;
uint outOffset = startPos + halfWarpOffset;
uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;
for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
{
if( (outOffset >= sOffsets[halfWarpID]) &&
(inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
{
if(blockId < totalBlocks - 1 || outOffset < numElements)
{
outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]);
outValues[outOffset] = sValues1[inOffset];
}
}
}
}
if (loop)
{
blockId += gridDim.x;
__syncthreads();
}
else
break;
}
}
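// A worked example (ours) of the scatter arithmetic in the !manualCoalesce path above: if the
// scanned global offset for radix r is sOffsets[r] = 200 and this block's first radix-r key
// sits at sBlockOffsets[r] = 48, then a thread holding a radix-r key at local position 50
// writes to 200 + 50 - 48 = 202, i.e. the third global slot for radix r, matching the fact
// that it holds the block's third radix-r key.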
/** @brief Sorts all blocks of data independently in shared memory.
* Each thread block (CTA) sorts one block of 4*CTA_SIZE elements
*
* The radix sort is done in two stages. This stage calls radixSortBlock on each
* block independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
*
* Template parameters are used to generate efficient code for various special cases
* For example, we have to handle arrays that are a multiple of the block size (fullBlocks)
* differently than arrays that are not. "flip" is used to only compile in the
* float flip code when float keys are used. "loop" is used when persistent CTAs
* are used.
*
* By persistent CTAs we mean that we launch only as many thread blocks as can
* be resident in the GPU and no more, rather than launching as many threads as
* we have elements. Persistent CTAs loop over blocks of elements until all work
* is complete. This can be faster in some cases. In our tests it is faster
* for large sorts (and the threshold is higher on compute version 1.1 and earlier
* GPUs than it is on compute version 1.2 GPUs).
*
* @param[out] keysOut Output of sorted keys GPU main memory
* @param[in] keysIn Input of unsorted keys in GPU main memory
* @param[in] numElements Total number of elements to sort
* @param[in] totalBlocks Total number of blocks to sort
*
*/
template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop>
__global__ void radixSortBlocksKeysOnly(uint4* keysOut, uint4* keysIn, uint numElements, uint totalBlocks)
{
extern __shared__ uint4 sMem[];
uint4 key;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
uint idx = i << 2;
// handle non-full last block if array is not multiple of 1024 numElements
if (!fullBlocks && idx+3 >= numElements)
{
if (idx >= numElements)
{
key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
}
else
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysIn;
key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX;
key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
key.w = UINT_MAX;
}
}
else
{
key = keysIn[i];
if (flip)
{
key.x = floatFlip<flip>(key.x);
key.y = floatFlip<flip>(key.y);
key.z = floatFlip<flip>(key.z);
key.w = floatFlip<flip>(key.w);
}
}
__syncthreads();
radixSortBlockKeysOnly<nbits, startbit>(key);
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && idx+3 >= numElements)
{
if (idx < numElements)
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysOut;
keys1[idx] = key.x;
if (idx + 1 < numElements)
{
keys1[idx + 1] = key.y;
if (idx + 2 < numElements)
{
keys1[idx + 2] = key.z;
}
}
}
}
else
{
keysOut[i] = key;
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
/** @brief Reorders data in the global array.
*
* reorderDataKeysOnly shuffles data in the array globally after the radix offsets
* have been found. On compute version 1.1 and earlier GPUs, this code depends
* on SORT_CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits).
*
* On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures
* that all writes are coalesced using extra work in the kernel. On later
* GPUs coalescing rules have been relaxed, so this extra overhead hurts
* performance. On these GPUs we set manualCoalesce=false and directly store
* the results.
*
* Template parameters are used to generate efficient code for various special cases
* For example, we have to handle arrays that are a multiple of the block size
* (fullBlocks) differently than arrays that are not. "loop" is used when persistent
* CTAs are used.
*
* By persistent CTAs we mean that we launch only as many thread blocks as can
* be resident in the GPU and no more, rather than launching as many threads as
* we have elements. Persistent CTAs loop over blocks of elements until all work
* is complete. This can be faster in some cases. In our tests it is faster
* for large sorts (and the threshold is higher on compute version 1.1 and earlier
* GPUs than it is on compute version 1.2 GPUs).
*
* @param[out] outKeys Output result of reorderDataKeysOnly()
* @param[in] keys Keys to be reordered
* @param[in] blockOffsets Start offset for each block
* @param[in] offsets Offset of each radix within each block
* @param[in] sizes Number of elements in a block
* @param[in] numElements Total number of elements
* @param[in] totalBlocks Total number of blocks
*/
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop>
__global__ void reorderDataKeysOnly(uint *outKeys,
uint2 *keys,
uint *blockOffsets,
uint *offsets,
uint *sizes,
uint numElements,
uint totalBlocks)
{
__shared__ uint2 sKeys2[SORT_CTA_SIZE];
__shared__ uint sOffsets[16];
__shared__ uint sBlockOffsets[16];
uint *sKeys1 = (uint*)sKeys2;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && (((i + 1) << 1) > numElements))
{
uint *keys1 = (uint*)keys;
uint j = i << 1;
sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX;
j++;
sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX;
}
else
{
sKeys2[threadIdx.x] = keys[i];
}
if (!manualCoalesce)
{
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
}
__syncthreads();
uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]);
}
radix = (sKeys1[threadIdx.x + SORT_CTA_SIZE] >> startbit) & 0xF;
globalOffset = sOffsets[radix] + threadIdx.x + SORT_CTA_SIZE - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + SORT_CTA_SIZE]);
}
}
else
{
__shared__ uint sSizes[16];
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId];
}
__syncthreads();
// 1 half-warp is responsible for writing out all values for 1 radix.
// Loops if there are more than 16 values to be written out.
// All start indices are rounded down to the nearest multiple of 16, and
// all end indices are rounded up to the nearest multiple of 16.
// Thus it can do extra work if the start and end indices are not multiples of 16
// This is bounded by a factor of 2 (it can do 2X more work at most).
const uint halfWarpID = threadIdx.x >> 4;
const uint halfWarpOffset = threadIdx.x & 0xF;
const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;
uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;
uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);
uint numIterations = endPos - startPos;
uint outOffset = startPos + halfWarpOffset;
uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;
for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
{
if( (outOffset >= sOffsets[halfWarpID]) &&
(inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
{
if(blockId < totalBlocks - 1 || outOffset < numElements)
{
outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]);
}
}
}
}
if (loop)
{
blockId += gridDim.x;
__syncthreads();
}
else
break;
}
}
/** @} */ // end radixsort functions
/** @} */ // end cudpp_kernel
| 6a98d4ca0be67bb5caf22d95ef0bfafa36f68f61.cu | // -------------------------------------------------------------
// CUDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision$
// $Date$
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
#include "cudpp_radixsort.h"
#include <cudpp_globals.h>
#include "sharedmem.h"
#include "cta/radixsort_cta.cu"
#ifdef __DEVICE_EMULATION__
#define __EMUSYNC __syncthreads()
#else
#define __EMUSYNC
#endif
/**
* @file
* radixsort_kernel.cu
*
* @brief CUDPP kernel-level radix sorting routines
*/
/** \addtogroup cudpp_kernel
* @{
*/
/** @name RadixSort Functions
* @{
*/
typedef unsigned int uint;
/** @brief An empty kernel used to reset CTA issue hardware
**/
__global__ void emptyKernel() {}
/** @brief Does special binary arithmetic before sorting floats
*
* Uses floatFlip function to flip bits.
* @param[in,out] values Values to be manipulated
* @param[in] numValues Number of values to be flipped
**/
__global__ void flipFloats(uint *values, uint numValues)
{
uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatFlip<true>(values[index]);
}
/** @brief Undoes the flips from flipFloats
*
* Uses floatUnflip function to unflip bits.
* @param[in,out] values Values to be manipulated
* @param[in] numValues Number of values to be unflipped
**/
__global__ void unflipFloats(uint *values, uint numValues)
{
uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
index += blockDim.x;
if (index < numValues) values[index] = floatUnflip<true>(values[index]);
}
/** @brief Optimization for sorts of WARP_SIZE or fewer elements
*
* @param[in,out] keys Keys to be sorted.
* @param[in,out] values Associated values to be sorted (through keys).
* @param[in] numElements Number of elements in the sort.
*/
template <bool flip>
__global__
void radixSortSingleWarp(uint *keys,
uint *values,
uint numElements)
{
volatile __shared__ uint sKeys[WARP_SIZE]; //remove class distinctions
volatile __shared__ uint sValues[WARP_SIZE];
volatile __shared__ uint sFlags[WARP_SIZE];
sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]);
sValues[threadIdx.x] = values[threadIdx.x];
__EMUSYNC; // emulation only
for(uint i = 1; i < numElements; i++)
{
uint key_i = sKeys[i];
uint val_i = sValues[i];
sFlags[threadIdx.x] = 0;
uint temp, tempval;
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
temp = sKeys[threadIdx.x];
tempval = sValues[threadIdx.x];
sFlags[threadIdx.x] = 1;
#ifdef __DEVICE_EMULATION__
}
__EMUSYNC;
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
#endif
sKeys[threadIdx.x + 1] = temp;
sValues[threadIdx.x + 1] = tempval;
sFlags[threadIdx.x + 1] = 0;
}
if(sFlags[threadIdx.x] == 1 )
{
sKeys[threadIdx.x] = key_i;
sValues[threadIdx.x] = val_i;
}
__EMUSYNC; // emulation only
}
keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]);
values[threadIdx.x] = sValues[threadIdx.x];
}
/** @brief Optimization for sorts of WARP_SIZE or fewer elements. Keys-Only version.
*
* @param[in,out] keys Keys to be sorted
* @param[in] numElements Total number of elements to be sorted
**/
template <bool flip>
__global__
void radixSortSingleWarpKeysOnly(uint *keys,
uint numElements)
{
volatile __shared__ uint sKeys[WARP_SIZE];
volatile __shared__ uint sFlags[WARP_SIZE];
sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]);
__EMUSYNC; // emulation only
for(uint i = 1; i < numElements; i++)
{
uint key_i = sKeys[i];
sFlags[threadIdx.x] = 0;
uint temp;
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
temp = sKeys[threadIdx.x];
sFlags[threadIdx.x] = 1;
#ifdef __DEVICE_EMULATION__
}
__EMUSYNC;
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
#endif
sKeys[threadIdx.x + 1] = temp;
sFlags[threadIdx.x + 1] = 0;
}
if(sFlags[threadIdx.x] == 1 )
{
sKeys[threadIdx.x] = key_i;
}
__EMUSYNC; // emulation only
}
keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]);
}
/** @brief Sorts all blocks of data independently in shared memory.
* Each thread block (CTA) sorts one block of 4*CTA_SIZE elements
*
* The radix sort is done in two stages. This stage calls radixSortBlock on each
* block independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
*
* Template parameters are used to generate efficient code for various special cases
* For example, we have to handle arrays that are a multiple of the block size (fullBlocks)
* differently than arrays that are not. "flip" is used to only compile in the
* float flip code when float keys are used. "loop" is used when persistent CTAs
* are used.
*
* By persistent CTAs we mean that we launch only as many thread blocks as can
* be resident in the GPU and no more, rather than launching as many threads as
* we have elements. Persistent CTAs loop over blocks of elements until all work
* is complete. This can be faster in some cases. In our tests it is faster
* for large sorts (and the threshold is higher on compute version 1.1 and earlier
* GPUs than it is on compute version 1.2 GPUs).
*
* @param[out] keysOut Output of sorted keys
* @param[out] valuesOut Output of associated values
* @param[in] keysIn Input of unsorted keys in GPU
* @param[in] valuesIn Input of associated input values
* @param[in] numElements Total number of elements to sort
* @param[in] totalBlocks The number of blocks of data to sort
*/
template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop>
__global__ void radixSortBlocks(uint4* keysOut, uint4* valuesOut,
uint4* keysIn, uint4* valuesIn,
uint numElements, uint totalBlocks)
{
extern __shared__ uint4 sMem[];
uint4 key, value;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
uint idx = i << 2;
// handle non-full last block if array is not multiple of 1024 numElements
if (!fullBlocks && idx+3 >= numElements)
{
if (idx >= numElements)
{
key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
value = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
}
else
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysIn;
uint *values1 = (uint*)valuesIn;
key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX;
key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
key.w = UINT_MAX;
value.x = (idx < numElements) ? values1[idx] : UINT_MAX;
value.y = (idx+1 < numElements) ? values1[idx+1] : UINT_MAX;
value.z = (idx+2 < numElements) ? values1[idx+2] : UINT_MAX;
value.w = UINT_MAX;
}
}
else
{
key = keysIn[i];
value = valuesIn[i];
if (flip)
{
key.x = floatFlip<flip>(key.x);
key.y = floatFlip<flip>(key.y);
key.z = floatFlip<flip>(key.z);
key.w = floatFlip<flip>(key.w);
}
}
__syncthreads();
radixSortBlock<nbits, startbit>(key, value);
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && idx+3 >= numElements)
{
if (idx < numElements)
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysOut;
uint *values1 = (uint*)valuesOut;
keys1[idx] = key.x;
values1[idx] = value.x;
if (idx + 1 < numElements)
{
keys1[idx + 1] = key.y;
values1[idx + 1] = value.y;
if (idx + 2 < numElements)
{
keys1[idx + 2] = key.z;
values1[idx + 2] = value.z;
}
}
}
}
else
{
keysOut[i] = key;
valuesOut[i] = value;
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
/** @brief Computes the number of keys of each radix in each block and stores the offsets.
*
* Given an array with blocks sorted according to a 4-bit radix group, each
* block counts the number of keys that fall into each radix in the group, and
* finds the starting offset of each radix in the block. It then writes the radix
* counts to the counters array, and the starting offsets to the blockOffsets array.
*
* Template parameters are used to generate efficient code for various special cases
* For example, we have to handle arrays that are a multiple of the block size
* (fullBlocks) differently than arrays that are not. "loop" is used when persistent
* CTAs are used.
*
* By persistent CTAs we mean that we launch only as many thread blocks as can
* be resident in the GPU and no more, rather than launching as many threads as
* we have elements. Persistent CTAs loop over blocks of elements until all work
* is complete. This can be faster in some cases. In our tests it is faster
* for large sorts (and the threshold is higher on compute version 1.1 and earlier
* GPUs than it is on compute version 1.2 GPUs).
*
* @param[in] keys Input keys
* @param[out] counters Radix count for each block
* @param[out] blockOffsets The offset address for each block
* @param[in] numElements Total number of elements
* @param[in] totalBlocks Total number of blocks
**/
template<uint startbit, bool fullBlocks, bool loop>
__global__ void findRadixOffsets(uint2 *keys,
uint *counters,
uint *blockOffsets,
uint numElements,
uint totalBlocks)
{
extern __shared__ uint sRadix1[];
__shared__ uint sStartPointers[16];
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint2 radix2;
uint i = blockId * blockDim.x + threadIdx.x;
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && ((i + 1) << 1 ) > numElements )
{
// handle uint1 rather than uint2 for non-full blocks
uint *keys1 = (uint*)keys;
uint j = i << 1;
radix2.x = (j < numElements) ? keys1[j] : UINT_MAX;
j++;
radix2.y = (j < numElements) ? keys1[j] : UINT_MAX;
}
else
{
radix2 = keys[i];
}
sRadix1[2 * threadIdx.x] = (radix2.x >> startbit) & 0xF;
sRadix1[2 * threadIdx.x + 1] = (radix2.y >> startbit) & 0xF;
// Finds the position where the sRadix1 entries differ and stores start
// index for each radix.
if(threadIdx.x < 16)
{
sStartPointers[threadIdx.x] = 0;
}
__syncthreads();
if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) )
{
sStartPointers[sRadix1[threadIdx.x]] = threadIdx.x;
}
if(sRadix1[threadIdx.x + SORT_CTA_SIZE] != sRadix1[threadIdx.x + SORT_CTA_SIZE - 1])
{
sStartPointers[sRadix1[threadIdx.x + SORT_CTA_SIZE]] = threadIdx.x + SORT_CTA_SIZE;
}
__syncthreads();
if(threadIdx.x < 16)
{
blockOffsets[blockId*16 + threadIdx.x] = sStartPointers[threadIdx.x];
}
__syncthreads();
// Compute the sizes of each block.
if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) )
{
sStartPointers[sRadix1[threadIdx.x - 1]] =
threadIdx.x - sStartPointers[sRadix1[threadIdx.x - 1]];
}
if(sRadix1[threadIdx.x + SORT_CTA_SIZE] != sRadix1[threadIdx.x + SORT_CTA_SIZE - 1] )
{
sStartPointers[sRadix1[threadIdx.x + SORT_CTA_SIZE - 1]] =
threadIdx.x + SORT_CTA_SIZE - sStartPointers[sRadix1[threadIdx.x + SORT_CTA_SIZE - 1]];
}
if(threadIdx.x == SORT_CTA_SIZE - 1)
{
sStartPointers[sRadix1[2 * SORT_CTA_SIZE - 1]] =
2 * SORT_CTA_SIZE - sStartPointers[sRadix1[2 * SORT_CTA_SIZE - 1]];
}
__syncthreads();
if(threadIdx.x < 16)
{
counters[threadIdx.x * totalBlocks + blockId] =
sStartPointers[threadIdx.x];
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
/** @brief Reorders data in the global array.
*
* reorderData shuffles data in the array globally after the radix
* offsets have been found. On compute version 1.1 and earlier GPUs, this code depends
* on SORT_CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits).
*
* On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures
* that all writes are coalesced using extra work in the kernel. On later
* GPUs coalescing rules have been relaxed, so this extra overhead hurts
* performance. On these GPUs we set manualCoalesce=false and directly store
* the results.
*
* Template parameters are used to generate efficient code for various special cases
* For example, we have to handle arrays that are a multiple of the block size
* (fullBlocks) differently than arrays that are not. "loop" is used when persistent
* CTAs are used.
*
* By persistent CTAs we mean that we launch only as many thread blocks as can
* be resident in the GPU and no more, rather than launching as many threads as
* we have elements. Persistent CTAs loop over blocks of elements until all work
* is complete. This can be faster in some cases. In our tests it is faster
* for large sorts (and the threshold is higher on compute version 1.1 and earlier
* GPUs than it is on compute version 1.2 GPUs).
*
* @param[out] outKeys Output of sorted keys
* @param[out] outValues Output of associated values
* @param[in] keys Input of unsorted keys in GPU
* @param[in] values Input of associated input values
* @param[in] blockOffsets The offset address for each block
* @param[in] offsets Address of each radix within each block
* @param[in] sizes Number of elements in a block
* @param[in] numElements Total number of elements
* @param[in] totalBlocks Total number of data blocks to process
*
* @todo Args that are const below should be prototyped as const
**/
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop>
__global__ void reorderData(uint *outKeys,
uint *outValues,
uint2 *keys,
uint2 *values,
uint *blockOffsets,
uint *offsets,
uint *sizes,
uint numElements,
uint totalBlocks)
{
__shared__ uint2 sKeys2[SORT_CTA_SIZE];
__shared__ uint2 sValues2[SORT_CTA_SIZE];
__shared__ uint sOffsets[16];
__shared__ uint sBlockOffsets[16];
uint *sKeys1 = (uint*)sKeys2;
uint *sValues1 = (uint*)sValues2;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && (((i + 1) << 1) > numElements))
{
uint *keys1 = (uint*)keys;
uint *values1 = (uint*)values;
uint j = i << 1;
sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX;
sValues1[threadIdx.x << 1] = (j < numElements) ? values1[j] : UINT_MAX;
j++;
sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX;
sValues1[(threadIdx.x << 1) + 1] = (j < numElements) ? values1[j] : UINT_MAX;
}
else
{
sKeys2[threadIdx.x] = keys[i];
sValues2[threadIdx.x] = values[i];
}
if (!manualCoalesce)
{
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
}
__syncthreads();
uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]);
outValues[globalOffset] = sValues1[threadIdx.x];
}
radix = (sKeys1[threadIdx.x + SORT_CTA_SIZE] >> startbit) & 0xF;
globalOffset = sOffsets[radix] + threadIdx.x + SORT_CTA_SIZE - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + SORT_CTA_SIZE]);
outValues[globalOffset] = sValues1[threadIdx.x + SORT_CTA_SIZE];
}
}
else
{
__shared__ uint sSizes[16];
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId];
}
__syncthreads();
// 1 half-warp is responsible for writing out all values for 1 radix.
// Loops if there are more than 16 values to be written out.
// All start indices are rounded down to the nearest multiple of 16, and
// all end indices are rounded up to the nearest multiple of 16.
// Thus it can do extra work if the start and end indices are not multiples of 16
// This is bounded by a factor of 2 (it can do 2X more work at most).
const uint halfWarpID = threadIdx.x >> 4;
const uint halfWarpOffset = threadIdx.x & 0xF;
const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;
uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;
uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);
uint numIterations = endPos - startPos;
uint outOffset = startPos + halfWarpOffset;
uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;
for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
{
if( (outOffset >= sOffsets[halfWarpID]) &&
(inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
{
if(blockId < totalBlocks - 1 || outOffset < numElements)
{
outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]);
outValues[outOffset] = sValues1[inOffset];
}
}
}
}
if (loop)
{
blockId += gridDim.x;
__syncthreads();
}
else
break;
}
}
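// A worked example (ours) of the rounding done in the manualCoalesce path above: with
// sOffsets[r] = 37 and sSizes[r] = 20, radix r occupies global positions [37, 57); startPos
// rounds down to 32 and endPos up to 64, so the half-warp sweeps 64 - 32 = 32 aligned slots
// and the guards mask out the 5 leading and 7 trailing ones. The waste is at most one extra
// 16-wide pass per end, which is the "bounded by a factor of 2" claim in the comment.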
/** @brief Sorts all blocks of data independently in shared memory.
* Each thread block (CTA) sorts one block of 4*CTA_SIZE elements
*
* The radix sort is done in two stages. This stage calls radixSortBlock on each
* block independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
*
* Template parameters are used to generate efficient code for various special cases
* For example, we have to handle arrays that are a multiple of the block size (fullBlocks)
* differently than arrays that are not. "flip" is used to only compile in the
* float flip code when float keys are used. "loop" is used when persistent CTAs
* are used.
*
* By persistent CTAs we mean that we launch only as many thread blocks as can
* be resident in the GPU and no more, rather than launching as many threads as
* we have elements. Persistent CTAs loop over blocks of elements until all work
* is complete. This can be faster in some cases. In our tests it is faster
* for large sorts (and the threshold is higher on compute version 1.1 and earlier
* GPUs than it is on compute version 1.2 GPUs).
*
* @param[out] keysOut Output of sorted keys GPU main memory
* @param[in] keysIn Input of unsorted keys in GPU main memory
* @param[in] numElements Total number of elements to sort
* @param[in] totalBlocks Total number of blocks to sort
*
*/
template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop>
__global__ void radixSortBlocksKeysOnly(uint4* keysOut, uint4* keysIn, uint numElements, uint totalBlocks)
{
extern __shared__ uint4 sMem[];
uint4 key;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
uint idx = i << 2;
// handle non-full last block if array is not multiple of 1024 numElements
if (!fullBlocks && idx+3 >= numElements)
{
if (idx >= numElements)
{
key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
}
else
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysIn;
key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX;
key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
key.w = UINT_MAX;
}
}
else
{
key = keysIn[i];
if (flip)
{
key.x = floatFlip<flip>(key.x);
key.y = floatFlip<flip>(key.y);
key.z = floatFlip<flip>(key.z);
key.w = floatFlip<flip>(key.w);
}
}
__syncthreads();
radixSortBlockKeysOnly<nbits, startbit>(key);
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && idx+3 >= numElements)
{
if (idx < numElements)
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysOut;
keys1[idx] = key.x;
if (idx + 1 < numElements)
{
keys1[idx + 1] = key.y;
if (idx + 2 < numElements)
{
keys1[idx + 2] = key.z;
}
}
}
}
else
{
keysOut[i] = key;
}
if (loop)
blockId += gridDim.x;
else
break;
}
}
/** @brief Reorders data in the global array.
*
* reorderDataKeysOnly shuffles data in the array globally after the radix offsets
* have been found. On compute version 1.1 and earlier GPUs, this code depends
* on SORT_CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits).
*
* On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures
* that all writes are coalesced using extra work in the kernel. On later
* GPUs coalescing rules have been relaxed, so this extra overhead hurts
* performance. On these GPUs we set manualCoalesce=false and directly store
* the results.
*
* Template parameters are used to generate efficient code for various special cases
* For example, we have to handle arrays that are a multiple of the block size
* (fullBlocks) differently than arrays that are not. "loop" is used when persistent
* CTAs are used.
*
* By persistent CTAs we mean that we launch only as many thread blocks as can
* be resident in the GPU and no more, rather than launching as many threads as
* we have elements. Persistent CTAs loop over blocks of elements until all work
* is complete. This can be faster in some cases. In our tests it is faster
* for large sorts (and the threshold is higher on compute version 1.1 and earlier
* GPUs than it is on compute version 1.2 GPUs).
*
* @param[out] outKeys Output result of reorderDataKeysOnly()
* @param[in] keys Keys to be reordered
* @param[in] blockOffsets Start offset for each block
* @param[in] offsets Offset of each radix within each block
* @param[in] sizes Number of elements in a block
* @param[in] numElements Total number of elements
* @param[in] totalBlocks Total number of blocks
*/
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop>
__global__ void reorderDataKeysOnly(uint *outKeys,
uint2 *keys,
uint *blockOffsets,
uint *offsets,
uint *sizes,
uint numElements,
uint totalBlocks)
{
__shared__ uint2 sKeys2[SORT_CTA_SIZE];
__shared__ uint sOffsets[16];
__shared__ uint sBlockOffsets[16];
uint *sKeys1 = (uint*)sKeys2;
uint blockId = blockIdx.x;
while (!loop || blockId < totalBlocks)
{
uint i = blockId * blockDim.x + threadIdx.x;
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && (((i + 1) << 1) > numElements))
{
uint *keys1 = (uint*)keys;
uint j = i << 1;
sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX;
j++;
sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX;
}
else
{
sKeys2[threadIdx.x] = keys[i];
}
if (!manualCoalesce)
{
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
}
__syncthreads();
uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]);
}
radix = (sKeys1[threadIdx.x + SORT_CTA_SIZE] >> startbit) & 0xF;
globalOffset = sOffsets[radix] + threadIdx.x + SORT_CTA_SIZE - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + SORT_CTA_SIZE]);
}
}
else
{
__shared__ uint sSizes[16];
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId];
}
__syncthreads();
// 1 half-warp is responsible for writing out all values for 1 radix.
// Loops if there are more than 16 values to be written out.
// All start indices are rounded down to the nearest multiple of 16, and
// all end indices are rounded up to the nearest multiple of 16.
// Thus it can do extra work if the start and end indices are not multiples of 16
// This is bounded by a factor of 2 (it can do 2X more work at most).
const uint halfWarpID = threadIdx.x >> 4;
const uint halfWarpOffset = threadIdx.x & 0xF;
const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;
uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;
uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);
uint numIterations = endPos - startPos;
uint outOffset = startPos + halfWarpOffset;
uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;
for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
{
if( (outOffset >= sOffsets[halfWarpID]) &&
(inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
{
if(blockId < totalBlocks - 1 || outOffset < numElements)
{
outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]);
}
}
}
}
if (loop)
{
blockId += gridDim.x;
__syncthreads();
}
else
break;
}
}
/** @} */ // end radixsort functions
/** @} */ // end cudpp_kernel
|
275e30a67529a86e7cfe1d3be8b0a867487f4f52.hip | // !!! This is a file automatically generated by hipify!!!
#include "Scene.h"
#include <iostream>
//void addShape(ImplicitShape* shape); // TODO
void Scene::addShape(Sphere* sph) {
spheres_num_++;
shapes_.push_back(sph);
}
void Scene::addLight(Light* light) {
lights_num_++;
lights_.push_back(light);
}
void Scene::addAmbientLight(AmbientLight* light) {
ambientLight_ = light;
}
__host__ void Scene::shapes_to_device() {
size_t total_size = 0;
for (Sphere *sph : shapes_) {
total_size += sizeof(*sph);
}
// Static allocation on device memory
HANDLE_ERROR(
hipMalloc((void**)&devSpheres_, total_size)
);
int offset = 0;
for (Sphere *sph : shapes_) {
// Copy from host to device
HANDLE_ERROR(
hipMemcpy((void*)(devSpheres_+offset), (void*)sph, sizeof(*sph), hipMemcpyHostToDevice)
);
offset++;
}
if (offset != spheres_num_) {
std::cout << "ERROR: "
"offset = " << offset <<
", spheres_num_ = " << spheres_num_ <<
std::endl;
exit(1);
}
}
__host__ void Scene::lights_to_device() {
if (lights_num_ > 0) {
size_t total_size = 0;
for (Light *lgt : lights_) {
total_size += sizeof(*lgt);
}
// Static allocation on device memory
HANDLE_ERROR(
hipMalloc((void**)&devLights_, total_size)
);
int offset = 0;
for (Light *lgt : lights_) {
// Copy from host to device
HANDLE_ERROR(
hipMemcpy((void*)(devLights_+offset), (void*)lgt, sizeof(*lgt), hipMemcpyHostToDevice)
);
offset++;
}
if (offset != lights_num_) {
std::cout << "ERROR: "
"offset = " << offset <<
", lights_num_ = " << lights_num_ <<
std::endl;
exit(1);
}
}
if (hasAmbientLight()) {
HANDLE_ERROR(
hipMalloc((void**)&devAmbLight_, sizeof(AmbientLight))
);
HANDLE_ERROR(
hipMemcpy((void*)devAmbLight_, (void*)ambientLight_, sizeof(AmbientLight), hipMemcpyHostToDevice)
);
}
}
__host__ Scene* Scene::to_device() {
if (spheres_num_ > 0) {
shapes_to_device();
}
if (lights_num_ > 0) {
lights_to_device();
}
// Static allocation on device memory
HANDLE_ERROR(
hipMalloc((void**)&devPtr_, sizeof(Scene))
);
// Copy from host to device
HANDLE_ERROR(
hipMemcpy((void*)devPtr_, (void*)this, sizeof(Scene), hipMemcpyHostToDevice)
);
return devPtr_;
}
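// A hedged usage sketch (ours, not part of this project) of the call pattern the methods above
// imply: populate the scene on the host, mirror it once with to_device(), and hand the returned
// device pointer to the kernels. renderKernel, its arguments and the Sphere/Light constructor
// calls are illustrative assumptions.
//
// Scene scene;
// scene.addShape(new Sphere(/* center, radius, ... */));
// scene.addLight(new Light(/* position, color, ... */));
// Scene *devScene = scene.to_device();
// hipLaunchKernelGGL(renderKernel, dim3(gridDim), dim3(blockDim), 0, 0,
//                    devScene, devFramebuffer);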
| 275e30a67529a86e7cfe1d3be8b0a867487f4f52.cu | #include "Scene.h"
#include <iostream>
//void addShape(ImplicitShape* shape); // TODO
void Scene::addShape(Sphere* sph) {
spheres_num_++;
shapes_.push_back(sph);
}
void Scene::addLight(Light* light) {
lights_num_++;
lights_.push_back(light);
}
void Scene::addAmbientLight(AmbientLight* light) {
ambientLight_ = light;
}
__host__ void Scene::shapes_to_device() {
size_t total_size = 0;
for (Sphere *sph : shapes_) {
total_size += sizeof(*sph);
}
// Static allocation on device memory
HANDLE_ERROR(
cudaMalloc((void**)&devSpheres_, total_size)
);
int offset = 0;
for (Sphere *sph : shapes_) {
// Copy from host to device
HANDLE_ERROR(
cudaMemcpy((void*)(devSpheres_+offset), (void*)sph, sizeof(*sph), cudaMemcpyHostToDevice)
);
offset++;
}
if (offset != spheres_num_) {
std::cout << "ERROR: "
"offset = " << offset <<
", spheres_num_ = " << spheres_num_ <<
std::endl;
exit(1);
}
}
__host__ void Scene::lights_to_device() {
if (lights_num_ > 0) {
size_t total_size = 0;
for (Light *lgt : lights_) {
total_size += sizeof(*lgt);
}
// Static allocation on device memory
HANDLE_ERROR(
cudaMalloc((void**)&devLights_, total_size)
);
int offset = 0;
for (Light *lgt : lights_) {
// Copy from host to device
HANDLE_ERROR(
cudaMemcpy((void*)(devLights_+offset), (void*)lgt, sizeof(*lgt), cudaMemcpyHostToDevice)
);
offset++;
}
if (offset != lights_num_) {
std::cout << "ERROR: "
"offset = " << offset <<
", lights_num_ = " << lights_num_ <<
std::endl;
exit(1);
}
}
if (hasAmbientLight()) {
HANDLE_ERROR(
cudaMalloc((void**)&devAmbLight_, sizeof(AmbientLight))
);
HANDLE_ERROR(
cudaMemcpy((void*)devAmbLight_, (void*)ambientLight_, sizeof(AmbientLight), cudaMemcpyHostToDevice)
);
}
}
__host__ Scene* Scene::to_device() {
if (spheres_num_ > 0) {
shapes_to_device();
}
if (lights_num_ > 0) {
lights_to_device();
}
// Static allocation on device memory
HANDLE_ERROR(
cudaMalloc((void**)&devPtr_, sizeof(Scene))
);
// Copy from host to device
HANDLE_ERROR(
cudaMemcpy((void*)devPtr_, (void*)this, sizeof(Scene), cudaMemcpyHostToDevice)
);
return devPtr_;
}
|
21437adc117dfbcb6c80e66f52324b8ca44a1f75.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "negative_log_likelihood_layer_updater_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "../negative_log_likelihood_layer.h"
#include "../neural_network_exception.h"
namespace nnforge
{
namespace cuda
{
extern __shared__ float arr_sh[];
__global__ void negative_log_likelihood_upd_kernel(
float * __restrict output,
const float * __restrict predicted,
const float * __restrict actual,
const float * __restrict scale_mask,
int input_feature_map_count,
int elem_count_per_feature_map,
float scale,
int entry_count)
{
int feature_map_id = threadIdx.x;
int neuron_id = blockIdx.x;
int entry_id = blockIdx.y;
int threadblock_size = blockDim.x;
float err = 0.0F;
int output_offset = entry_id * elem_count_per_feature_map + neuron_id;
float mask = 1.0F;
if (scale_mask)
mask = scale_mask[output_offset];
int thread_id = threadIdx.x;
if (mask != 0.0F)
{
int input_offset = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id;
while (feature_map_id < input_feature_map_count)
{
float actual_val = actual[input_offset];
float predicted_val = predicted[input_offset];
err -= (actual_val > 0.0F) ? actual_val * __logf(max(predicted_val, 1.0e-20F)) : 0.0F;
feature_map_id += threadblock_size;
input_offset += threadblock_size * elem_count_per_feature_map;
}
int lane_id = thread_id & 31;
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
err += __shfl_down(err, tx);
int warp_count = threadblock_size >> 5;
if (warp_count > 1)
{
if (lane_id == 0)
arr_sh[thread_id >> 5] = err;
__syncthreads();
if (thread_id < 32)
{
err = 0.0F;
if (thread_id < warp_count)
err = arr_sh[thread_id];
#pragma unroll
for(int tx = 4; tx > 0; tx >>= 1)
err += __shfl_down(err, tx);
}
}
}
if (thread_id == 0)
output[output_offset] = err * (mask * scale);
}
template<bool add_update_to_destination>
__global__ void negative_log_likelihood_backprop_upd_kernel(
float * __restrict output,
const float * __restrict deriv_input_neurons,
const float * __restrict target_input_neurons,
float scale,
int elem_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
if (elem_id < elem_count)
{
float actual_val = target_input_neurons[elem_id];
float predicted_val = deriv_input_neurons[elem_id];
float gradient = (actual_val > 0.0F) ? __fdividef(actual_val, max(predicted_val, 1.0e-20F)) : 0.0F;
if (add_update_to_destination)
output[elem_id] += scale * gradient;
else
output[elem_id] = scale * gradient;
}
}
template<bool add_update_to_destination>
__global__ void negative_log_likelihood_backprop_upd_kernel(
float * __restrict output,
const float * __restrict deriv_input_neurons,
const float * __restrict target_input_neurons,
const float * __restrict scale_mask,
float scale,
int elem_count_per_feature_map,
int input_feature_map_count,
int entry_count)
{
int neuron_id = blockDim.x * blockIdx.x + threadIdx.x;
int feature_map_id = blockDim.y * blockIdx.y + threadIdx.y;
int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
if ((neuron_id < elem_count_per_feature_map) && (feature_map_id < input_feature_map_count) && (entry_id < entry_count))
{
int elem_id = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id;
float mask = scale_mask[entry_id * elem_count_per_feature_map + neuron_id];
float gradient = 0.0F;
if (mask != 0.0F)
{
float actual_val = target_input_neurons[elem_id];
float predicted_val = deriv_input_neurons[elem_id];
gradient = (actual_val > 0.0F) ? __fdividef(actual_val, max(predicted_val, 1.0e-20F)) : 0.0F;
gradient *= scale * mask;
}
if (add_update_to_destination)
output[elem_id] += gradient;
else
output[elem_id] = gradient;
}
}
negative_log_likelihood_layer_updater_cuda::negative_log_likelihood_layer_updater_cuda()
{
}
negative_log_likelihood_layer_updater_cuda::~negative_log_likelihood_layer_updater_cuda()
{
}
void negative_log_likelihood_layer_updater_cuda::enqueue_forward_propagation(
hipStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::ptr temporary_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
int threadblock_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count);
const float * scale_mask = 0;
if (input_buffers.size() > 2)
scale_mask = *input_buffers[2];
int smem_size = ((threadblock_size + 32 - 1) / 32) * sizeof(float);
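// Dynamic shared memory: one float per warp for the partial sums staged in arr_sh.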
hipLaunchKernelGGL(( negative_log_likelihood_upd_kernel), dim3(dim3(input_elem_count_per_feature_map_list[0], entry_count)), dim3(threadblock_size), smem_size, stream_id,
*output_buffer,
*input_buffers[0],
*input_buffers[1],
scale_mask,
input_configuration_specific_list[0].feature_map_count,
input_elem_count_per_feature_map_list[0],
scale,
entry_count);
}
void negative_log_likelihood_layer_updater_cuda::enqueue_backward_data_propagation(
hipStream_t stream_id,
unsigned int input_index,
cuda_linear_buffer_device::ptr input_errors_buffer,
cuda_linear_buffer_device::const_ptr output_errors_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
cuda_linear_buffer_device::const_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::const_ptr temporary_fixed_buffer,
cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
bool add_update_to_destination,
unsigned int entry_count)
{
if (input_neurons_buffers.size() > 2)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
input_elem_count_per_feature_map_list[0],
input_configuration_specific_list[0].feature_map_count,
entry_count);
if (add_update_to_destination)
hipLaunchKernelGGL(( negative_log_likelihood_backprop_upd_kernel<true>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_errors_buffer,
*input_neurons_buffers[0],
*input_neurons_buffers[1],
*input_neurons_buffers[2],
scale,
input_elem_count_per_feature_map_list[0],
input_configuration_specific_list[0].feature_map_count,
entry_count);
else
hipLaunchKernelGGL(( negative_log_likelihood_backprop_upd_kernel<false>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_errors_buffer,
*input_neurons_buffers[0],
*input_neurons_buffers[1],
*input_neurons_buffers[2],
scale,
input_elem_count_per_feature_map_list[0],
input_configuration_specific_list[0].feature_map_count,
entry_count);
}
else
{
int elem_count = entry_count * input_elem_count_per_entry_list[0];
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
if (add_update_to_destination)
hipLaunchKernelGGL(( negative_log_likelihood_backprop_upd_kernel<true>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_errors_buffer,
*input_neurons_buffers[0],
*input_neurons_buffers[1],
scale,
elem_count);
else
hipLaunchKernelGGL(( negative_log_likelihood_backprop_upd_kernel<false>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_errors_buffer,
*input_neurons_buffers[0],
*input_neurons_buffers[1],
scale,
elem_count);
}
}
void negative_log_likelihood_layer_updater_cuda::updater_configured()
{
if (actions.find(layer_action(layer_action::backward_data, 1)) != actions.end())
throw neural_network_exception("negative_log_likelihood_layer_updater_cuda cannot do backward propagation for targets");
if (actions.find(layer_action(layer_action::backward_data, 2)) != actions.end())
throw neural_network_exception("negative_log_likelihood_layer_updater_cuda cannot do backward propagation for scale mask");
nnforge_shared_ptr<const negative_log_likelihood_layer> layer_derived = nnforge_dynamic_pointer_cast<const negative_log_likelihood_layer>(layer_schema);
scale = layer_derived->scale;
}
bool negative_log_likelihood_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const
{
return false;
}
int negative_log_likelihood_layer_updater_cuda::get_threadblock_size(int input_feature_map_count)
{
int threadblock_size;
if (input_feature_map_count < 256)
{
threadblock_size = (input_feature_map_count + 32 - 1) / 32 * 32;
}
else
{
int threadblock_count = (input_feature_map_count + 256 - 1) / 256;
threadblock_size = (input_feature_map_count + threadblock_count - 1) / threadblock_count;
threadblock_size = (threadblock_size + 32 - 1) / 32 * 32;
}
return threadblock_size;
}
}
}
| 21437adc117dfbcb6c80e66f52324b8ca44a1f75.cu | /*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "negative_log_likelihood_layer_updater_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "../negative_log_likelihood_layer.h"
#include "../neural_network_exception.h"
namespace nnforge
{
namespace cuda
{
extern __shared__ float arr_sh[];
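// One block per (entry, spatial position) pair: threads stride over the feature maps,
// accumulating -sum(actual * log(max(predicted, 1e-20))); the per-thread partial sums
// are then reduced across the block via warp shuffles and the shared staging buffer arr_sh.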
__global__ void negative_log_likelihood_upd_kernel(
float * __restrict output,
const float * __restrict predicted,
const float * __restrict actual,
const float * __restrict scale_mask,
int input_feature_map_count,
int elem_count_per_feature_map,
float scale,
int entry_count)
{
int feature_map_id = threadIdx.x;
int neuron_id = blockIdx.x;
int entry_id = blockIdx.y;
int threadblock_size = blockDim.x;
float err = 0.0F;
int output_offset = entry_id * elem_count_per_feature_map + neuron_id;
float mask = 1.0F;
if (scale_mask)
mask = scale_mask[output_offset];
int thread_id = threadIdx.x;
if (mask != 0.0F)
{
int input_offset = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id;
while (feature_map_id < input_feature_map_count)
{
float actual_val = actual[input_offset];
float predicted_val = predicted[input_offset];
err -= (actual_val > 0.0F) ? actual_val * __logf(max(predicted_val, 1.0e-20F)) : 0.0F;
feature_map_id += threadblock_size;
input_offset += threadblock_size * elem_count_per_feature_map;
}
int lane_id = thread_id & 31;
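// Warp-level reduction: shuffle-down folds the 32 lane values of each warp into lane 0.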
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
err += __shfl_down(err, tx);
int warp_count = threadblock_size >> 5;
if (warp_count > 1)
{
if (lane_id == 0)
arr_sh[thread_id >> 5] = err;
__syncthreads();
if (thread_id < 32)
{
err = 0.0F;
if (thread_id < warp_count)
err = arr_sh[thread_id];
#pragma unroll
for(int tx = 4; tx > 0; tx >>= 1)
err += __shfl_down(err, tx);
}
}
}
if (thread_id == 0)
output[output_offset] = err * (mask * scale);
}
template<bool add_update_to_destination>
__global__ void negative_log_likelihood_backprop_upd_kernel(
float * __restrict output,
const float * __restrict deriv_input_neurons,
const float * __restrict target_input_neurons,
float scale,
int elem_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
if (elem_id < elem_count)
{
float actual_val = target_input_neurons[elem_id];
float predicted_val = deriv_input_neurons[elem_id];
float gradient = (actual_val > 0.0F) ? __fdividef(actual_val, max(predicted_val, 1.0e-20F)) : 0.0F;
if (add_update_to_destination)
output[elem_id] += scale * gradient;
else
output[elem_id] = scale * gradient;
}
}
template<bool add_update_to_destination>
__global__ void negative_log_likelihood_backprop_upd_kernel(
float * __restrict output,
const float * __restrict deriv_input_neurons,
const float * __restrict target_input_neurons,
const float * __restrict scale_mask,
float scale,
int elem_count_per_feature_map,
int input_feature_map_count,
int entry_count)
{
int neuron_id = blockDim.x * blockIdx.x + threadIdx.x;
int feature_map_id = blockDim.y * blockIdx.y + threadIdx.y;
int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
if ((neuron_id < elem_count_per_feature_map) && (feature_map_id < input_feature_map_count) && (entry_id < entry_count))
{
int elem_id = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id;
float mask = scale_mask[entry_id * elem_count_per_feature_map + neuron_id];
float gradient = 0.0F;
if (mask != 0.0F)
{
float actual_val = target_input_neurons[elem_id];
float predicted_val = deriv_input_neurons[elem_id];
gradient = (actual_val > 0.0F) ? __fdividef(actual_val, max(predicted_val, 1.0e-20F)) : 0.0F;
gradient *= scale * mask;
}
if (add_update_to_destination)
output[elem_id] += gradient;
else
output[elem_id] = gradient;
}
}
negative_log_likelihood_layer_updater_cuda::negative_log_likelihood_layer_updater_cuda()
{
}
negative_log_likelihood_layer_updater_cuda::~negative_log_likelihood_layer_updater_cuda()
{
}
void negative_log_likelihood_layer_updater_cuda::enqueue_forward_propagation(
cudaStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::ptr temporary_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
int threadblock_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count);
const float * scale_mask = 0;
if (input_buffers.size() > 2)
scale_mask = *input_buffers[2];
int smem_size = ((threadblock_size + 32 - 1) / 32) * sizeof(float);
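// Dynamic shared memory: one float per warp for the partial sums staged in arr_sh.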
negative_log_likelihood_upd_kernel<<<dim3(input_elem_count_per_feature_map_list[0], entry_count), threadblock_size, smem_size, stream_id>>>(
*output_buffer,
*input_buffers[0],
*input_buffers[1],
scale_mask,
input_configuration_specific_list[0].feature_map_count,
input_elem_count_per_feature_map_list[0],
scale,
entry_count);
}
void negative_log_likelihood_layer_updater_cuda::enqueue_backward_data_propagation(
cudaStream_t stream_id,
unsigned int input_index,
cuda_linear_buffer_device::ptr input_errors_buffer,
cuda_linear_buffer_device::const_ptr output_errors_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
cuda_linear_buffer_device::const_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::const_ptr temporary_fixed_buffer,
cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
bool add_update_to_destination,
unsigned int entry_count)
{
if (input_neurons_buffers.size() > 2)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
input_elem_count_per_feature_map_list[0],
input_configuration_specific_list[0].feature_map_count,
entry_count);
if (add_update_to_destination)
negative_log_likelihood_backprop_upd_kernel<true><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_errors_buffer,
*input_neurons_buffers[0],
*input_neurons_buffers[1],
*input_neurons_buffers[2],
scale,
input_elem_count_per_feature_map_list[0],
input_configuration_specific_list[0].feature_map_count,
entry_count);
else
negative_log_likelihood_backprop_upd_kernel<false><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_errors_buffer,
*input_neurons_buffers[0],
*input_neurons_buffers[1],
*input_neurons_buffers[2],
scale,
input_elem_count_per_feature_map_list[0],
input_configuration_specific_list[0].feature_map_count,
entry_count);
}
else
{
int elem_count = entry_count * input_elem_count_per_entry_list[0];
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
if (add_update_to_destination)
negative_log_likelihood_backprop_upd_kernel<true><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_errors_buffer,
*input_neurons_buffers[0],
*input_neurons_buffers[1],
scale,
elem_count);
else
negative_log_likelihood_backprop_upd_kernel<false><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_errors_buffer,
*input_neurons_buffers[0],
*input_neurons_buffers[1],
scale,
elem_count);
}
}
void negative_log_likelihood_layer_updater_cuda::updater_configured()
{
if (actions.find(layer_action(layer_action::backward_data, 1)) != actions.end())
throw neural_network_exception("negative_log_likelihood_layer_updater_cuda cannot do backward propagation for targets");
if (actions.find(layer_action(layer_action::backward_data, 2)) != actions.end())
throw neural_network_exception("negative_log_likelihood_layer_updater_cuda cannot do backward propagation for scale mask");
nnforge_shared_ptr<const negative_log_likelihood_layer> layer_derived = nnforge_dynamic_pointer_cast<const negative_log_likelihood_layer>(layer_schema);
scale = layer_derived->scale;
}
bool negative_log_likelihood_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const
{
return false;
}
int negative_log_likelihood_layer_updater_cuda::get_threadblock_size(int input_feature_map_count)
{
int threadblock_size;
if (input_feature_map_count < 256)
{
threadblock_size = (input_feature_map_count + 32 - 1) / 32 * 32;
}
else
{
int threadblock_count = (input_feature_map_count + 256 - 1) / 256;
threadblock_size = (input_feature_map_count + threadblock_count - 1) / threadblock_count;
threadblock_size = (threadblock_size + 32 - 1) / 32 * 32;
}
return threadblock_size;
}
}
}
|
38289e6598773e260704b032423c797f6d6684ca.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample implements a conjugate gradient solver on GPU
* using CUBLAS and CUSPARSE
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h> // for gettimeofday() used by mclock()
/* Using the hipBLAS and hipSPARSE interfaces */
#include <hip/hip_runtime.h>
#include <hipsparse.h>
#include <hipblas.h>
// Utilities and system includes
#include <helper_functions.h> // helper for shared functions common to CUDA SDK samples
#include <helper_cuda.h> // helper function CUDA error checking and initialization
const char *sSDKname = "conjugateGradient";
double mclock(){
struct timeval tp;
double sec,usec;
gettimeofday( &tp, NULL );
sec = double( tp.tv_sec );
usec = double( tp.tv_usec )/1E6;
return sec + usec;
}
#define dot_BS 32
#define kernel_BS 32
/* genTridiag: generate a random tridiagonal symmetric matrix */
void genTridiag(int *I, int *J, float *val, int N, int nz)
{
double RAND_MAXi = 1e6;
double val_r = 12.345 * 1e5;
I[0] = 0, J[0] = 0, J[1] = 1;
val[0] = (float)val_r/RAND_MAXi + 10.0f;
val[1] = (float)val_r/RAND_MAXi;
int start;
for (int i = 1; i < N; i++)
{
if (i > 1)
{
I[i] = I[i-1]+3;
}
else
{
I[1] = 2;
}
start = (i-1)*3 + 2;
J[start] = i - 1;
J[start+1] = i;
if (i < N-1)
{
J[start+2] = i + 1;
}
val[start] = val[start-1];
val[start+1] = (float)val_r/RAND_MAXi + 10.0f;
if (i < N-1)
{
val[start+2] = (float)val_r/RAND_MAXi;
}
}
I[N] = nz;
}
void cgs_basic(int argc, char **argv, int N, int M){
//int M = 0, N = 0,
int nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-10f;
const int max_iter = 1000;
float *x;
float *rhs;
float a, b, na, r0, r1;
int *d_col, *d_row;
float *d_val, *d_x, dot;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, beta, alpham1;
// This will pick the best possible CUDA capable device
hipDeviceProp_t deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11)
{
printf("%s: requires a minimum CUDA compute 1.1 capability\n", sSDKname);
hipDeviceReset();
exit(EXIT_SUCCESS);
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
//M = N = 32*64;//10; //1048576;
printf("M = %d, N = %d\n", M, N);
nz = (N-2)*3 + 4;
I = (int *)malloc(sizeof(int)*(N+1));
J = (int *)malloc(sizeof(int)*nz);
val = (float *)malloc(sizeof(float)*nz);
genTridiag(I, J, val, N, nz);
/*
for (int i = 0; i < nz; i++){
printf("%d\t", J[i]);
}
printf("\n");
for (int i = 0; i < nz; i++){
printf("%2f\t", val[i]);
}
*/
x = (float *)malloc(sizeof(float)*N);
rhs = (float *)malloc(sizeof(float)*N);
for (int i = 0; i < N; i++)
{
rhs[i] = 1.0;
x[i] = 0.0;
}
/* Get handle to the CUBLAS context */
hipblasHandle_t cublasHandle = 0;
hipblasStatus_t cublasStatus;
cublasStatus = hipblasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
hipsparseHandle_t cusparseHandle = 0;
hipsparseStatus_t cusparseStatus;
cusparseStatus = hipsparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
hipsparseMatDescr_t descr = 0;
cusparseStatus = hipsparseCreateMatDescr(&descr);
checkCudaErrors(cusparseStatus);
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(hipMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_row, (N+1)*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_val, nz*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_x, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_r, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_p, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_Ax, N*sizeof(float)));
hipMemcpy(d_col, J, nz*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_row, I, (N+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_val, val, nz*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_r, rhs, N*sizeof(float), hipMemcpyHostToDevice);
alpha = 1.0;
alpham1 = -1.0;
beta = 0.0;
r0 = 0.;
double t_start = mclock();
hipsparseScsrmv(cusparseHandle,HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax);
hipblasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1); // REPLACE THIS FUNCTION (I)
cublasStatus = hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (II)
k = 1;
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
cublasStatus = hipblasSscal(cublasHandle, N, &b, d_p, 1); // REPLACE THIS FUNCTION (I)
cublasStatus = hipblasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1); // REPLACE THIS FUNCTION (I)
}
else
{
cublasStatus = hipblasScopy(cublasHandle, N, d_r, 1, d_p, 1); // REPLACE THIS FUNCTION (I)
}
hipsparseScsrmv(cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax); // REPLACE THIS FUNCTION (III)
cublasStatus = hipblasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot); // REPLACE THIS FUNCTION (II)
a = r1 / dot;
cublasStatus = hipblasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1); // REPLACE THIS FUNCTION (I)
na = -a;
cublasStatus = hipblasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1); // REPLACE THIS FUNCTION (I)
r0 = r1;
cublasStatus = hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (II)
hipDeviceSynchronize();
printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
printf("TIME OF CGS_BASIC = %f\n", mclock() - t_start);
hipMemcpy(x, d_x, N*sizeof(float), hipMemcpyDeviceToHost);
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++)
{
rsum = 0.0;
for (int j = I[i]; j < I[i+1]; j++)
{
rsum += val[j]*x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err)
{
err = diff;
}
}
hipsparseDestroy(cusparseHandle);
hipblasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
hipFree(d_col);
hipFree(d_row);
hipFree(d_val);
hipFree(d_x);
hipFree(d_r);
hipFree(d_p);
hipFree(d_Ax);
hipDeviceReset();
printf("Test Summary: Error amount = %e\n", err);
//exit((k <= max_iter) ? 0 : 1);
}
__global__ void axpy(float *d_p, float *d_r, float alpha, int numElements){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
d_p[i] = d_r[i]*alpha + d_p[i];
}
}
__global__ void scal(float *d_p, float alpha, int numElements){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
d_p[i] = d_p[i]*alpha;
}
}
__global__ void cpy(float *d_p, float *d_r, int numElements){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
d_p[i] = d_r[i];
}
}
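// csrmv: naive CSR sparse matrix-vector product, one thread per row: d_Ax[i] = sum_j d_val[j] * d_x[d_col[j]].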
__global__ void csrmv(float *d_Ax, int *d_col, int *d_row, float *d_val, float *d_x, int numElements){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < numElements){
float sub = 0.0;
int j;
for (j = d_row[i]; j < d_row[i+1]; j++){
sub += d_val[j] * d_x[d_col[j]];
}
d_Ax[i] = sub;
}
}
void cgs_TODO(int argc, char **argv, int N, int M){
//int M = 0, N = 0,
int nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-10f;
const int max_iter = 1000;
float *x;
float *rhs;
float a, b, na, r0, r1;
int *d_col, *d_row;
float *d_val, *d_x, dot;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, beta, alpham1;
// This will pick the best possible CUDA capable device
hipDeviceProp_t deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11)
{
printf("%s: requires a minimum CUDA compute 1.1 capability\n", sSDKname);
hipDeviceReset();
exit(EXIT_SUCCESS);
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
//M = N = 32*64;//10; //1048576;
printf("M = %d, N = %d\n", M, N);
nz = (N-2)*3 + 4;
I = (int *)malloc(sizeof(int)*(N+1));
J = (int *)malloc(sizeof(int)*nz);
val = (float *)malloc(sizeof(float)*nz);
genTridiag(I, J, val, N, nz);
/*
for (int i = 0; i < nz; i++){
printf("%d\t", J[i]);
}
printf("\n");
for (int i = 0; i < nz; i++){
printf("%2f\t", val[i]);
}
*/
x = (float *)malloc(sizeof(float)*N);
rhs = (float *)malloc(sizeof(float)*N);
for (int i = 0; i < N; i++)
{
rhs[i] = 1.0;
x[i] = 0.0;
}
/* Get handle to the CUBLAS context */
hipblasHandle_t cublasHandle = 0;
hipblasStatus_t cublasStatus;
cublasStatus = hipblasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
hipsparseHandle_t cusparseHandle = 0;
hipsparseStatus_t cusparseStatus;
cusparseStatus = hipsparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
hipsparseMatDescr_t descr = 0;
cusparseStatus = hipsparseCreateMatDescr(&descr);
checkCudaErrors(cusparseStatus);
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(hipMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_row, (N+1)*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_val, nz*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_x, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_r, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_p, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_Ax, N*sizeof(float)));
hipMemcpy(d_col, J, nz*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_row, I, (N+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_val, val, nz*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_r, rhs, N*sizeof(float), hipMemcpyHostToDevice);
alpha = 1.0;
alpham1 = -1.0;
beta = 0.0;
r0 = 0.;
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
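// Launch configuration shared by the hand-written kernels above: one thread per vector element.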
// sparse matrix vector product: d_Ax = A * d_x
//hipsparseScsrmv(cusparseHandle,HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax); // REPLACE THIS FUNCTION (TASK I)
hipLaunchKernelGGL(( csrmv), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_Ax, d_col, d_row, d_val, d_x, N);
//axpy: d_r = d_r + alpham1 * d_Ax
//hipblasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1);
hipLaunchKernelGGL(( axpy), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_r, d_Ax, alpham1, N); // REPLACE THIS FUNCTION (TASK I)
//dot: r1 = d_r * d_r
cublasStatus = hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (TASK III)
k = 1;
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
//scal: d_p = b * d_p
//cublasStatus = hipblasSscal(cublasHandle, N, &b, d_p, 1);
hipLaunchKernelGGL(( scal), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_p, b, N); // REPLACE THIS FUNCTION (TASK I)
//axpy: d_p = d_p + alpha * d_r
//cublasStatus = hipblasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1);
hipLaunchKernelGGL(( axpy), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_p, d_r, alpha, N); // REPLACE THIS FUNCTION (TASK I)
}
else
{
//cpy: d_p = d_r
//cublasStatus = hipblasScopy(cublasHandle, N, d_r, 1, d_p, 1);
hipLaunchKernelGGL(( cpy), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_p, d_r, N); // REPLACE THIS FUNCTION (TASK I)
}
//sparse matrix-vector product: d_Ax = A * d_p
//hipsparseScsrmv(cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax); // REPLACE THIS FUNCTION (TASK II)
hipLaunchKernelGGL(( csrmv), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_Ax, d_col, d_row, d_val, d_p, N);
cublasStatus = hipblasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot); // REPLACE THIS FUNCTION (TASK III)
a = r1 / dot;
//axpy: d_x = d_x + a*d_p
//cublasStatus = hipblasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1);
hipLaunchKernelGGL(( axpy), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_x, d_p, a, N); // REPLACE THIS FUNCTION (TASK I)
na = -a;
//axpy: d_r = d_r + na * d_Ax
//cublasStatus = hipblasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1); // REPLACE THIS FUNCTION (TASK I)
hipLaunchKernelGGL(( axpy), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_r, d_Ax, na, N);
r0 = r1;
//dot: r1 = d_r * d_r
cublasStatus = hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (TASK III)
hipDeviceSynchronize();
printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
hipMemcpy(x, d_x, N*sizeof(float), hipMemcpyDeviceToHost);
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++)
{
rsum = 0.0;
for (int j = I[i]; j < I[i+1]; j++)
{
rsum += val[j]*x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err)
{
err = diff;
}
}
hipsparseDestroy(cusparseHandle);
hipblasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
hipFree(d_col);
hipFree(d_row);
hipFree(d_val);
hipFree(d_x);
hipFree(d_r);
hipFree(d_p);
hipFree(d_Ax);
hipDeviceReset();
printf("Test Summary: Error amount = %e\n", err);
//exit((k <= max_iter) ? 0 : 1);
}
int main(int argc, char **argv)
{
//int N = 1e6;//1 << 20;
//int N = 256 * (1<<10) -10 ; //1e6;//1 << 20;
int N = 1e5;
int M = N;
cgs_basic(argc, argv, N, M);
cgs_TODO(argc, argv, N, M);
}
| 38289e6598773e260704b032423c797f6d6684ca.cu | /*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample implements a conjugate gradient solver on GPU
* using CUBLAS and CUSPARSE
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h> // for gettimeofday() used by mclock()
/* Using updated (v2) interfaces to cublas and cusparse */
#include <cuda_runtime.h>
#include <cusparse_v2.h>
#include <cublas_v2.h>
// Utilities and system includes
#include <helper_functions.h> // helper for shared functions common to CUDA SDK samples
#include <helper_cuda.h> // helper function CUDA error checking and initialization
const char *sSDKname = "conjugateGradient";
double mclock(){
struct timeval tp;
double sec,usec;
gettimeofday( &tp, NULL );
sec = double( tp.tv_sec );
usec = double( tp.tv_usec )/1E6;
return sec + usec;
}
#define dot_BS 32
#define kernel_BS 32
/* genTridiag: generate a random tridiagonal symmetric matrix */
void genTridiag(int *I, int *J, float *val, int N, int nz)
{
double RAND_MAXi = 1e6;
double val_r = 12.345 * 1e5;
I[0] = 0, J[0] = 0, J[1] = 1;
val[0] = (float)val_r/RAND_MAXi + 10.0f;
val[1] = (float)val_r/RAND_MAXi;
int start;
for (int i = 1; i < N; i++)
{
if (i > 1)
{
I[i] = I[i-1]+3;
}
else
{
I[1] = 2;
}
start = (i-1)*3 + 2;
J[start] = i - 1;
J[start+1] = i;
if (i < N-1)
{
J[start+2] = i + 1;
}
val[start] = val[start-1];
val[start+1] = (float)val_r/RAND_MAXi + 10.0f;
if (i < N-1)
{
val[start+2] = (float)val_r/RAND_MAXi;
}
}
I[N] = nz;
}
void cgs_basic(int argc, char **argv, int N, int M){
//int M = 0, N = 0,
int nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-10f;
const int max_iter = 1000;
float *x;
float *rhs;
float a, b, na, r0, r1;
int *d_col, *d_row;
float *d_val, *d_x, dot;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, beta, alpham1;
// This will pick the best possible CUDA capable device
cudaDeviceProp deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11)
{
printf("%s: requires a minimum CUDA compute 1.1 capability\n", sSDKname);
cudaDeviceReset();
exit(EXIT_SUCCESS);
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
//M = N = 32*64;//10; //1048576;
printf("M = %d, N = %d\n", M, N);
nz = (N-2)*3 + 4;
I = (int *)malloc(sizeof(int)*(N+1));
J = (int *)malloc(sizeof(int)*nz);
val = (float *)malloc(sizeof(float)*nz);
genTridiag(I, J, val, N, nz);
/*
for (int i = 0; i < nz; i++){
printf("%d\t", J[i]);
}
printf("\n");
for (int i = 0; i < nz; i++){
printf("%2f\t", val[i]);
}
*/
x = (float *)malloc(sizeof(float)*N);
rhs = (float *)malloc(sizeof(float)*N);
for (int i = 0; i < N; i++)
{
rhs[i] = 1.0;
x[i] = 0.0;
}
/* Get handle to the CUBLAS context */
cublasHandle_t cublasHandle = 0;
cublasStatus_t cublasStatus;
cublasStatus = cublasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
cusparseHandle_t cusparseHandle = 0;
cusparseStatus_t cusparseStatus;
cusparseStatus = cusparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
cusparseMatDescr_t descr = 0;
cusparseStatus = cusparseCreateMatDescr(&descr);
checkCudaErrors(cusparseStatus);
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(cudaMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_row, (N+1)*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_val, nz*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_x, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_r, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_p, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_Ax, N*sizeof(float)));
cudaMemcpy(d_col, J, nz*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_row, I, (N+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_val, val, nz*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_r, rhs, N*sizeof(float), cudaMemcpyHostToDevice);
alpha = 1.0;
alpham1 = -1.0;
beta = 0.0;
r0 = 0.;
double t_start = mclock();
cusparseScsrmv(cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax);
cublasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1); // REPLACE THIS FUNCTION (I)
cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (II)
k = 1;
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
cublasStatus = cublasSscal(cublasHandle, N, &b, d_p, 1); // REPLACE THIS FUNCTION (I)
cublasStatus = cublasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1); // REPLACE THIS FUNCTION (I)
}
else
{
cublasStatus = cublasScopy(cublasHandle, N, d_r, 1, d_p, 1); // REPLACE THIS FUNCTION (I)
}
cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax); // REPLACE THIS FUNCTION (III)
cublasStatus = cublasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot); // REPLACE THIS FUNCTION (II)
a = r1 / dot;
cublasStatus = cublasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1); // REPLACE THIS FUNCTION (I)
na = -a;
cublasStatus = cublasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1); // REPLACE THIS FUNCTION (I)
r0 = r1;
cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (II)
cudaThreadSynchronize();
printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
printf("TIME OF CGS_BASIC = %f\n", mclock() - t_start);
cudaMemcpy(x, d_x, N*sizeof(float), cudaMemcpyDeviceToHost);
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++)
{
rsum = 0.0;
for (int j = I[i]; j < I[i+1]; j++)
{
rsum += val[j]*x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err)
{
err = diff;
}
}
cusparseDestroy(cusparseHandle);
cublasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
cudaFree(d_col);
cudaFree(d_row);
cudaFree(d_val);
cudaFree(d_x);
cudaFree(d_r);
cudaFree(d_p);
cudaFree(d_Ax);
cudaDeviceReset();
printf("Test Summary: Error amount = %e\n", err);
//exit((k <= max_iter) ? 0 : 1);
}
__global__ void axpy(float *d_p, float *d_r, float alpha, int numElements){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
d_p[i] = d_r[i]*alpha + d_p[i];
}
}
__global__ void scal(float *d_p, float alpha, int numElements){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
d_p[i] = d_p[i]*alpha;
}
}
__global__ void cpy(float *d_p, float *d_r, int numElements){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
d_p[i] = d_r[i];
}
}
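// csrmv: naive CSR sparse matrix-vector product, one thread per row: d_Ax[i] = sum_j d_val[j] * d_x[d_col[j]].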
__global__ void csrmv(float *d_Ax, int *d_col, int *d_row, float *d_val, float *d_x, int numElements){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < numElements){
float sub = 0.0;
int j;
for (j = d_row[i]; j < d_row[i+1]; j++){
sub += d_val[j] * d_x[d_col[j]];
}
d_Ax[i] = sub;
}
}
void cgs_TODO(int argc, char **argv, int N, int M){
//int M = 0, N = 0,
int nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-10f;
const int max_iter = 1000;
float *x;
float *rhs;
float a, b, na, r0, r1;
int *d_col, *d_row;
float *d_val, *d_x, dot;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, beta, alpham1;
// This will pick the best possible CUDA capable device
cudaDeviceProp deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11)
{
printf("%s: requires a minimum CUDA compute 1.1 capability\n", sSDKname);
cudaDeviceReset();
exit(EXIT_SUCCESS);
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
//M = N = 32*64;//10; //1048576;
printf("M = %d, N = %d\n", M, N);
nz = (N-2)*3 + 4;
I = (int *)malloc(sizeof(int)*(N+1));
J = (int *)malloc(sizeof(int)*nz);
val = (float *)malloc(sizeof(float)*nz);
genTridiag(I, J, val, N, nz);
/*
for (int i = 0; i < nz; i++){
printf("%d\t", J[i]);
}
printf("\n");
for (int i = 0; i < nz; i++){
printf("%2f\t", val[i]);
}
*/
x = (float *)malloc(sizeof(float)*N);
rhs = (float *)malloc(sizeof(float)*N);
for (int i = 0; i < N; i++)
{
rhs[i] = 1.0;
x[i] = 0.0;
}
/* Get handle to the CUBLAS context */
cublasHandle_t cublasHandle = 0;
cublasStatus_t cublasStatus;
cublasStatus = cublasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
cusparseHandle_t cusparseHandle = 0;
cusparseStatus_t cusparseStatus;
cusparseStatus = cusparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
cusparseMatDescr_t descr = 0;
cusparseStatus = cusparseCreateMatDescr(&descr);
checkCudaErrors(cusparseStatus);
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(cudaMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_row, (N+1)*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_val, nz*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_x, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_r, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_p, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_Ax, N*sizeof(float)));
cudaMemcpy(d_col, J, nz*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_row, I, (N+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_val, val, nz*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_r, rhs, N*sizeof(float), cudaMemcpyHostToDevice);
alpha = 1.0;
alpham1 = -1.0;
beta = 0.0;
r0 = 0.;
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
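// Launch configuration shared by the hand-written kernels above: one thread per vector element.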
// sparse matrix vector product: d_Ax = A * d_x
//cusparseScsrmv(cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax); // REPLACE THIS FUNCTION (TASK I)
csrmv<<<blocksPerGrid, threadsPerBlock>>>(d_Ax, d_col, d_row, d_val, d_x, N);
//axpy: d_r = d_r + alpham1 * d_Ax
//cublasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1);
axpy<<<blocksPerGrid, threadsPerBlock>>>(d_r, d_Ax, alpham1, N); // REPLACE THIS FUNCTION (TASK I)
//dot: r1 = d_r * d_r
cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (TASK III)
k = 1;
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
//scal: d_p = b * d_p
//cublasStatus = cublasSscal(cublasHandle, N, &b, d_p, 1);
scal<<<blocksPerGrid, threadsPerBlock>>>(d_p, b, N); // REPLACE THIS FUNCTION (TASK I)
//axpy: d_p = d_p + alpha * d_r
//cublasStatus = cublasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1);
axpy<<<blocksPerGrid, threadsPerBlock>>>(d_p, d_r, alpha, N); // REPLACE THIS FUNCTION (TASK I)
}
else
{
//cpy: d_p = d_r
//cublasStatus = cublasScopy(cublasHandle, N, d_r, 1, d_p, 1);
cpy<<<blocksPerGrid, threadsPerBlock>>>(d_p, d_r, N); // REPLACE THIS FUNCTION (TASK I)
}
//sparse matrix-vector product: d_Ax = A * d_p
//cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax); // REPLACE THIS FUNCTION (TASK II)
csrmv<<<blocksPerGrid, threadsPerBlock>>>(d_Ax, d_col, d_row, d_val, d_p, N);
cublasStatus = cublasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot); // REPLACE THIS FUNCTION (TASK III)
a = r1 / dot;
//axpy: d_x = d_x + a*d_p
//cublasStatus = cublasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1);
axpy<<<blocksPerGrid, threadsPerBlock>>>(d_x, d_p, a, N); // REPLACE THIS FUNCTION (TASK I)
na = -a;
//axpy: d_r = d_r + na * d_Ax
//cublasStatus = cublasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1); // REPLACE THIS FUNCTION (TASK I)
axpy<<<blocksPerGrid, threadsPerBlock>>>(d_r, d_Ax, na, N);
r0 = r1;
//dot: r1 = d_r * d_r
cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (TASK III)
cudaThreadSynchronize();
printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
cudaMemcpy(x, d_x, N*sizeof(float), cudaMemcpyDeviceToHost);
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++)
{
rsum = 0.0;
for (int j = I[i]; j < I[i+1]; j++)
{
rsum += val[j]*x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err)
{
err = diff;
}
}
cusparseDestroy(cusparseHandle);
cublasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
cudaFree(d_col);
cudaFree(d_row);
cudaFree(d_val);
cudaFree(d_x);
cudaFree(d_r);
cudaFree(d_p);
cudaFree(d_Ax);
cudaDeviceReset();
printf("Test Summary: Error amount = %e\n", err);
//exit((k <= max_iter) ? 0 : 1);
}
int main(int argc, char **argv)
{
//int N = 1e6;//1 << 20;
//int N = 256 * (1<<10) -10 ; //1e6;//1 << 20;
int N = 1e5;
int M = N;
cgs_basic(argc, argv, N, M);
cgs_TODO(argc, argv, N, M);
}
|
3f7a28fae0475c96f7be6853a45c67493b96a3ce.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include "CudaChecks.hpp"
void cudaAssert(const hipError_t err, const char *file, const int line)
{
if(hipSuccess != err)
{
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", file, line, hipGetErrorString(err));
exit(1);
}
}
| 3f7a28fae0475c96f7be6853a45c67493b96a3ce.cu | #include <iostream>
#include <stdio.h>
#include "CudaChecks.hpp"
void cudaAssert(const cudaError err, const char *file, const int line)
{
if(cudaSuccess != err)
{
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", file, line, cudaGetErrorString(err));
exit(1);
}
}
|
0b0ec19355f3cde591c3c2d487e66847fbe4d0f5.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdlib>
#include <string>
#include <vector>
#include <fstream>
#include "solver.h"
using namespace std;
typedef unsigned char uchar;
int num_train = 60000, num_test = 10000;
int reverseInt(int n) {
int bytes = 4;
unsigned char ch[bytes];
for (int i = 0; i < bytes; i++) {
ch[i] = (n >> i * 8) & 255;
}
int p = 0;
for (int i = 0; i < bytes; i++) {
p += (int) ch[i] << (bytes - i - 1) * 8;
}
return p;
}
void readMNIST(vector<vector<uchar> > &train_images, vector<vector<uchar> > &test_images, vector<uchar> &train_labels, vector<uchar> &test_labels) {
printf("MNIST reading has been called from mnist_test.cu\n");
printf("MNIST reading has been called- from mninst_test.cu\n");
printf("MNIST reading has been called- from mninst_test.cu\n");
printf("MNIST reading has been called- from mninst_test.cu\n");
string filename_train_images = "data/train-images.idx3-ubyte";
string filename_train_labels = "data/train-labels.idx1-ubyte";
string filename_test_images = "data/t10k-images.idx3-ubyte";
string filename_test_labels = "data/t10k-labels.idx1-ubyte";
// read train/test images
for (int i = 0; i < 2; i++) {
string filename;
if (i == 0)
filename = filename_train_images;
else
filename = filename_test_images;
ifstream f(filename.c_str(), ios::binary);
if (!f.is_open())
printf("Cannot read MNIST from %s\n", filename.c_str());
// read metadata
int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0;
f.read((char *) &magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
f.read((char *) &n_images, sizeof(n_images));
n_images = reverseInt(n_images);
f.read((char *) &n_rows, sizeof(n_rows));
n_rows = reverseInt(n_rows);
f.read((char *) &n_cols, sizeof(n_cols));
n_cols = reverseInt(n_cols);
for (int k = 0; k < n_images; k++) {
vector<uchar> temp;
temp.reserve(n_rows * n_cols);
for (int j = 0; j < n_rows * n_cols; j++) {
uchar t = 0;
f.read((char *)&t, sizeof(t));
temp.push_back(t);
}
if (i == 0)
train_images.push_back(temp);
else
test_images.push_back(temp);
}
f.close();
}
// read train/test labels
for (int i = 0; i < 2; i++) {
string filename;
if (i == 0)
filename = filename_train_labels;
else
filename = filename_test_labels;
ifstream f(filename.c_str(), ios::binary);
if (!f.is_open())
printf("Cannot read MNIST from %s\n", filename.c_str());
// read metadata
int magic_number = 0, n_labels = 0;
f.read((char *) &magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
f.read((char *) &n_labels, sizeof(n_labels));
n_labels = reverseInt(n_labels);
for (int k = 0; k < n_labels; k++) {
uchar t = 0;
f.read((char *)&t, sizeof(t));
if (i == 0)
train_labels.push_back(t);
else
test_labels.push_back(t);
}
f.close();
}
}
int main() {
int rows = 28, cols = 28, channels = 1;
float *f_train_images, *f_test_images;
int *f_train_labels, *f_test_labels;
// int rows = 28, cols = 28, channels = 1;
int input_size = rows * cols * channels;
// f_train_images = (float *)malloc(num_train * input_size * sizeof(float));
// f_train_labels = (int *)malloc(num_train * sizeof(int));
checkCudaErrors(hipHostMalloc(&f_train_images, num_train * input_size * sizeof(float)));
checkCudaErrors(hipHostMalloc(&f_train_labels, num_train * sizeof(int)));
f_test_images = (float *)malloc(num_test * input_size * sizeof(float));
f_test_labels = (int *)malloc(num_test * sizeof(int));
{
vector<vector<uchar> > train_images, test_images;
vector<uchar> train_labels, test_labels;
readMNIST(train_images, test_images, train_labels, test_labels);
for (int k = 0; k < num_train; k++) {
for (int j = 0; j < rows * cols; j++) {
f_train_images[k * input_size + j] = (float)train_images[k][j];
}
f_train_labels[k] = (int)train_labels[k];
}
for (int k = 0; k < num_test; k++) {
for (int j = 0; j < rows * cols; j++) {
f_test_images[k * input_size + j] = (float)test_images[k][j];
}
f_test_labels[k] = (int)test_labels[k];
}
}
float *mean_image;
mean_image = (float *)malloc(input_size * sizeof(float));
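// Per-pixel mean over the training set; both the train and test images are zero-centered with it below.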
for (int i = 0; i < input_size; i++) {
mean_image[i] = 0;
for (int k = 0; k < num_train; k++) {
mean_image[i] += f_train_images[k * input_size + i];
}
mean_image[i] /= num_train;
}
for (int i = 0; i < num_train; i++) {
for (int j = 0; j < input_size; j++) {
f_train_images[i * input_size + j] -= mean_image[j];
}
}
for (int i = 0; i < num_test; i++) {
for (int j = 0; j < input_size; j++) {
f_test_images[i * input_size + j] -= mean_image[j];
}
}
vector<LayerSpecifier> layer_specifier;
{
ConvDescriptor layer0;
layer0.initializeValues(1, 3, 3, 3, 28, 28, 1, 1, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer0;
layer_specifier.push_back(temp);
}
{
FCDescriptor layer1;
layer1.initializeValues(3 * 28 * 28, 50, RELU);
LayerSpecifier temp;
temp.initPointer(FULLY_CONNECTED);
*((FCDescriptor *)temp.params) = layer1;
layer_specifier.push_back(temp);
}
{
FCDescriptor layer2;
layer2.initializeValues(50, 10);
LayerSpecifier temp;
temp.initPointer(FULLY_CONNECTED);
*((FCDescriptor *)temp.params) = layer2;
layer_specifier.push_back(temp);
}
{
SoftmaxDescriptor layer2_smax;
layer2_smax.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, 10, 1, 1);
LayerSpecifier temp;
temp.initPointer(SOFTMAX);
*((SoftmaxDescriptor *)temp.params) = layer2_smax;
layer_specifier.push_back(temp);
}
int batch_size = 128;
long long dropout_seed = 1;
float softmax_eps = 1e-8;
float init_std_dev = 0.01;
NeuralNet net(layer_specifier, DATA_FLOAT, batch_size, TENSOR_NCHW, dropout_seed, softmax_eps, init_std_dev, vDNN_CONV, vDNN_PERFORMANCE_OPTIMAL, SGD);
int num_epoch = 1000;
double learning_rate = 1e-4;
double learning_rate_decay = 0.9;
Solver solver(&net, (void *)f_train_images, f_train_labels, (void *)f_train_images, f_train_labels, num_epoch, SGD, learning_rate, learning_rate_decay, num_train, num_train);
vector<float> loss;
vector<int> val_acc;
solver.train(loss, val_acc);
int num_correct;
solver.checkAccuracy(f_train_images, f_train_labels, num_train, &num_correct);
cout << num_correct << endl;
}
| 0b0ec19355f3cde591c3c2d487e66847fbe4d0f5.cu | #include <iostream>
#include <cstdlib>
#include <string>
#include "solver.h"
using namespace std;
typedef unsigned char uchar;
int num_train = 60000, num_test = 10000;
int reverseInt(int n) {
int bytes = 4;
unsigned char ch[bytes];
for (int i = 0; i < bytes; i++) {
ch[i] = (n >> i * 8) & 255;
}
int p = 0;
for (int i = 0; i < bytes; i++) {
p += (int) ch[i] << (bytes - i - 1) * 8;
}
return p;
}
void readMNIST(vector<vector<uchar> > &train_images, vector<vector<uchar> > &test_images, vector<uchar> &train_labels, vector<uchar> &test_labels) {
printf("MNIST reading has been called from mnist_test.cu\n");
printf("MNIST reading has been called- from mninst_test.cu\n");
printf("MNIST reading has been called- from mninst_test.cu\n");
printf("MNIST reading has been called- from mninst_test.cu\n");
string filename_train_images = "data/train-images.idx3-ubyte";
string filename_train_labels = "data/train-labels.idx1-ubyte";
string filename_test_images = "data/t10k-images.idx3-ubyte";
string filename_test_labels = "data/t10k-labels.idx1-ubyte";
// read train/test images
for (int i = 0; i < 2; i++) {
string filename;
if (i == 0)
filename = filename_train_images;
else
filename = filename_test_images;
ifstream f(filename.c_str(), ios::binary);
if (!f.is_open())
printf("Cannot read MNIST from %s\n", filename.c_str());
// read metadata
int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0;
f.read((char *) &magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
f.read((char *) &n_images, sizeof(n_images));
n_images = reverseInt(n_images);
f.read((char *) &n_rows, sizeof(n_rows));
n_rows = reverseInt(n_rows);
f.read((char *) &n_cols, sizeof(n_cols));
n_cols = reverseInt(n_cols);
for (int k = 0; k < n_images; k++) {
vector<uchar> temp;
temp.reserve(n_rows * n_cols);
for (int j = 0; j < n_rows * n_cols; j++) {
uchar t = 0;
f.read((char *)&t, sizeof(t));
temp.push_back(t);
}
if (i == 0)
train_images.push_back(temp);
else
test_images.push_back(temp);
}
f.close();
}
// read train/test labels
for (int i = 0; i < 2; i++) {
string filename;
if (i == 0)
filename = filename_train_labels;
else
filename = filename_test_labels;
ifstream f(filename.c_str(), ios::binary);
if (!f.is_open())
printf("Cannot read MNIST from %s\n", filename.c_str());
// read metadata
int magic_number = 0, n_labels = 0;
f.read((char *) &magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
f.read((char *) &n_labels, sizeof(n_labels));
n_labels = reverseInt(n_labels);
for (int k = 0; k < n_labels; k++) {
uchar t = 0;
f.read((char *)&t, sizeof(t));
if (i == 0)
train_labels.push_back(t);
else
test_labels.push_back(t);
}
f.close();
}
}
int main() {
int rows = 28, cols = 28, channels = 1;
float *f_train_images, *f_test_images;
int *f_train_labels, *f_test_labels;
// int rows = 28, cols = 28, channels = 1;
int input_size = rows * cols * channels;
// f_train_images = (float *)malloc(num_train * input_size * sizeof(float));
// f_train_labels = (int *)malloc(num_train * sizeof(int));
checkCudaErrors(cudaMallocHost(&f_train_images, num_train * input_size * sizeof(float)));
checkCudaErrors(cudaMallocHost(&f_train_labels, num_train * sizeof(int)));
f_test_images = (float *)malloc(num_test * input_size * sizeof(float));
f_test_labels = (int *)malloc(num_test * sizeof(int));
{
vector<vector<uchar> > train_images, test_images;
vector<uchar> train_labels, test_labels;
readMNIST(train_images, test_images, train_labels, test_labels);
for (int k = 0; k < num_train; k++) {
for (int j = 0; j < rows * cols; j++) {
f_train_images[k * input_size + j] = (float)train_images[k][j];
}
f_train_labels[k] = (int)train_labels[k];
}
for (int k = 0; k < num_test; k++) {
for (int j = 0; j < rows * cols; j++) {
f_test_images[k * input_size + j] = (float)test_images[k][j];
}
f_test_labels[k] = (int)test_labels[k];
}
}
float *mean_image;
mean_image = (float *)malloc(input_size * sizeof(float));
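// Per-pixel mean over the training set; both the train and test images are zero-centered with it below.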
for (int i = 0; i < input_size; i++) {
mean_image[i] = 0;
for (int k = 0; k < num_train; k++) {
mean_image[i] += f_train_images[k * input_size + i];
}
mean_image[i] /= num_train;
}
for (int i = 0; i < num_train; i++) {
for (int j = 0; j < input_size; j++) {
f_train_images[i * input_size + j] -= mean_image[j];
}
}
for (int i = 0; i < num_test; i++) {
for (int j = 0; j < input_size; j++) {
f_test_images[i * input_size + j] -= mean_image[j];
}
}
vector<LayerSpecifier> layer_specifier;
{
ConvDescriptor layer0;
layer0.initializeValues(1, 3, 3, 3, 28, 28, 1, 1, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer0;
layer_specifier.push_back(temp);
}
{
FCDescriptor layer1;
layer1.initializeValues(3 * 28 * 28, 50, RELU);
LayerSpecifier temp;
temp.initPointer(FULLY_CONNECTED);
*((FCDescriptor *)temp.params) = layer1;
layer_specifier.push_back(temp);
}
{
FCDescriptor layer2;
layer2.initializeValues(50, 10);
LayerSpecifier temp;
temp.initPointer(FULLY_CONNECTED);
*((FCDescriptor *)temp.params) = layer2;
layer_specifier.push_back(temp);
}
{
SoftmaxDescriptor layer2_smax;
layer2_smax.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, 10, 1, 1);
LayerSpecifier temp;
temp.initPointer(SOFTMAX);
*((SoftmaxDescriptor *)temp.params) = layer2_smax;
layer_specifier.push_back(temp);
}
int batch_size = 128;
long long dropout_seed = 1;
float softmax_eps = 1e-8;
float init_std_dev = 0.01;
NeuralNet net(layer_specifier, DATA_FLOAT, batch_size, TENSOR_NCHW, dropout_seed, softmax_eps, init_std_dev, vDNN_CONV, vDNN_PERFORMANCE_OPTIMAL, SGD);
int num_epoch = 1000;
double learning_rate = 1e-4;
double learning_rate_decay = 0.9;
Solver solver(&net, (void *)f_train_images, f_train_labels, (void *)f_train_images, f_train_labels, num_epoch, SGD, learning_rate, learning_rate_decay, num_train, num_train);
vector<float> loss;
vector<int> val_acc;
solver.train(loss, val_acc);
int num_correct;
solver.checkAccuracy(f_train_images, f_train_labels, num_train, &num_correct);
cout << num_correct << endl;
}
|
814c5dcd86640c4013a8e3d934dec331463779bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/memory/memory.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/stack_grad_kernel.h"
namespace phi {
template <typename T, typename IntType>
__global__ void UnStackHelperCUDAKernel(const T* __restrict__ input,
int pre_dim_size,
int split_dim_size,
int suf_dim_size,
int num_split,
T** output_ptrs) {
assert(blockDim.y == 1);
assert(blockDim.z == 1);
// In this case they are equal
assert(split_dim_size % num_split == 0);
IntType size = pre_dim_size * split_dim_size * suf_dim_size;
IntType each_dim_size = split_dim_size / num_split;
for (IntType offset = blockIdx.x * blockDim.x + threadIdx.x; offset < size;
offset += blockDim.x * gridDim.x) {
IntType i = offset / (split_dim_size * suf_dim_size);
IntType j = (offset % (split_dim_size * suf_dim_size)) / suf_dim_size;
IntType k = offset % suf_dim_size;
T* output = output_ptrs[j / each_dim_size];
if (output == nullptr) {
return;
}
IntType output_ind = i * each_dim_size * suf_dim_size +
(j % each_dim_size) * suf_dim_size + k;
*(output + output_ind) = input[offset];
}
}
template <typename T, typename Context>
void StackGradKernel(const Context& dev_ctx,
const DenseTensor& out,
int axis,
std::vector<DenseTensor*> x_grad) {
if (axis < 0) axis += out.dims().size();
int n = out.dims()[axis];
PADDLE_ENFORCE_EQ(n,
x_grad.size(),
phi::errors::InvalidArgument(
"Output x_grad size should be equal to n, but"
" received n is:%d x_grad size is:%d.",
n,
x_grad.size()));
// x_grad is output, so save each data address, then copy each dy into dx_data
std::vector<T*> outputs(n);
for (size_t j = 0; j < x_grad.size(); ++j) {
if (x_grad[j] == nullptr) {
outputs[j] = nullptr;
continue;
}
if (x_grad[j]->numel() != 0UL) {
T* ptr = dev_ctx.template Alloc<T>(x_grad[j]);
outputs[j] = ptr;
} else {
outputs[j] = nullptr;
}
}
auto dy_data = out.data<T>();
// each x_grad should have same shape
int dy_pre = 1, dy_suf = 1;
auto dy_dims = out.dims();
int split_dim = n;
for (int i = 0; i < axis; ++i) {
dy_pre *= dy_dims[i];
}
dy_suf = out.numel() / (split_dim * dy_pre);
auto tmp_out_data =
paddle::memory::Alloc(dev_ctx, outputs.size() * sizeof(T*));
paddle::memory::Copy(dev_ctx.GetPlace(),
tmp_out_data->ptr(),
phi::CPUPlace(),
reinterpret_cast<void*>(outputs.data()),
outputs.size() * sizeof(T*),
dev_ctx.stream());
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(
dev_ctx, dy_pre * split_dim * dy_suf);
if (out.numel() < std::numeric_limits<int32_t>::max()) {
hipLaunchKernelGGL(( UnStackHelperCUDAKernel<T, int32_t>)
, dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
dev_ctx.stream(), dy_data,
dy_pre,
split_dim,
dy_suf,
split_dim,
reinterpret_cast<T**>(tmp_out_data->ptr()));
} else {
hipLaunchKernelGGL(( UnStackHelperCUDAKernel<T, int64_t>)
, dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
dev_ctx.stream(), dy_data,
dy_pre,
split_dim,
dy_suf,
split_dim,
reinterpret_cast<T**>(tmp_out_data->ptr()));
}
}
} // namespace phi
PD_REGISTER_KERNEL(stack_grad,
GPU,
ALL_LAYOUT,
phi::StackGradKernel,
float,
double,
int64_t,
int,
phi::dtype::float16,
phi::dtype::bfloat16) {}
| 814c5dcd86640c4013a8e3d934dec331463779bf.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/memory/memory.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/stack_grad_kernel.h"
namespace phi {
template <typename T, typename IntType>
__global__ void UnStackHelperCUDAKernel(const T* __restrict__ input,
int pre_dim_size,
int split_dim_size,
int suf_dim_size,
int num_split,
T** output_ptrs) {
assert(blockDim.y == 1);
assert(blockDim.z == 1);
// In this case they are equal
assert(split_dim_size % num_split == 0);
IntType size = pre_dim_size * split_dim_size * suf_dim_size;
IntType each_dim_size = split_dim_size / num_split;
for (IntType offset = blockIdx.x * blockDim.x + threadIdx.x; offset < size;
offset += blockDim.x * gridDim.x) {
IntType i = offset / (split_dim_size * suf_dim_size);
IntType j = (offset % (split_dim_size * suf_dim_size)) / suf_dim_size;
IntType k = offset % suf_dim_size;
T* output = output_ptrs[j / each_dim_size];
if (output == nullptr) {
return;
}
IntType output_ind = i * each_dim_size * suf_dim_size +
(j % each_dim_size) * suf_dim_size + k;
*(output + output_ind) = input[offset];
}
}
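// Shape sketch (illustrative, not from the original source): with
// pre_dim_size = 2, split_dim_size = 6, suf_dim_size = 4 and num_split = 3,
// the input is viewed as [2, 6, 4] and each output_ptrs[j] receives a
// [2, 2, 4] slice (each_dim_size = 2); a flat offset is decomposed into
// (i, j, k) above and routed to output j / each_dim_size. In the
// StackGradKernel launches below, num_split equals split_dim_size, so
// each_dim_size is 1 and every output gets one [pre, 1, suf] slice.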
template <typename T, typename Context>
void StackGradKernel(const Context& dev_ctx,
const DenseTensor& out,
int axis,
std::vector<DenseTensor*> x_grad) {
if (axis < 0) axis += out.dims().size();
int n = out.dims()[axis];
PADDLE_ENFORCE_EQ(n,
x_grad.size(),
phi::errors::InvalidArgument(
"Output x_grad size should be equal to n, but"
" received n is:%d x_grad size is:%d.",
n,
x_grad.size()));
// x_grad is output, so save each data address, then copy each dy into dx_data
std::vector<T*> outputs(n);
for (size_t j = 0; j < x_grad.size(); ++j) {
if (x_grad[j] == nullptr) {
outputs[j] = nullptr;
continue;
}
if (x_grad[j]->numel() != 0UL) {
T* ptr = dev_ctx.template Alloc<T>(x_grad[j]);
outputs[j] = ptr;
} else {
outputs[j] = nullptr;
}
}
auto dy_data = out.data<T>();
// each x_grad should have same shape
int dy_pre = 1, dy_suf = 1;
auto dy_dims = out.dims();
int split_dim = n;
for (int i = 0; i < axis; ++i) {
dy_pre *= dy_dims[i];
}
dy_suf = out.numel() / (split_dim * dy_pre);
auto tmp_out_data =
paddle::memory::Alloc(dev_ctx, outputs.size() * sizeof(T*));
paddle::memory::Copy(dev_ctx.GetPlace(),
tmp_out_data->ptr(),
phi::CPUPlace(),
reinterpret_cast<void*>(outputs.data()),
outputs.size() * sizeof(T*),
dev_ctx.stream());
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(
dev_ctx, dy_pre * split_dim * dy_suf);
if (out.numel() < std::numeric_limits<int32_t>::max()) {
UnStackHelperCUDAKernel<T, int32_t>
<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
dev_ctx.stream()>>>(dy_data,
dy_pre,
split_dim,
dy_suf,
split_dim,
reinterpret_cast<T**>(tmp_out_data->ptr()));
} else {
UnStackHelperCUDAKernel<T, int64_t>
<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
dev_ctx.stream()>>>(dy_data,
dy_pre,
split_dim,
dy_suf,
split_dim,
reinterpret_cast<T**>(tmp_out_data->ptr()));
}
}
} // namespace phi
PD_REGISTER_KERNEL(stack_grad,
GPU,
ALL_LAYOUT,
phi::StackGradKernel,
float,
double,
int64_t,
int,
phi::dtype::float16,
phi::dtype::bfloat16) {}
|
d432002c4ca7816c1cc1a0d648bc5843888ede0f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define ulong unsigned long long
#define uint unsigned int
//Conventional mod function
//A*B%MODP
/*
__device__ uint ABModC(uint a,uint b){
ulong tmp=((ulong)(__umulhi(a,b)))*(1ULL<<32)+(ulong)(a*b);
return (uint)(tmp%MODP);
}
*/
//Hand-written version that uses only multiplications, exploiting the hard-coded divisor
__device__ uint ABModC(uint a,uint b){
uint blo=a*b;
uint bhi=__umulhi(a,b);
uint b1=bhi*(1<<(32-MODLOG))+blo/(1<<MODLOG);
uint b2lo=b1*rMODP;
uint b2hi=__umulhi(b1,rMODP);
uint b2=b2hi*(1<<(32-MODLOG))+b2lo/(1<<MODLOG);
uint b2alo=b2*MODP;
uint b2ahi=__umulhi(b2,MODP);
bhi-=b2ahi;
if (blo<b2alo)bhi-=1;
blo-=b2alo;
//At this point b is at most 3*MODP-1
if (bhi>0){
if (blo<MODP)bhi-=1;
blo-=MODP;
}
//At this point the maximum of b is 3*MODP-1
if (blo>=MODP){
blo-=MODP;
}
//At this point the maximum of b is 2*MODP-1
if (blo>=MODP){
blo-=MODP;
}
return blo;
}
/*
//Montgomery reduction
ulong MontgomeryReduction(ulong t)
{
ulong tc = t%4294967296;
ulong c = tc * NR;
//c %= 4294967296;
c &= MASK;
c *= MODP;
c += t;
c >>= NB;
if (c >= MODP)c -= MODP;
return c;
}
//Returns a*b mod MODP
uint MontgomeryMul(uint a,uint b){
return (uint)MontgomeryReduction(MontgomeryReduction((ulong)a * (ulong)b) * R2);
}
//Returns a^b mod MODP
uint MontgomeryExp(uint a,uint b){
ulong p = MontgomeryReduction((ulong)a * R2);
ulong x = MontgomeryReduction(R2);
uint y = b;
while (y!=0){
if (y%2==1){
x = MontgomeryReduction(x * p);
}
p = MontgomeryReduction(p * p);
y >>= 1;
}
return (uint)MontgomeryReduction(x);
}
*/
//exp(a,b)%MODP
__device__ uint ModExp(uint a,uint b){
uint ans=1;
uint aa=a;
while(b!=0){
if (b%2==1) ans=ABModC(ans,aa);
aa=ABModC(aa,aa);
b/=2;
}
return ans;
}
//After the inverse transform we must divide by N, as in an ordinary FFT.
// a/arrayLength mod P
__device__ uint DivN_f(uint a,uint arrayLength)
{
uint as =a/arrayLength;
uint ar =a-as*arrayLength;
uint pn =MODP/arrayLength;
if (ar!=0){
as+=(arrayLength-ar)*pn+1;
}
return as;
}
//arrayLength2 = arrayLength/2
__global__ void FMT(uint *arrayA,uint loopCnt_Pow2,uint omega,uint arrayLength2 ) {
uint idx = threadIdx.x+blockIdx.x*256;
uint t2 = idx%loopCnt_Pow2;
uint t0 = idx*2-t2;
uint t1 = t0+loopCnt_Pow2;
uint w0;
uint w1;
uint arrayAt0=arrayA[t0];
uint arrayAt1=arrayA[t1];
uint r0;
uint r1;
w0=ModExp(omega,t2*(arrayLength2/loopCnt_Pow2));
w1=ABModC(arrayAt1,w0);
r0=arrayAt0-w1+MODP;
r1=arrayAt0+w1;
if (r0>=MODP){r0-=MODP;}
if (r1>=MODP){r1-=MODP;}
arrayA[t1]=r0;
arrayA[t0]=r1;
}
__global__ void uFMT(uint *arrayA,uint loopCnt_Pow2,uint omega,uint arrayLength2 ) {
uint idx = threadIdx.x+blockIdx.x*256;
uint t2 = idx%loopCnt_Pow2;
uint t0 = idx*2-t2;
uint t1 = t0+loopCnt_Pow2;
uint w0;
uint w1;
uint arrayAt0=arrayA[t0];
uint arrayAt1=arrayA[t1];
uint r0;
uint r1;
w0=ModExp(omega,t2*(arrayLength2/loopCnt_Pow2));
r0=arrayAt0-arrayAt1+MODP;
r1=arrayAt0+arrayAt1;
if (r0>=MODP){r0-=MODP;}
if (r1>=MODP){r1-=MODP;}
w1=ABModC(r0,w0);
arrayA[t1]=w1;
arrayA[t0]=r1;
}
__global__ void iFMT(uint *arrayA,uint loopCnt_Pow2,uint omega,uint arrayLength2 ) {
uint idx = threadIdx.x+blockIdx.x*256;
uint t2 = idx%loopCnt_Pow2;
uint t0 = idx*2-t2;
uint t1 = t0+loopCnt_Pow2;
uint w0;
uint w1;
uint arrayAt0=arrayA[t0];
uint arrayAt1=arrayA[t1];
uint r0;
uint r1;
w0=ModExp(omega,arrayLength2*2-t2*(arrayLength2/loopCnt_Pow2));
w1=ABModC(arrayAt1,w0);
r0=arrayAt0-w1+MODP;
r1=arrayAt0+w1;
if (r0>=MODP){r0-=MODP;}
if (r1>=MODP){r1-=MODP;}
arrayA[t1]=r0;
arrayA[t0]=r1;
}
__global__ void iuFMT(uint *arrayA,uint loopCnt_Pow2,uint omega,uint arrayLength2 ) {
uint idx = threadIdx.x+blockIdx.x*256;
uint t2 = idx%loopCnt_Pow2;
uint t0 = idx*2-t2;
uint t1 = t0+loopCnt_Pow2;
uint w0;
uint w1;
uint arrayAt0=arrayA[t0];
uint arrayAt1=arrayA[t1];
uint r0;
uint r1;
w0=ModExp(omega,arrayLength2*2-t2*(arrayLength2/loopCnt_Pow2));
r0=arrayAt0-arrayAt1+MODP;
r1=arrayAt0+arrayAt1;
if (r0>=MODP){r0-=MODP;}
if (r1>=MODP){r1-=MODP;}
w1=ABModC(r0,w0);
arrayA[t1]=w1;
arrayA[t0]=r1;
}
//Element-wise multiplication of corresponding entries
__global__ void Mul_i_i(uint *arrayA,uint *arrayB ) {
uint idx = threadIdx.x+blockIdx.x*256;
uint w0;
w0=ABModC(arrayB[idx],arrayA[idx]);
arrayB[idx]=w0;
}
//Divide by N after the inverse transform; division under a modulus needs special handling
__global__ void DivN(uint *arrayA,uint arrayLength ) {
uint idx = threadIdx.x+blockIdx.x*256;
arrayA[idx]=DivN_f(arrayA[idx],arrayLength);
}
//Pre-processing for the negacyclic transform
//sqrt_omega^(2N) == 1 (mod P)
//a[0]*=ModExp(sqrt_omega,0)
//a[1]*=ModExp(sqrt_omega,1)
//a[2]*=ModExp(sqrt_omega,2)
//a[3]*=ModExp(sqrt_omega,3)
__global__ void PreNegFMT(uint *arrayA,uint *arrayB,uint sqrt_omega,uint arrayLength) {
uint idx = threadIdx.x+blockIdx.x*256;
uint w0=ModExp(sqrt_omega,idx);
arrayB[idx]=ABModC(arrayA[idx],w0);
}
//Post-processing for the negacyclic transform
//sqrt_omega^(2N) == 1 (mod P)
//a[0]*=ModExp(sqrt_omega,-0)
//a[1]*=ModExp(sqrt_omega,-1)
//a[2]*=ModExp(sqrt_omega,-2)
//a[3]*=ModExp(sqrt_omega,-3)
__global__ void PostNegFMT(uint *arrayA,uint sqrt_omega,uint arrayLength) {
uint idx = threadIdx.x+blockIdx.x*256;
uint w0=ModExp(sqrt_omega,arrayLength*2-idx);
arrayA[idx]=ABModC(arrayA[idx],w0);
}
//From the cyclic and negacyclic results, recover the upper-half and lower-half digits
__global__ void PosNeg_To_HiLo(uint *arrayE,uint *arrayA,uint *arrayB,uint arrayLength) {
uint idx = threadIdx.x+blockIdx.x*256;
uint a=arrayA[idx];
uint b=arrayB[idx];
uint subab=(a-b+MODP);//first we want (a-b)/2
uint flag=subab%2;
subab-=MODP*((subab>=MODP)*2-1)*flag;//after this subab is guaranteed to be even
subab/=2;//compute (a-b)/2 MOD P
arrayE[idx+arrayLength]=subab;//the upper half digits are (a-b)/2 MOD P
arrayE[idx]=a-subab+MODP*(a<subab);//a-((a-b)/2)=a/2+b/2, i.e. (a+b)/2 is the lower half digits
}
//Function written to reduce the number of writes to VRAM
//Fused version of PostNegFMT, DivN and PosNeg_To_HiLo
__global__ void PostFMT_DivN_HiLo(uint *arrayE,uint *arrayA,uint *arrayB,uint arrayLength,uint sqrt_omega) {
uint idx = threadIdx.x+blockIdx.x*256;
uint a=arrayA[idx];
uint b=arrayB[idx];
//Negacyclic post-processing part
uint w0=ModExp(sqrt_omega,idx+(idx%2)*arrayLength);
b=ABModC(b,w0);
//Divide by N
a=DivN_f(a,arrayLength);
b=DivN_f(b,arrayLength);
//The rest is the same as PosNeg_To_HiLo
uint subab=(a-b+MODP);//first we want (a-b)/2
uint flag=subab%2;
subab-=MODP*((subab>=MODP)*2-1)*flag;//after this subab is guaranteed to be even
subab/=2;//compute (a-b)/2 MOD P
arrayE[idx+arrayLength]=subab;//the upper half digits are (a-b)/2 MOD P
arrayE[idx]=a-subab+MODP*(a<subab);//a-((a-b)/2)=a/2+b/2, i.e. (a+b)/2 is the lower half digits
} | d432002c4ca7816c1cc1a0d648bc5843888ede0f.cu | #define ulong unsigned long long
#define uint unsigned int
//Conventional mod function
//A*B%MODP
/*
__device__ uint ABModC(uint a,uint b){
ulong tmp=((ulong)(__umulhi(a,b)))*(1ULL<<32)+(ulong)(a*b);
return (uint)(tmp%MODP);
}
*/
//Hand-written version that uses only multiplications, exploiting the hard-coded divisor
__device__ uint ABModC(uint a,uint b){
uint blo=a*b;
uint bhi=__umulhi(a,b);
uint b1=bhi*(1<<(32-MODLOG))+blo/(1<<MODLOG);
uint b2lo=b1*rMODP;
uint b2hi=__umulhi(b1,rMODP);
uint b2=b2hi*(1<<(32-MODLOG))+b2lo/(1<<MODLOG);
uint b2alo=b2*MODP;
uint b2ahi=__umulhi(b2,MODP);
bhi-=b2ahi;
if (blo<b2alo)bhi-=1;
blo-=b2alo;
//At this point b is at most 3*MODP-1
if (bhi>0){
if (blo<MODP)bhi-=1;
blo-=MODP;
}
//At this point the maximum of b is 3*MODP-1
if (blo>=MODP){
blo-=MODP;
}
//At this point the maximum of b is 2*MODP-1
if (blo>=MODP){
blo-=MODP;
}
return blo;
}
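//Illustrative sketch, not part of the original file: ABModC is meant to agree
//with the plain modular product. Assuming MODP, MODLOG and rMODP are supplied
//as compile-time definitions (as they are used elsewhere in this file), a
//host-side reference for spot-checking it could look like:
//
//  unsigned int ABModC_reference(unsigned int a, unsigned int b) {
//      //64-bit product, then the ordinary % operator
//      return (unsigned int)(((unsigned long long)a * b) % MODP);
//  }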
/*
//Montgomery reduction
ulong MontgomeryReduction(ulong t)
{
ulong tc = t%4294967296;
ulong c = tc * NR;
//c %= 4294967296;
c &= MASK;
c *= MODP;
c += t;
c >>= NB;
if (c >= MODP)c -= MODP;
return c;
}
//Returns a*b mod MODP
uint MontgomeryMul(uint a,uint b){
return (uint)MontgomeryReduction(MontgomeryReduction((ulong)a * (ulong)b) * R2);
}
//Returns a^b mod MODP
uint MontgomeryExp(uint a,uint b){
ulong p = MontgomeryReduction((ulong)a * R2);
ulong x = MontgomeryReduction(R2);
uint y = b;
while (y!=0){
if (y%2==1){
x = MontgomeryReduction(x * p);
}
p = MontgomeryReduction(p * p);
y >>= 1;
}
return (uint)MontgomeryReduction(x);
}
*/
//exp(a,b)%MODP
__device__ uint ModExp(uint a,uint b){
uint ans=1;
uint aa=a;
while(b!=0){
if (b%2==1) ans=ABModC(ans,aa);
aa=ABModC(aa,aa);
b/=2;
}
return ans;
}
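//Worked example of the square-and-multiply loop above, for b = 5 (binary 101):
//  bit 0 = 1 -> ans = a,           aa becomes a^2
//  bit 1 = 0 -> ans unchanged,     aa becomes a^4
//  bit 2 = 1 -> ans = a * a^4 = a^5
//so ModExp(a,5) returns a^5 mod MODP after three loop iterations.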
//After the inverse transform we must divide by N, as in an ordinary FFT.
// a/arrayLength mod P
__device__ uint DivN_f(uint a,uint arrayLength)
{
uint as =a/arrayLength;
uint ar =a-as*arrayLength;
uint pn =MODP/arrayLength;
if (ar!=0){
as+=(arrayLength-ar)*pn+1;
}
return as;
}
//arrayLength2 = arrayLength/2
__global__ void FMT(uint *arrayA,uint loopCnt_Pow2,uint omega,uint arrayLength2 ) {
uint idx = threadIdx.x+blockIdx.x*256;
uint t2 = idx%loopCnt_Pow2;
uint t0 = idx*2-t2;
uint t1 = t0+loopCnt_Pow2;
uint w0;
uint w1;
uint arrayAt0=arrayA[t0];
uint arrayAt1=arrayA[t1];
uint r0;
uint r1;
w0=ModExp(omega,t2*(arrayLength2/loopCnt_Pow2));
w1=ABModC(arrayAt1,w0);
r0=arrayAt0-w1+MODP;
r1=arrayAt0+w1;
if (r0>=MODP){r0-=MODP;}
if (r1>=MODP){r1-=MODP;}
arrayA[t1]=r0;
arrayA[t0]=r1;
}
__global__ void uFMT(uint *arrayA,uint loopCnt_Pow2,uint omega,uint arrayLength2 ) {
uint idx = threadIdx.x+blockIdx.x*256;
uint t2 = idx%loopCnt_Pow2;
uint t0 = idx*2-t2;
uint t1 = t0+loopCnt_Pow2;
uint w0;
uint w1;
uint arrayAt0=arrayA[t0];
uint arrayAt1=arrayA[t1];
uint r0;
uint r1;
w0=ModExp(omega,t2*(arrayLength2/loopCnt_Pow2));
r0=arrayAt0-arrayAt1+MODP;
r1=arrayAt0+arrayAt1;
if (r0>=MODP){r0-=MODP;}
if (r1>=MODP){r1-=MODP;}
w1=ABModC(r0,w0);
arrayA[t1]=w1;
arrayA[t0]=r1;
}
__global__ void iFMT(uint *arrayA,uint loopCnt_Pow2,uint omega,uint arrayLength2 ) {
uint idx = threadIdx.x+blockIdx.x*256;
uint t2 = idx%loopCnt_Pow2;
uint t0 = idx*2-t2;
uint t1 = t0+loopCnt_Pow2;
uint w0;
uint w1;
uint arrayAt0=arrayA[t0];
uint arrayAt1=arrayA[t1];
uint r0;
uint r1;
w0=ModExp(omega,arrayLength2*2-t2*(arrayLength2/loopCnt_Pow2));
w1=ABModC(arrayAt1,w0);
r0=arrayAt0-w1+MODP;
r1=arrayAt0+w1;
if (r0>=MODP){r0-=MODP;}
if (r1>=MODP){r1-=MODP;}
arrayA[t1]=r0;
arrayA[t0]=r1;
}
__global__ void iuFMT(uint *arrayA,uint loopCnt_Pow2,uint omega,uint arrayLength2 ) {
uint idx = threadIdx.x+blockIdx.x*256;
uint t2 = idx%loopCnt_Pow2;
uint t0 = idx*2-t2;
uint t1 = t0+loopCnt_Pow2;
uint w0;
uint w1;
uint arrayAt0=arrayA[t0];
uint arrayAt1=arrayA[t1];
uint r0;
uint r1;
w0=ModExp(omega,arrayLength2*2-t2*(arrayLength2/loopCnt_Pow2));
r0=arrayAt0-arrayAt1+MODP;
r1=arrayAt0+arrayAt1;
if (r0>=MODP){r0-=MODP;}
if (r1>=MODP){r1-=MODP;}
w1=ABModC(r0,w0);
arrayA[t1]=w1;
arrayA[t0]=r1;
}
//Element-wise multiplication of corresponding entries
__global__ void Mul_i_i(uint *arrayA,uint *arrayB ) {
uint idx = threadIdx.x+blockIdx.x*256;
uint w0;
w0=ABModC(arrayB[idx],arrayA[idx]);
arrayB[idx]=w0;
}
//Divide by N after the inverse transform; division under a modulus needs special handling
__global__ void DivN(uint *arrayA,uint arrayLength ) {
uint idx = threadIdx.x+blockIdx.x*256;
arrayA[idx]=DivN_f(arrayA[idx],arrayLength);
}
//Pre-processing for the negacyclic transform
//sqrt_omega^(2N) == 1 (mod P)
//a[0]*=ModExp(sqrt_omega,0)
//a[1]*=ModExp(sqrt_omega,1)
//a[2]*=ModExp(sqrt_omega,2)
//a[3]*=ModExp(sqrt_omega,3)
__global__ void PreNegFMT(uint *arrayA,uint *arrayB,uint sqrt_omega,uint arrayLength) {
uint idx = threadIdx.x+blockIdx.x*256;
uint w0=ModExp(sqrt_omega,idx);
arrayB[idx]=ABModC(arrayA[idx],w0);
}
//Post-processing for the negacyclic transform
//sqrt_omega^(2N) == 1 (mod P)
//a[0]*=ModExp(sqrt_omega,-0)
//a[1]*=ModExp(sqrt_omega,-1)
//a[2]*=ModExp(sqrt_omega,-2)
//a[3]*=ModExp(sqrt_omega,-3)
__global__ void PostNegFMT(uint *arrayA,uint sqrt_omega,uint arrayLength) {
uint idx = threadIdx.x+blockIdx.x*256;
uint w0=ModExp(sqrt_omega,arrayLength*2-idx);
arrayA[idx]=ABModC(arrayA[idx],w0);
}
//From the cyclic and negacyclic results, recover the upper-half and lower-half digits
__global__ void PosNeg_To_HiLo(uint *arrayE,uint *arrayA,uint *arrayB,uint arrayLength) {
uint idx = threadIdx.x+blockIdx.x*256;
uint a=arrayA[idx];
uint b=arrayB[idx];
uint subab=(a-b+MODP);//first we want (a-b)/2
uint flag=subab%2;
subab-=MODP*((subab>=MODP)*2-1)*flag;//after this subab is guaranteed to be even
subab/=2;//compute (a-b)/2 MOD P
arrayE[idx+arrayLength]=subab;//the upper half digits are (a-b)/2 MOD P
arrayE[idx]=a-subab+MODP*(a<subab);//a-((a-b)/2)=a/2+b/2, i.e. (a+b)/2 is the lower half digits
}
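//Why the recombination above works (illustrative note; it assumes arrayA holds
//the cyclic product and arrayB the negacyclic one, as the surrounding kernels
//suggest): write the full length-2N convolution as c = lo + hi*x^N. Then
//  cyclic     (mod x^N - 1): a = lo + hi
//  negacyclic (mod x^N + 1): b = lo - hi
//so hi = (a-b)/2 and lo = a - hi = (a+b)/2, which is exactly what is stored
//into arrayE[idx+arrayLength] and arrayE[idx], with the division by 2 carried
//out modulo P via the even-ness adjustment above.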
//Function written to reduce the number of writes to VRAM
//Fused version of PostNegFMT, DivN and PosNeg_To_HiLo
__global__ void PostFMT_DivN_HiLo(uint *arrayE,uint *arrayA,uint *arrayB,uint arrayLength,uint sqrt_omega) {
uint idx = threadIdx.x+blockIdx.x*256;
uint a=arrayA[idx];
uint b=arrayB[idx];
//Negacyclic post-processing part
uint w0=ModExp(sqrt_omega,idx+(idx%2)*arrayLength);
b=ABModC(b,w0);
//Divide by N
a=DivN_f(a,arrayLength);
b=DivN_f(b,arrayLength);
//The rest is the same as PosNeg_To_HiLo
uint subab=(a-b+MODP);//first we want (a-b)/2
uint flag=subab%2;
subab-=MODP*((subab>=MODP)*2-1)*flag;//after this subab is guaranteed to be even
subab/=2;//compute (a-b)/2 MOD P
arrayE[idx+arrayLength]=subab;//the upper half digits are (a-b)/2 MOD P
arrayE[idx]=a-subab+MODP*(a<subab);//a-((a-b)/2)=a/2+b/2, i.e. (a+b)/2 is the lower half digits
} |
eb918136aaa4b49d204cdd5bf574feb5447fd576.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void prepare_indices(const unsigned num_keys, unsigned *data) {
unsigned index = threadIdx.x +
blockIdx.x * blockDim.x +
blockIdx.y * blockDim.x * gridDim.x;
if (index < num_keys) {
data[index] = index;
}
} | eb918136aaa4b49d204cdd5bf574feb5447fd576.cu | #include "includes.h"
__global__ void prepare_indices(const unsigned num_keys, unsigned *data) {
unsigned index = threadIdx.x +
blockIdx.x * blockDim.x +
blockIdx.y * blockDim.x * gridDim.x;
if (index < num_keys) {
data[index] = index;
}
} |
6372975013f98321f731de678cfb7d2f3c8ac642.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <unistd.h>
#define ALLOC_SIZE 1024
__global__ void access_offset_kernel(int offset) {
int* devMem = (int*)malloc(ALLOC_SIZE*sizeof(int));
#ifdef R
if (offset >= 0)
volatile int i = devMem[(ALLOC_SIZE-1) + offset];
else
volatile int i = devMem[offset];
#elif W
if (offset >= 0)
devMem[(ALLOC_SIZE-1) + offset] = 42;
else
devMem[offset] = 42;
#endif
free(devMem);
}
int main(int argc, char** argv) {
if (argc != 3) {
fprintf(stderr, "Usage: %s -o <offset>\n", argv[0]);
abort();
}
int offset = 0;
int c;
while ((c = getopt(argc, argv, "o:")) != -1) {
switch(c) {
case 'o':
offset = atoi(optarg);
break;
default:
fprintf(stderr, "Usage: %s -o <offset>\n", argv[0]);
abort();
}
}
hipThreadSetLimit(hipLimitMallocHeapSize, ALLOC_SIZE*4*sizeof(int));
hipLaunchKernelGGL(( access_offset_kernel), dim3(1),dim3(1), 0, 0, offset);
hipDeviceReset();
return 0;
}
| 6372975013f98321f731de678cfb7d2f3c8ac642.cu | #include <stdio.h>
#include <unistd.h>
#define ALLOC_SIZE 1024
__global__ void access_offset_kernel(int offset) {
int* devMem = (int*)malloc(ALLOC_SIZE*sizeof(int));
#ifdef R
if (offset >= 0)
volatile int i = devMem[(ALLOC_SIZE-1) + offset];
else
volatile int i = devMem[offset];
#elif W
if (offset >= 0)
devMem[(ALLOC_SIZE-1) + offset] = 42;
else
devMem[offset] = 42;
#endif
free(devMem);
}
int main(int argc, char** argv) {
if (argc != 3) {
fprintf(stderr, "Usage: %s -o <offset>\n", argv[0]);
abort();
}
int offset = 0;
int c;
while ((c = getopt(argc, argv, "o:")) != -1) {
switch(c) {
case 'o':
offset = atoi(optarg);
break;
default:
fprintf(stderr, "Usage: %s -o <offset>\n", argv[0]);
abort();
}
}
cudaThreadSetLimit(cudaLimitMallocHeapSize, ALLOC_SIZE*4*sizeof(int));
access_offset_kernel<<<1,1>>>(offset);
cudaDeviceReset();
return 0;
}
|
be3181c6dac45c22b3e0949f055499acbefcfcb3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void transposedMatrixKernel(int* d_a, int* d_b) {
// -:YOUR CODE HERE:-
} | be3181c6dac45c22b3e0949f055499acbefcfcb3.cu | #include "includes.h"
__global__ void transposedMatrixKernel(int* d_a, int* d_b) {
// -:YOUR CODE HERE:-
} |
76f6fa3d0f80ee48a6d36385e4dbd15c1b433d19.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int bindex = blockIdx.y;
int tindex = threadIdx.x;
int offset = tindex * numCols + bindex;
uchar4 rgba = rgbaImage[offset];
greyImage[offset] = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//here the launch covers the whole image: one block per column, one thread per row
const dim3 blockSize(numRows, 1, 1); //TODO
const dim3 gridSize( 1, numCols, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 76f6fa3d0f80ee48a6d36385e4dbd15c1b433d19.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
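//Worked example of the formula above: for an opaque white pixel
//(R,G,B) = (255,255,255), I = .299*255 + .587*255 + .114*255 = 255; for
//(R,G,B) = (100,150,50), I = 29.9 + 88.05 + 5.7 = 123.65, which is stored as
//123 once truncated to an unsigned char. The weights sum to 1, so the result
//always stays within the 0-255 range of the output byte.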
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int bindex = blockIdx.y;
int tindex = threadIdx.x;
int offset = tindex * numCols + bindex;
uchar4 rgba = rgbaImage[offset];
greyImage[offset] = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//here the launch covers the whole image: one block per column, one thread per row
const dim3 blockSize(numRows, 1, 1); //TODO
const dim3 gridSize( 1, numCols, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
927c7b99b3c417a28ea89d0b62cf8abe7fc1df62.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//---------------------------------*-CUDA-*----------------------------------//
// Copyright 2022-2023 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file celeritas/user/DetectorSteps.cu
//---------------------------------------------------------------------------//
#include "DetectorSteps.hh"
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include "corecel/data/Collection.hh"
#include "corecel/data/Copier.hh"
#include "corecel/sys/Device.hh"
#include "corecel/sys/KernelParamCalculator.device.hh"
#include "corecel/sys/Stream.hh"
#include "StepData.hh"
namespace celeritas
{
namespace
{
//---------------------------------------------------------------------------//
// KERNELS
//---------------------------------------------------------------------------//
/*!
* Gather results from active tracks that are in a detector.
*/
__global__ void
gather_step_kernel(DeviceRef<StepStateData> const state, size_type num_valid)
{
CELER_EXPECT(state.size() == state.scratch.size()
&& state.size() >= num_valid);
TrackSlotId tid{KernelParamCalculator::thread_id().get()};
if (!(tid < num_valid))
{
return;
}
#define DS_FAST_GET(CONT, TID) CONT.data().get()[TID.unchecked_get()]
TrackSlotId valid_tid{DS_FAST_GET(state.valid_id, tid)};
CELER_ASSERT(valid_tid < state.size());
// Equivalent to `CONT[tid]` but without debug checking, which causes this
// function to grow large enough to emit warnings
#define DS_COPY_IF_SELECTED(FIELD) \
do \
{ \
if (!state.data.FIELD.empty()) \
{ \
DS_FAST_GET(state.scratch.FIELD, tid) \
= DS_FAST_GET(state.data.FIELD, valid_tid); \
} \
} while (0)
DS_COPY_IF_SELECTED(detector);
DS_COPY_IF_SELECTED(track_id);
for (auto sp : range(StepPoint::size_))
{
DS_COPY_IF_SELECTED(points[sp].time);
DS_COPY_IF_SELECTED(points[sp].pos);
DS_COPY_IF_SELECTED(points[sp].dir);
DS_COPY_IF_SELECTED(points[sp].energy);
}
DS_COPY_IF_SELECTED(event_id);
DS_COPY_IF_SELECTED(parent_id);
DS_COPY_IF_SELECTED(track_step_count);
DS_COPY_IF_SELECTED(step_length);
DS_COPY_IF_SELECTED(particle);
DS_COPY_IF_SELECTED(energy_deposition);
#undef DS_COPY_IF_SELECTED
}
//---------------------------------------------------------------------------//
// KERNEL INTERFACE
//---------------------------------------------------------------------------//
/*!
* Gather results from active tracks that are in a detector.
*/
void gather_step(DeviceRef<StepStateData> const& state, size_type num_valid)
{
if (num_valid == 0)
{
// No valid tracks
return;
}
CELER_LAUNCH_KERNEL(gather_step,
celeritas::device().default_block_size(),
num_valid,
celeritas::device().stream(state.stream_id).get(),
state,
num_valid);
}
//---------------------------------------------------------------------------//
// HELPER FUNCTIONS
//---------------------------------------------------------------------------//
template<class T>
using StateRef
= celeritas::StateCollection<T, Ownership::reference, MemSpace::device>;
//---------------------------------------------------------------------------//
struct HasDetector
{
CELER_FORCEINLINE_FUNCTION bool operator()(DetectorId const& d)
{
return static_cast<bool>(d);
}
};
//---------------------------------------------------------------------------//
template<class T>
void copy_field(std::vector<T>* dst, StateRef<T> const& src, size_type num_valid)
{
if (src.empty() || num_valid == 0)
{
// This attribute is not in use
dst->clear();
return;
}
dst->resize(num_valid);
// Copy all items from valid threads
Copier<T, MemSpace::host> copy{{dst->data(), num_valid}};
copy(MemSpace::device, {src.data().get(), num_valid});
}
//---------------------------------------------------------------------------//
} // namespace
//---------------------------------------------------------------------------//
/*!
* Copy to host results from tracks that interacted with a detector.
*/
template<>
void copy_steps<MemSpace::device>(
DetectorStepOutput* output,
StepStateData<Ownership::reference, MemSpace::device> const& state)
{
CELER_EXPECT(output);
// Store the thread IDs of active tracks that are in a detector
auto start = thrust::device_pointer_cast(state.valid_id.data().get());
auto end = thrust::copy_if(
thrust::device,
thrust::make_counting_iterator(size_type(0)),
thrust::make_counting_iterator(state.size()),
thrust::device_pointer_cast(state.data.detector.data().get()),
start,
HasDetector{});
// Get the number of threads that are active and in a detector
size_type num_valid = end - start;
// Gather the step data on device
gather_step(state, num_valid);
// Resize and copy if the fields are present
#define DS_ASSIGN(FIELD) \
copy_field(&(output->FIELD), state.scratch.FIELD, num_valid)
DS_ASSIGN(detector);
DS_ASSIGN(track_id);
for (auto sp : range(StepPoint::size_))
{
DS_ASSIGN(points[sp].time);
DS_ASSIGN(points[sp].pos);
DS_ASSIGN(points[sp].dir);
DS_ASSIGN(points[sp].energy);
}
DS_ASSIGN(event_id);
DS_ASSIGN(parent_id);
DS_ASSIGN(track_step_count);
DS_ASSIGN(step_length);
DS_ASSIGN(particle);
DS_ASSIGN(energy_deposition);
#undef DS_ASSIGN
CELER_ENSURE(output->detector.size() == num_valid);
CELER_ENSURE(output->track_id.size() == num_valid);
}
//---------------------------------------------------------------------------//
} // namespace celeritas
| 927c7b99b3c417a28ea89d0b62cf8abe7fc1df62.cu | //---------------------------------*-CUDA-*----------------------------------//
// Copyright 2022-2023 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file celeritas/user/DetectorSteps.cu
//---------------------------------------------------------------------------//
#include "DetectorSteps.hh"
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include "corecel/data/Collection.hh"
#include "corecel/data/Copier.hh"
#include "corecel/sys/Device.hh"
#include "corecel/sys/KernelParamCalculator.device.hh"
#include "corecel/sys/Stream.hh"
#include "StepData.hh"
namespace celeritas
{
namespace
{
//---------------------------------------------------------------------------//
// KERNELS
//---------------------------------------------------------------------------//
/*!
* Gather results from active tracks that are in a detector.
*/
__global__ void
gather_step_kernel(DeviceRef<StepStateData> const state, size_type num_valid)
{
CELER_EXPECT(state.size() == state.scratch.size()
&& state.size() >= num_valid);
TrackSlotId tid{KernelParamCalculator::thread_id().get()};
if (!(tid < num_valid))
{
return;
}
#define DS_FAST_GET(CONT, TID) CONT.data().get()[TID.unchecked_get()]
TrackSlotId valid_tid{DS_FAST_GET(state.valid_id, tid)};
CELER_ASSERT(valid_tid < state.size());
// Equivalent to `CONT[tid]` but without debug checking, which causes this
// function to grow large enough to emit warnings
#define DS_COPY_IF_SELECTED(FIELD) \
do \
{ \
if (!state.data.FIELD.empty()) \
{ \
DS_FAST_GET(state.scratch.FIELD, tid) \
= DS_FAST_GET(state.data.FIELD, valid_tid); \
} \
} while (0)
DS_COPY_IF_SELECTED(detector);
DS_COPY_IF_SELECTED(track_id);
for (auto sp : range(StepPoint::size_))
{
DS_COPY_IF_SELECTED(points[sp].time);
DS_COPY_IF_SELECTED(points[sp].pos);
DS_COPY_IF_SELECTED(points[sp].dir);
DS_COPY_IF_SELECTED(points[sp].energy);
}
DS_COPY_IF_SELECTED(event_id);
DS_COPY_IF_SELECTED(parent_id);
DS_COPY_IF_SELECTED(track_step_count);
DS_COPY_IF_SELECTED(step_length);
DS_COPY_IF_SELECTED(particle);
DS_COPY_IF_SELECTED(energy_deposition);
#undef DS_COPY_IF_SELECTED
}
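// For reference, an expansion sketch of the macros above:
// DS_COPY_IF_SELECTED(track_id) expands (modulo the do/while wrapper) to
//   if (!state.data.track_id.empty())
//       state.scratch.track_id.data().get()[tid.unchecked_get()]
//           = state.data.track_id.data().get()[valid_tid.unchecked_get()];
// i.e. a bounds-check-free gather of one field from the valid track slot
// into the compacted scratch slot.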
//---------------------------------------------------------------------------//
// KERNEL INTERFACE
//---------------------------------------------------------------------------//
/*!
* Gather results from active tracks that are in a detector.
*/
void gather_step(DeviceRef<StepStateData> const& state, size_type num_valid)
{
if (num_valid == 0)
{
// No valid tracks
return;
}
CELER_LAUNCH_KERNEL(gather_step,
celeritas::device().default_block_size(),
num_valid,
celeritas::device().stream(state.stream_id).get(),
state,
num_valid);
}
//---------------------------------------------------------------------------//
// HELPER FUNCTIONS
//---------------------------------------------------------------------------//
template<class T>
using StateRef
= celeritas::StateCollection<T, Ownership::reference, MemSpace::device>;
//---------------------------------------------------------------------------//
struct HasDetector
{
CELER_FORCEINLINE_FUNCTION bool operator()(DetectorId const& d)
{
return static_cast<bool>(d);
}
};
//---------------------------------------------------------------------------//
template<class T>
void copy_field(std::vector<T>* dst, StateRef<T> const& src, size_type num_valid)
{
if (src.empty() || num_valid == 0)
{
// This attribute is not in use
dst->clear();
return;
}
dst->resize(num_valid);
// Copy all items from valid threads
Copier<T, MemSpace::host> copy{{dst->data(), num_valid}};
copy(MemSpace::device, {src.data().get(), num_valid});
}
//---------------------------------------------------------------------------//
} // namespace
//---------------------------------------------------------------------------//
/*!
* Copy to host results from tracks that interacted with a detector.
*/
template<>
void copy_steps<MemSpace::device>(
DetectorStepOutput* output,
StepStateData<Ownership::reference, MemSpace::device> const& state)
{
CELER_EXPECT(output);
// Store the thread IDs of active tracks that are in a detector
auto start = thrust::device_pointer_cast(state.valid_id.data().get());
auto end = thrust::copy_if(
thrust::device,
thrust::make_counting_iterator(size_type(0)),
thrust::make_counting_iterator(state.size()),
thrust::device_pointer_cast(state.data.detector.data().get()),
start,
HasDetector{});
// Get the number of threads that are active and in a detector
size_type num_valid = end - start;
// Gather the step data on device
gather_step(state, num_valid);
// Resize and copy if the fields are present
#define DS_ASSIGN(FIELD) \
copy_field(&(output->FIELD), state.scratch.FIELD, num_valid)
DS_ASSIGN(detector);
DS_ASSIGN(track_id);
for (auto sp : range(StepPoint::size_))
{
DS_ASSIGN(points[sp].time);
DS_ASSIGN(points[sp].pos);
DS_ASSIGN(points[sp].dir);
DS_ASSIGN(points[sp].energy);
}
DS_ASSIGN(event_id);
DS_ASSIGN(parent_id);
DS_ASSIGN(track_step_count);
DS_ASSIGN(step_length);
DS_ASSIGN(particle);
DS_ASSIGN(energy_deposition);
#undef DS_ASSIGN
CELER_ENSURE(output->detector.size() == num_valid);
CELER_ENSURE(output->track_id.size() == num_valid);
}
//---------------------------------------------------------------------------//
} // namespace celeritas
|
4d864713641dfc2061e8a775f76c27260df49f67.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "accessor.h"
#include "model.h"
#include "cuda_helper.h"
template<typename DT, int dim>
TensorAccessorR<DT, dim>::TensorAccessorR(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
const AccessorRO<DT, dim> acc(region, fid);
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
}
template<typename DT>
__global__
void zero_array(DT* ptr, coord_t size)
{
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = 0;
}
}
template<typename DT, int dim>
TensorAccessorW<DT, dim>::TensorAccessorW(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime,
bool readOutput)
{
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
if (readOutput) {
const AccessorRW<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
} else {
const AccessorWO<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
// FIXME: currently we zero init the region if not read output
hipLaunchKernelGGL(( assign_kernel<DT>), dim3(GET_BLOCKS(rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
ptr, rect.volume(), 0.0f);
checkCUDA(hipDeviceSynchronize());
}
}
template class TensorAccessorR<float, 1>;
template class TensorAccessorR<float, 2>;
template class TensorAccessorR<float, 3>;
template class TensorAccessorR<int32_t, 1>;
template class TensorAccessorR<int32_t, 2>;
template class TensorAccessorR<int32_t, 3>;
template class TensorAccessorR<int64_t, 1>;
template class TensorAccessorR<int64_t, 2>;
template class TensorAccessorR<int64_t, 3>;
template class TensorAccessorW<float, 1>;
template class TensorAccessorW<float, 2>;
template class TensorAccessorW<float, 3>;
template class TensorAccessorW<int32_t, 1>;
template class TensorAccessorW<int32_t, 2>;
template class TensorAccessorW<int32_t, 3>;
template class TensorAccessorW<int64_t, 1>;
template class TensorAccessorW<int64_t, 2>;
template class TensorAccessorW<int64_t, 3>;
| 4d864713641dfc2061e8a775f76c27260df49f67.cu | #include "accessor.h"
#include "model.h"
#include "cuda_helper.h"
template<typename DT, int dim>
TensorAccessorR<DT, dim>::TensorAccessorR(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
const AccessorRO<DT, dim> acc(region, fid);
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
}
template<typename DT>
__global__
void zero_array(DT* ptr, coord_t size)
{
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = 0;
}
}
template<typename DT, int dim>
TensorAccessorW<DT, dim>::TensorAccessorW(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime,
bool readOutput)
{
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
if (readOutput) {
const AccessorRW<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
} else {
const AccessorWO<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
// FIXME: currently we zero init the region if not read output
assign_kernel<DT><<<GET_BLOCKS(rect.volume()), CUDA_NUM_THREADS>>>(
ptr, rect.volume(), 0.0f);
checkCUDA(cudaDeviceSynchronize());
}
}
template class TensorAccessorR<float, 1>;
template class TensorAccessorR<float, 2>;
template class TensorAccessorR<float, 3>;
template class TensorAccessorR<int32_t, 1>;
template class TensorAccessorR<int32_t, 2>;
template class TensorAccessorR<int32_t, 3>;
template class TensorAccessorR<int64_t, 1>;
template class TensorAccessorR<int64_t, 2>;
template class TensorAccessorR<int64_t, 3>;
template class TensorAccessorW<float, 1>;
template class TensorAccessorW<float, 2>;
template class TensorAccessorW<float, 3>;
template class TensorAccessorW<int32_t, 1>;
template class TensorAccessorW<int32_t, 2>;
template class TensorAccessorW<int32_t, 3>;
template class TensorAccessorW<int64_t, 1>;
template class TensorAccessorW<int64_t, 2>;
template class TensorAccessorW<int64_t, 3>;
|
b8ec4e182738eb0b583b29f2d4a50bffd33fc95f.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file pr_nibble_app.cu
*
* @brief Simple Gunrock Application
*/
#include <gunrock/gunrock.h>
#include <gunrock/util/test_utils.cuh>
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
#include <gunrock/app/pr_nibble_aj/pr_nibble_enactor_aj.cuh>
#include <gunrock/app/pr_nibble_aj/pr_nibble_test_aj.cuh>
namespace gunrock {
namespace app {
namespace pr_nibble_aj {
hipError_t UseParameters(util::Parameters ¶meters) {
hipError_t retval = hipSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
// app specific parameters
GUARD_CU(parameters.Use<std::string>(
"src",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
"0",
"<Vertex-ID|random|largestdegree> The source vertices\n"
"\tIf random, randomly select non-zero degree vertices;\n"
"\tIf largestdegree, select vertices with largest degrees",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<double>(
"eps",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
1e-9, "epsilon parameter.", __FILE__, __LINE__));
GUARD_CU(parameters.Use<double>(
"alpha",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
0.15,
"alpha parameter", // <TODO: DOCS>
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"max-iter",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
1000, "Max number of iterations", __FILE__, __LINE__));
return retval;
}
/**
* @brief Run pr_nibble tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[in] ref_values Array of CPU reference values
* @param[in] target where to perform the app
* \return hipError_t error message(s), if any
*/
template <typename GraphT>
hipError_t RunTests(util::Parameters ¶meters, GraphT &graph,
typename GraphT::ValueT **ref_values,
util::Location target) {
hipError_t retval = hipSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
util::Info info("pr_nibble", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
int num_srcs = srcs.size();
ValueT *h_values = new ValueT[graph.nodes];
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
printf("Initializing Problem\n");
GUARD_CU(problem.Init(graph, target));
printf("Initializing Enactor\n");
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
for (int run_num = 0; run_num < num_runs; ++run_num) {
auto run_index = run_num % num_srcs;
VertexT src = srcs[run_index];
VertexT src_neib = graph.GetEdgeDest(graph.GetNeighborListOffset(src));
printf("Resetting Problem\n");
GUARD_CU(problem.Reset(src, src_neib, target));
printf("Resetting Enactor\n");
GUARD_CU(enactor.Reset(src, src_neib, target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
printf("Calling Enactor\n");
GUARD_CU(enactor.Enact());
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_values));
SizeT num_errors = Validate_Results(parameters, graph, h_values,
ref_values[run_index], false);
}
}
cpu_timer.Start();
GUARD_CU(problem.Extract(h_values));
if (validation == "last") {
auto run_index = (num_runs - 1) % num_srcs;
SizeT num_errors = Validate_Results(parameters, graph, h_values,
ref_values[run_index], false);
}
// compute running statistics
// <TODO> change NULL to problem specific per-vertex visited marker, e.g.
// h_distances info.ComputeTraversalStats(enactor, (VertexT*)NULL);
// Display_Memory_Usage(problem);
// #ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(enactor);
// #endif
// </TODO>
// Clean up
// GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_values;
h_values = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace pr_nibble_aj
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_template function
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[out] distances Return shortest distance to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gunrock_pr_nibble(gunrock::util::Parameters ¶meters, GraphT &graph,
ValueT *h_values) {
typedef typename GraphT::VertexT VertexT;
typedef gunrock::app::pr_nibble_aj::Problem<GraphT> ProblemT;
typedef gunrock::app::pr_nibble_aj::Enactor<ProblemT> EnactorT;
gunrock::util::CpuTimer cpu_timer;
gunrock::util::Location target = gunrock::util::DEVICE;
double total_time = 0;
if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
ProblemT problem(parameters);
EnactorT enactor;
problem.Init(graph, target);
enactor.Init(problem, target);
int num_runs = parameters.Get<int>("num-runs");
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
int num_srcs = srcs.size();
for (int run_num = 0; run_num < num_runs; ++run_num) {
auto run_index = run_num % num_srcs;
VertexT src = srcs[run_index];
VertexT src_neib = graph.GetEdgeDest(graph.GetNeighborListOffset(src));
problem.Reset(src, src_neib, target);
enactor.Reset(src, src_neib, target);
cpu_timer.Start();
enactor.Enact();
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
problem.Extract(h_values);
}
enactor.Release(target);
problem.Release(target);
srcs.clear();
return total_time;
}
// * @brief Simple interface take in graph as CSR format
// * @param[in] num_nodes Number of veritces in the input graph
// * @param[in] num_edges Number of edges in the input graph
// * @param[in] row_offsets CSR-formatted graph input row offsets
// * @param[in] col_indices CSR-formatted graph input column indices
// * @param[in] edge_values CSR-formatted graph input edge weights
// * @param[in] num_runs Number of runs to perform SSSP
// * @param[in] sources Sources to begin traverse, one for each run
// * @param[in] mark_preds Whether to output predecessor info
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
template <typename VertexT = int, typename ValueT = float, typename SizeT = int,
typename GValueT = unsigned int, typename TValueT = GValueT>
float pr_nibble(const SizeT num_nodes, const SizeT num_edges,
const SizeT *row_offsets, const VertexT *col_indices,
const GValueT *edge_values, const int num_runs,
VertexT *sources, ValueT *h_values) {
// TODO: change to other graph representation, if not using CSR
typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
gunrock::graph::HAS_EDGE_VALUES |
gunrock::graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("pr_nibble");
gunrock::graphio::UseParameters(parameters);
gunrock::app::pr_nibble_aj::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("num-runs", num_runs);
std::vector<VertexT> srcs;
for (int i = 0; i < num_runs; i++) srcs.push_back(sources[i]);
parameters.Set("srcs", srcs);
bool quiet = parameters.Get<bool>("quiet");
GraphT graph;
graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
graph.CsrT::row_offsets.SetPointer(row_offsets, num_nodes + 1,
gunrock::util::HOST);
graph.CsrT::column_indices.SetPointer(col_indices, num_edges,
gunrock::util::HOST);
// graph.CsrT::edge_values .SetPointer(edge_values, gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
gunrock::graphio::LoadGraph(parameters, graph);
// Run the pr_nibble
double elapsed_time = gunrock_pr_nibble(parameters, graph, h_values);
// Cleanup
graph.Release();
srcs.clear();
return elapsed_time;
}
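// Usage sketch for the CSR entry point above (illustrative only; the graph,
// sizes and seed vertex below are made up rather than taken from the code):
//
//   // 3-vertex undirected triangle in CSR form
//   int row_offsets[] = {0, 2, 4, 6};
//   int col_indices[] = {1, 2, 0, 2, 0, 1};
//   unsigned int edge_values[] = {1, 1, 1, 1, 1, 1};
//   int sources[] = {0};      // one run, seeded at vertex 0
//   float h_values[3] = {0};
//
//   float elapsed = pr_nibble<int, float, int, unsigned int>(
//       3 /*num_nodes*/, 6 /*num_edges*/, row_offsets, col_indices,
//       edge_values, 1 /*num_runs*/, sources, h_values);
//
// On return, h_values holds the per-vertex pr_nibble scores from the last run
// and elapsed is the accumulated GPU time in milliseconds.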
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| b8ec4e182738eb0b583b29f2d4a50bffd33fc95f.cu | // ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file pr_nibble_app.cu
*
* @brief Simple Gunrock Application
*/
#include <gunrock/gunrock.h>
#include <gunrock/util/test_utils.cuh>
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
#include <gunrock/app/pr_nibble_aj/pr_nibble_enactor_aj.cuh>
#include <gunrock/app/pr_nibble_aj/pr_nibble_test_aj.cuh>
namespace gunrock {
namespace app {
namespace pr_nibble_aj {
cudaError_t UseParameters(util::Parameters ¶meters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
// app specific parameters
GUARD_CU(parameters.Use<std::string>(
"src",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
"0",
"<Vertex-ID|random|largestdegree> The source vertices\n"
"\tIf random, randomly select non-zero degree vertices;\n"
"\tIf largestdegree, select vertices with largest degrees",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<double>(
"eps",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
1e-9, "epsilon parameter.", __FILE__, __LINE__));
GUARD_CU(parameters.Use<double>(
"alpha",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
0.15,
"alpha parameter", // <TODO: DOCS>
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"max-iter",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
1000, "Max number of iterations", __FILE__, __LINE__));
return retval;
}
/**
* @brief Run pr_nibble tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[in] ref_values Array of CPU reference values
* @param[in] target where to perform the app
* \return cudaError_t error message(s), if any
*/
template <typename GraphT>
cudaError_t RunTests(util::Parameters ¶meters, GraphT &graph,
typename GraphT::ValueT **ref_values,
util::Location target) {
cudaError_t retval = cudaSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
util::Info info("pr_nibble", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
int num_srcs = srcs.size();
ValueT *h_values = new ValueT[graph.nodes];
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
printf("Initializing Problem\n");
GUARD_CU(problem.Init(graph, target));
printf("Initializing Enactor\n");
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
for (int run_num = 0; run_num < num_runs; ++run_num) {
auto run_index = run_num % num_srcs;
VertexT src = srcs[run_index];
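// src_neib is the first neighbor of src; problem and enactor Reset() take it together with the source vertex.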
VertexT src_neib = graph.GetEdgeDest(graph.GetNeighborListOffset(src));
printf("Resetting Problem\n");
GUARD_CU(problem.Reset(src, src_neib, target));
printf("Resetting Enactor\n");
GUARD_CU(enactor.Reset(src, src_neib, target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
printf("Calling Enactor\n");
GUARD_CU(enactor.Enact());
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_values));
SizeT num_errors = Validate_Results(parameters, graph, h_values,
ref_values[run_index], false);
}
}
cpu_timer.Start();
GUARD_CU(problem.Extract(h_values));
if (validation == "last") {
auto run_index = (num_runs - 1) % num_srcs;
SizeT num_errors = Validate_Results(parameters, graph, h_values,
ref_values[run_index], false);
}
// compute running statistics
// <TODO> change NULL to problem specific per-vertex visited marker, e.g.
// h_distances info.ComputeTraversalStats(enactor, (VertexT*)NULL);
// Display_Memory_Usage(problem);
// #ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(enactor);
// #endif
// </TODO>
// Clean up
// GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_values;
h_values = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace pr_nibble_aj
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_template function
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] h_values Return per-vertex values computed by pr_nibble
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gunrock_pr_nibble(gunrock::util::Parameters ¶meters, GraphT &graph,
ValueT *h_values) {
typedef typename GraphT::VertexT VertexT;
typedef gunrock::app::pr_nibble_aj::Problem<GraphT> ProblemT;
typedef gunrock::app::pr_nibble_aj::Enactor<ProblemT> EnactorT;
gunrock::util::CpuTimer cpu_timer;
gunrock::util::Location target = gunrock::util::DEVICE;
double total_time = 0;
if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
ProblemT problem(parameters);
EnactorT enactor;
problem.Init(graph, target);
enactor.Init(problem, target);
int num_runs = parameters.Get<int>("num-runs");
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
int num_srcs = srcs.size();
for (int run_num = 0; run_num < num_runs; ++run_num) {
auto run_index = run_num % num_srcs;
VertexT src = srcs[run_index];
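// src_neib is the first neighbor of src; problem and enactor Reset() take it together with the source vertex.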
VertexT src_neib = graph.GetEdgeDest(graph.GetNeighborListOffset(src));
problem.Reset(src, src_neib, target);
enactor.Reset(src, src_neib, target);
cpu_timer.Start();
enactor.Enact();
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
problem.Extract(h_values);
}
enactor.Release(target);
problem.Release(target);
srcs.clear();
return total_time;
}
// * @brief Simple interface that takes in a graph in CSR format
// * @param[in] num_nodes Number of vertices in the input graph
// * @param[in] num_edges Number of edges in the input graph
// * @param[in] row_offsets CSR-formatted graph input row offsets
// * @param[in] col_indices CSR-formatted graph input column indices
// * @param[in] edge_values CSR-formatted graph input edge weights
// * @param[in] num_runs Number of runs to perform pr_nibble
// * @param[in] sources Sources to begin traverse, one for each run
// * @param[out] h_values Return per-vertex values computed by pr_nibble
// * \return float Return accumulated elapsed times for all runs
template <typename VertexT = int, typename ValueT = float, typename SizeT = int,
typename GValueT = unsigned int, typename TValueT = GValueT>
float pr_nibble(const SizeT num_nodes, const SizeT num_edges,
const SizeT *row_offsets, const VertexT *col_indices,
const GValueT *edge_values, const int num_runs,
VertexT *sources, ValueT *h_values) {
// TODO: change to other graph representation, if not using CSR
typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
gunrock::graph::HAS_EDGE_VALUES |
gunrock::graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("pr_nibble");
gunrock::graphio::UseParameters(parameters);
gunrock::app::pr_nibble_aj::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("num-runs", num_runs);
std::vector<VertexT> srcs;
for (int i = 0; i < num_runs; i++) srcs.push_back(sources[i]);
parameters.Set("srcs", srcs);
bool quiet = parameters.Get<bool>("quiet");
GraphT graph;
graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
graph.CsrT::row_offsets.SetPointer(row_offsets, num_nodes + 1,
gunrock::util::HOST);
graph.CsrT::column_indices.SetPointer(col_indices, num_edges,
gunrock::util::HOST);
// graph.CsrT::edge_values .SetPointer(edge_values, gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
gunrock::graphio::LoadGraph(parameters, graph);
// Run the pr_nibble
double elapsed_time = gunrock_pr_nibble(parameters, graph, h_values);
// Cleanup
graph.Release();
srcs.clear();
return elapsed_time;
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
36a9d2b549901bf71cd2fa26c8f86fe214f31af4.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <assert.h>
#include "DataFormats/DetId/interface/DetId.h"
#include "DataFormats/HcalDetId/interface/HcalDetId.h"
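// Kernel that default-constructs a DetId on the device and writes it to the output pointer.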
__global__ void test_gen_detid(DetId *id) {
DetId did;
*id = did;
}
__global__ void test_gen_hcal_detid(HcalDetId *id) {
HcalDetId did(HcalBarrel, 5, 5, 0);
*id = did;
// trigger functions on the device
did.iphi();
did.ieta();
did.zside();
did.subdet();
did.ietaAbs();
did.depth();
did.hfdepth();
did.maskDepth();
did.baseDetId();
did.secondAnodeId();
did.crystal_ieta_low();
did.crystal_ieta_high();
did.crystal_iphi_low();
did.crystal_iphi_high();
}
void test_detid() {
// test det ids
DetId h_id, h_id_test;
DetId h_test0{1};
DetId *d_id;
hipMalloc((void **)&d_id, sizeof(DetId));
hipMemcpy(d_id, &h_id, sizeof(DetId), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( test_gen_detid), dim3(1), dim3(1), 0, 0, d_id);
hipMemcpy(&h_id_test, d_id, sizeof(DetId), hipMemcpyDeviceToHost);
assert(h_id_test == h_id);
assert(h_id != h_test0);
}
void test_hcal_detid() {
HcalDetId h_id;
HcalDetId h_id_test0{HcalBarrel, 5, 5, 0};
HcalDetId *d_id;
hipMalloc((void **)&d_id, sizeof(HcalDetId));
hipMemcpy(d_id, &h_id, sizeof(HcalDetId), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( test_gen_hcal_detid), dim3(1), dim3(1), 0, 0, d_id);
hipMemcpy(&h_id, d_id, sizeof(HcalDetId), hipMemcpyDeviceToHost);
std::cout << h_id_test0 << std::endl;
std::cout << h_id << std::endl;
assert(h_id_test0 == h_id);
}
int main(int argc, char **argv) {
int nDevices;
hipGetDeviceCount(&nDevices);
std::cout << "nDevices = " << nDevices << std::endl;
// test det id functionality
if (nDevices > 0)
test_detid();
// test hcal det ids
if (nDevices > 0)
test_hcal_detid();
return 0;
}
| 36a9d2b549901bf71cd2fa26c8f86fe214f31af4.cu | #include <cuda_runtime.h>
#include <cuda.h>
#include <iostream>
#include <assert.h>
#include "DataFormats/DetId/interface/DetId.h"
#include "DataFormats/HcalDetId/interface/HcalDetId.h"
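// Kernel that default-constructs a DetId on the device and writes it to the output pointer.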
__global__ void test_gen_detid(DetId *id) {
DetId did;
*id = did;
}
__global__ void test_gen_hcal_detid(HcalDetId *id) {
HcalDetId did(HcalBarrel, 5, 5, 0);
*id = did;
// trigger functions on the device
did.iphi();
did.ieta();
did.zside();
did.subdet();
did.ietaAbs();
did.depth();
did.hfdepth();
did.maskDepth();
did.baseDetId();
did.secondAnodeId();
did.crystal_ieta_low();
did.crystal_ieta_high();
did.crystal_iphi_low();
did.crystal_iphi_high();
}
void test_detid() {
// test det ids
DetId h_id, h_id_test;
DetId h_test0{1};
DetId *d_id;
cudaMalloc((void **)&d_id, sizeof(DetId));
cudaMemcpy(d_id, &h_id, sizeof(DetId), cudaMemcpyHostToDevice);
test_gen_detid<<<1, 1>>>(d_id);
cudaMemcpy(&h_id_test, d_id, sizeof(DetId), cudaMemcpyDeviceToHost);
assert(h_id_test == h_id);
assert(h_id != h_test0);
}
void test_hcal_detid() {
HcalDetId h_id;
HcalDetId h_id_test0{HcalBarrel, 5, 5, 0};
HcalDetId *d_id;
cudaMalloc((void **)&d_id, sizeof(HcalDetId));
cudaMemcpy(d_id, &h_id, sizeof(HcalDetId), cudaMemcpyHostToDevice);
test_gen_hcal_detid<<<1, 1>>>(d_id);
cudaMemcpy(&h_id, d_id, sizeof(HcalDetId), cudaMemcpyDeviceToHost);
std::cout << h_id_test0 << std::endl;
std::cout << h_id << std::endl;
assert(h_id_test0 == h_id);
}
int main(int argc, char **argv) {
int nDevices;
cudaGetDeviceCount(&nDevices);
std::cout << "nDevices = " << nDevices << std::endl;
// test det id functionality
if (nDevices > 0)
test_detid();
// test hcal det ids
if (nDevices > 0)
test_hcal_detid();
return 0;
}
|
93f7d4c04922dd46280be0f21a6fa18faccbcd39.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/mha_runner.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/fused_multihead_attention_v2.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/flash_attention/fmha_flash_attention.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
union __half2_uint32_t_union {
half2 fp162;
uint32_t u32;
};
void set_alpha_fp16(uint32_t& alpha, float norm) {
__half2_uint32_t_union temp;
temp.fp162 = __float2half2_rn(norm);
alpha = temp.u32;
}
class FusedMHARunnerFP16v2::mhaImpl {
public:
mhaImpl(FusedMHARunnerFP16v2* interface)
: interface(interface),
sm(interface->mSm),
xmmaKernel(getXMMAKernelsV2(DATA_TYPE_FP16, sm)) {
ORT_ENFORCE((sm == kSM_70 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_89),
"Unsupported architecture");
flash_attention_kernel = nullptr;
if (interface->mEnableFlashAttention) {
flash_attention_kernel = get_flash_attention_kernels(DATA_TYPE_FP16, sm);
}
params.clear();
}
~mhaImpl() {}
void setup(const int S, const int B) {
// For bert and vit, use flash attention when sequence length is larger than the threshold.
use_flash_attention = is_flash_attention(S);
params.force_unroll = use_flash_attention;
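// Choose the warp tiling (warps_m x warps_n) for the fused kernel: flash attention uses a fixed 4x1 tiling; otherwise it depends on the sequence length S and the SM version.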
size_t warps_m = 2;
size_t warps_n = 2;
size_t warps_k = 1;
if (use_flash_attention) {
warps_m = 4;
warps_n = 1;
} else {
if (sm == 70) {
if (S == 64 || S == 96) {
warps_m = 2;
warps_n = 2;
} else if (S == 128) {
warps_m = 1;
warps_n = 4;
} else if (S == 256 || S == 384) {
warps_m = 1;
warps_n = 8;
} else {
ORT_ENFORCE(false, "Unsupported sequence length");
}
} else {
if (S == 32 || S == 64 || S == 96 || S == 128) {
warps_m = 2;
warps_n = 2;
} else if (S == 192 || S == 256) {
warps_m = 1;
warps_n = 4;
} else if (S == 384) {
warps_m = 1;
warps_n = 8;
} else {
ORT_ENFORCE(false, "Unsupported sequence length");
}
}
}
// The number of threads per CTA.
threads_per_cta = warps_m * warps_n * warps_k * 32;
// The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension.
xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m);
const float scale_bmm1 = interface->mScale;
const float scale_softmax = 1.f; // Seems to be only required for int8
const float scale_bmm2 = 1.f;
set_alpha_fp16(params.scale_bmm1, scale_bmm1);
set_alpha_fp16(params.scale_softmax, scale_softmax);
set_alpha_fp16(params.scale_bmm2, scale_bmm2);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(half);
params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t);
params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(half);
has_causal_mask = false;
}
void setup_causal_masked_fmha(const int S, const int B) {
const float scale_bmm1 = interface->mScale;
const float scale_softmax = 1.f; // Seems to be only required for int8
const float scale_bmm2 = 1.f;
set_alpha_fp16(params.scale_bmm1, scale_bmm1);
set_alpha_fp16(params.scale_softmax, scale_softmax);
set_alpha_fp16(params.scale_bmm2, scale_bmm2);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(half);
params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(half);
// fall back to original fmha_v2 when head_size <= 64 and seq_len <= 128
use_flash_attention = interface->mEnableFlashAttention;
if (params.d <= 64 && params.s <= 128) {
use_flash_attention = false;
// get max sequence length
if (params.s > 64) {
params.s = 128;
} else {
params.s = 64;
}
}
// set flags
params.force_unroll = use_flash_attention;
has_causal_mask = true;
}
void run(const void* input, const void* cu_seqlens, void* output, hipStream_t stream) {
params.qkv_ptr = const_cast<void*>(input);
params.o_ptr = output;
params.cu_seqlens = static_cast<int*>(const_cast<void*>(cu_seqlens));
if (use_flash_attention && flash_attention_kernel != nullptr && !has_causal_mask) {
flash_attention_kernel->run(params, stream);
} else {
xmmaKernel->run(params, stream, use_flash_attention, has_causal_mask);
}
CUDA_CALL_THROW(hipPeekAtLastError());
}
bool isValid(int s) const {
if (is_flash_attention(s)) {
return (flash_attention_kernel != nullptr) && flash_attention_kernel->isValid(s);
}
return xmmaKernel->isValid(s);
}
int getSFromMaxSeqLen(const int max_seq_len) const {
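// Map the requested max sequence length to the nearest sequence length supported by the non-flash fused kernels; flash attention uses the length unchanged.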
if (is_flash_attention(max_seq_len)) {
return max_seq_len;
}
int S = max_seq_len;
if (max_seq_len <= 32) {
S = (sm == 70) ? 64 : 32;
} else if (max_seq_len <= 64) {
S = 64;
} else if (max_seq_len <= 96) {
S = 96;
} else if (max_seq_len <= 128) {
S = 128;
} else if (max_seq_len <= 192) {
S = (sm == 70) ? 256 : 192;
} else if (max_seq_len <= 256) {
S = 256;
} else if (max_seq_len <= 384) {
S = 384;
}
return S;
}
protected:
bool is_flash_attention(const int S) const {
ORT_ENFORCE(interface->mHasCausalMask == false);
return interface->mEnableFlashAttention && S >= kMinSequenceLengthFlashAttention;
}
private:
FusedMHARunnerFP16v2* interface;
Fused_multihead_attention_params_v2 params;
int sm;
const FusedMultiHeadAttentionXMMAKernelV2* xmmaKernel;
const FusedMultiHeadFlashAttentionKernel* flash_attention_kernel;
size_t xmmas_m;
size_t threads_per_cta;
bool use_flash_attention = false;
bool has_causal_mask = false;
};
FusedMHARunnerFP16v2::FusedMHARunnerFP16v2(const int numHeads,
const int headSize,
const int sm,
bool causal_mask,
bool enable_flash_attention,
const float scale)
: MHARunner(numHeads, headSize, 2, causal_mask, scale),
mSm(sm),
mEnableFlashAttention(enable_flash_attention),
pimpl(new mhaImpl(this)) {
}
void FusedMHARunnerFP16v2::setup(const int S, const int B) {
MHARunner::setup(S, B);
if (mHasCausalMask) {
pimpl->setup_causal_masked_fmha(S, B);
} else {
pimpl->setup(S, B);
}
}
bool FusedMHARunnerFP16v2::is_supported(int sm, int head_size, int sequence_length,
bool enable_flash_attention, bool causal) {
if (causal) {
if (!(sm == kSM_70 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_89)) {
return false;
}
if (enable_flash_attention) {
return head_size == 64 ||
head_size == 32 ||
head_size == 40 ||
head_size == 80 ||
head_size == 128 ||
head_size == 144 ||
head_size == 160 ||
head_size == 256;
}
return (head_size == 64 || head_size == 32 || head_size == 40) && sequence_length <= 128;
}
bool use_flash = enable_flash_attention && sequence_length >= kMinSequenceLengthFlashAttention;
if (use_flash && has_flash_attention_kernel(sm, head_size)) {
return true;
}
if (!(sm == kSM_70 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_89)) {
return false;
}
if (head_size != 64 && head_size != 32) {
return false;
}
if (sm == kSM_70 && head_size == 32) {
return false;
}
// Normal (not flash) fused kernel supports sequence length up to 384.
constexpr int max_sequence_length = 384;
return sequence_length <= max_sequence_length;
}
size_t FusedMHARunnerFP16v2::getWorkspaceSize() const {
return 0;
}
void FusedMHARunnerFP16v2::run(const void* input, const void* cu_seqlens, void* output, hipStream_t stream) {
pimpl->run(input, cu_seqlens, output, stream);
}
bool FusedMHARunnerFP16v2::isValid(int s) const {
return pimpl->isValid(s);
}
int FusedMHARunnerFP16v2::getSFromMaxSeqLen(const int max_seq_len) const {
return pimpl->getSFromMaxSeqLen(max_seq_len);
}
std::unique_ptr<MHARunner> FusedMHARunnerFP16v2::Create(const int numHeads,
const int headSize,
const int sm,
bool causal_mask,
bool enable_flash_attention,
const float scale) {
#ifdef _MSC_VER
return std::make_unique<FusedMHARunnerFP16v2>(numHeads, headSize, sm, causal_mask, enable_flash_attention, scale);
#else
// Linux build has error using make_unique: invalid application of 'sizeof' to incomplete type 'onnxruntime::contrib::cuda::FusedMHARunnerFP16v2::mhaImpl'
std::unique_ptr<MHARunner> runner;
runner.reset(new FusedMHARunnerFP16v2(numHeads, headSize, sm, causal_mask, enable_flash_attention, scale));
return runner;
#endif
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 93f7d4c04922dd46280be0f21a6fa18faccbcd39.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/mha_runner.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/fused_multihead_attention_v2.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/flash_attention/fmha_flash_attention.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
union __half2_uint32_t_union {
half2 fp162;
uint32_t u32;
};
void set_alpha_fp16(uint32_t& alpha, float norm) {
__half2_uint32_t_union temp;
temp.fp162 = __float2half2_rn(norm);
alpha = temp.u32;
}
class FusedMHARunnerFP16v2::mhaImpl {
public:
mhaImpl(FusedMHARunnerFP16v2* interface)
: interface(interface),
sm(interface->mSm),
xmmaKernel(getXMMAKernelsV2(DATA_TYPE_FP16, sm)) {
ORT_ENFORCE((sm == kSM_70 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_89),
"Unsupported architecture");
flash_attention_kernel = nullptr;
if (interface->mEnableFlashAttention) {
flash_attention_kernel = get_flash_attention_kernels(DATA_TYPE_FP16, sm);
}
params.clear();
}
~mhaImpl() {}
void setup(const int S, const int B) {
// For bert and vit, use flash attention when sequence length is larger than the threshold.
use_flash_attention = is_flash_attention(S);
params.force_unroll = use_flash_attention;
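// Choose the warp tiling (warps_m x warps_n) for the fused kernel: flash attention uses a fixed 4x1 tiling; otherwise it depends on the sequence length S and the SM version.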
size_t warps_m = 2;
size_t warps_n = 2;
size_t warps_k = 1;
if (use_flash_attention) {
warps_m = 4;
warps_n = 1;
} else {
if (sm == 70) {
if (S == 64 || S == 96) {
warps_m = 2;
warps_n = 2;
} else if (S == 128) {
warps_m = 1;
warps_n = 4;
} else if (S == 256 || S == 384) {
warps_m = 1;
warps_n = 8;
} else {
ORT_ENFORCE(false, "Unsupported sequence length");
}
} else {
if (S == 32 || S == 64 || S == 96 || S == 128) {
warps_m = 2;
warps_n = 2;
} else if (S == 192 || S == 256) {
warps_m = 1;
warps_n = 4;
} else if (S == 384) {
warps_m = 1;
warps_n = 8;
} else {
ORT_ENFORCE(false, "Unsupported sequence length");
}
}
}
// The number of threads per CTA.
threads_per_cta = warps_m * warps_n * warps_k * 32;
// The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension.
xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m);
const float scale_bmm1 = interface->mScale;
const float scale_softmax = 1.f; // Seems to be only required for int8
const float scale_bmm2 = 1.f;
set_alpha_fp16(params.scale_bmm1, scale_bmm1);
set_alpha_fp16(params.scale_softmax, scale_softmax);
set_alpha_fp16(params.scale_bmm2, scale_bmm2);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(half);
params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t);
params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(half);
has_causal_mask = false;
}
void setup_causal_masked_fmha(const int S, const int B) {
const float scale_bmm1 = interface->mScale;
const float scale_softmax = 1.f; // Seems to be only required for int8
const float scale_bmm2 = 1.f;
set_alpha_fp16(params.scale_bmm1, scale_bmm1);
set_alpha_fp16(params.scale_softmax, scale_softmax);
set_alpha_fp16(params.scale_bmm2, scale_bmm2);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(half);
params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(half);
// fall back to original fmha_v2 when head_size <= 64 and seq_len <= 128
use_flash_attention = interface->mEnableFlashAttention;
if (params.d <= 64 && params.s <= 128) {
use_flash_attention = false;
// get max sequence length
if (params.s > 64) {
params.s = 128;
} else {
params.s = 64;
}
}
// set flags
params.force_unroll = use_flash_attention;
has_causal_mask = true;
}
void run(const void* input, const void* cu_seqlens, void* output, cudaStream_t stream) {
params.qkv_ptr = const_cast<void*>(input);
params.o_ptr = output;
params.cu_seqlens = static_cast<int*>(const_cast<void*>(cu_seqlens));
if (use_flash_attention && flash_attention_kernel != nullptr && !has_causal_mask) {
flash_attention_kernel->run(params, stream);
} else {
xmmaKernel->run(params, stream, use_flash_attention, has_causal_mask);
}
CUDA_CALL_THROW(cudaPeekAtLastError());
}
bool isValid(int s) const {
if (is_flash_attention(s)) {
return (flash_attention_kernel != nullptr) && flash_attention_kernel->isValid(s);
}
return xmmaKernel->isValid(s);
}
int getSFromMaxSeqLen(const int max_seq_len) const {
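// Map the requested max sequence length to the nearest sequence length supported by the non-flash fused kernels; flash attention uses the length unchanged.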
if (is_flash_attention(max_seq_len)) {
return max_seq_len;
}
int S = max_seq_len;
if (max_seq_len <= 32) {
S = (sm == 70) ? 64 : 32;
} else if (max_seq_len <= 64) {
S = 64;
} else if (max_seq_len <= 96) {
S = 96;
} else if (max_seq_len <= 128) {
S = 128;
} else if (max_seq_len <= 192) {
S = (sm == 70) ? 256 : 192;
} else if (max_seq_len <= 256) {
S = 256;
} else if (max_seq_len <= 384) {
S = 384;
}
return S;
}
protected:
bool is_flash_attention(const int S) const {
ORT_ENFORCE(interface->mHasCausalMask == false);
return interface->mEnableFlashAttention && S >= kMinSequenceLengthFlashAttention;
}
private:
FusedMHARunnerFP16v2* interface;
Fused_multihead_attention_params_v2 params;
int sm;
const FusedMultiHeadAttentionXMMAKernelV2* xmmaKernel;
const FusedMultiHeadFlashAttentionKernel* flash_attention_kernel;
size_t xmmas_m;
size_t threads_per_cta;
bool use_flash_attention = false;
bool has_causal_mask = false;
};
FusedMHARunnerFP16v2::FusedMHARunnerFP16v2(const int numHeads,
const int headSize,
const int sm,
bool causal_mask,
bool enable_flash_attention,
const float scale)
: MHARunner(numHeads, headSize, 2, causal_mask, scale),
mSm(sm),
mEnableFlashAttention(enable_flash_attention),
pimpl(new mhaImpl(this)) {
}
void FusedMHARunnerFP16v2::setup(const int S, const int B) {
MHARunner::setup(S, B);
if (mHasCausalMask) {
pimpl->setup_causal_masked_fmha(S, B);
} else {
pimpl->setup(S, B);
}
}
bool FusedMHARunnerFP16v2::is_supported(int sm, int head_size, int sequence_length,
bool enable_flash_attention, bool causal) {
if (causal) {
if (!(sm == kSM_70 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_89)) {
return false;
}
if (enable_flash_attention) {
return head_size == 64 ||
head_size == 32 ||
head_size == 40 ||
head_size == 80 ||
head_size == 128 ||
head_size == 144 ||
head_size == 160 ||
head_size == 256;
}
return (head_size == 64 || head_size == 32 || head_size == 40) && sequence_length <= 128;
}
bool use_flash = enable_flash_attention && sequence_length >= kMinSequenceLengthFlashAttention;
if (use_flash && has_flash_attention_kernel(sm, head_size)) {
return true;
}
if (!(sm == kSM_70 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_89)) {
return false;
}
if (head_size != 64 && head_size != 32) {
return false;
}
if (sm == kSM_70 && head_size == 32) {
return false;
}
// Normal (not flash) fused kernel supports sequence length up to 384.
constexpr int max_sequence_length = 384;
return sequence_length <= max_sequence_length;
}
size_t FusedMHARunnerFP16v2::getWorkspaceSize() const {
return 0;
}
void FusedMHARunnerFP16v2::run(const void* input, const void* cu_seqlens, void* output, cudaStream_t stream) {
pimpl->run(input, cu_seqlens, output, stream);
}
bool FusedMHARunnerFP16v2::isValid(int s) const {
return pimpl->isValid(s);
}
int FusedMHARunnerFP16v2::getSFromMaxSeqLen(const int max_seq_len) const {
return pimpl->getSFromMaxSeqLen(max_seq_len);
}
std::unique_ptr<MHARunner> FusedMHARunnerFP16v2::Create(const int numHeads,
const int headSize,
const int sm,
bool causal_mask,
bool enable_flash_attention,
const float scale) {
#ifdef _MSC_VER
return std::make_unique<FusedMHARunnerFP16v2>(numHeads, headSize, sm, causal_mask, enable_flash_attention, scale);
#else
// Linux build has error using make_unique: invalid application of ‘sizeof’ to incomplete type ‘onnxruntime::contrib::cuda::FusedMHARunnerFP16v2::mhaImpl’
std::unique_ptr<MHARunner> runner;
runner.reset(new FusedMHARunnerFP16v2(numHeads, headSize, sm, causal_mask, enable_flash_attention, scale));
return runner;
#endif
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
2b29c212af4aeb6770a373233b4cba5e57b33271.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <windows.h>
using namespace std;
void showMatriz(int *matriz, int anchura, int altura);
void generateSeeds(int *matriz, int ancho, int alto, int cantidad, char modo);
void gestionSemillas(int *matriz, int ancho, int numeroSemillas, int alto, char modo);
int checkFull(int *matriz, int tamano);
bool checkMove(int *matriz, int ancho, int alto);
void guardar(int vidas, int *tablero, int altura, int anchura, char dificultad);
int* cargar();
int* MostrarEspecificaciones();
hipError_t cudaStatus;
/* add_up
* Kernel helper that merges upwards every pair of equal numbers.
*/
__device__ void add_up(int *matriz, int x, int y, int altura, int anchura)
{
if (x != 0 && y < anchura) //Threads of the first row do nothing: their cells are only written by the other threads
{
if (matriz[x*anchura + y] != 0) //If the cell is not 0, handle its possible merge or shift
{
if (matriz[x*anchura + y] == matriz[(x - 1)*anchura + y]) //If it equals the cell above, count how many cells of this column hold the same number
{
int iguales = 0;
iguales++;
for (int i = 1; i <= x; i++)
{
if (matriz[x*anchura + y] == matriz[(x - i)*anchura + y])
{
iguales++;
}
else {
break;
}
}
if (iguales % 2 == 0) //If the count is even they are merged; otherwise this number will be merged with another one and is not available
{
matriz[(x - 1)*anchura + y] = matriz[(x - 1)*anchura + y] * 2;
matriz[x*anchura + y] = 0;
}
}
else if (matriz[(x - 1)*anchura + y] == 0) //Check whether other threads have left a 0 behind so this cell can shift into it
{
matriz[(x - 1)*anchura + y] = matriz[x*anchura + y];
matriz[x*anchura + y] = 0;
}
}
}
}
/* stack_up
* Kernel helper that shifts every number upwards.
*/
__device__ void stack_up(int *matriz, int anchura, int altura, int x, int y) {
for (int i = altura - 1; i > 0; i--) //shift cell by cell, altura-1 times, so that even the last cell of a column can end up at the top
{
if ((x != 0) && (matriz[x*anchura + y] != 0) && matriz[x*anchura + (y - anchura)] == 0) //If the cell is in the first row, is 0, or the cell above is not 0, do nothing
{
matriz[x*anchura + (y - anchura)] = matriz[x*anchura + y]; //Otherwise, shift the cell up
matriz[x*anchura + y] = 0;
}
__syncthreads(); //synchronize so that all threads of the block perform these steps at the same time
}
}
/* mov_upK
* Kernel that moves the numbers upwards, merging them along the way
*/
__global__ void mov_upK(int *matriz, int anchura, int altura) {
int x = blockIdx.x;
int y = threadIdx.y;
stack_up(matriz, anchura, altura, x, y); //The calls are ordered like this to produce the expected move:
add_up(matriz, x, y, altura, anchura); //2 2 0 4 -> 4 4 0 0
__syncthreads();
stack_up(matriz, anchura, altura, x, y);
}
/* move_up
* Host helper that launches the mov_upK kernel
*/
hipError_t move_up(int *matriz, int ancho, int alto) {
hipError_t cudaStatus;
int *dev_m; //Device matrix where the results are gathered
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Error en setdevice");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_m, ancho*alto * sizeof(int)); //Allocate memory for the result matrix
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Error en Malloc");
goto Error;
}
cudaStatus = hipMemcpy(dev_m, matriz, ancho*alto * sizeof(int), hipMemcpyHostToDevice); //copy the initial data
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
//Set up the launch dimensions
dim3 dimgrid(alto, 1);
dim3 dimblock(1, ancho, 1);
mov_upK << < dimgrid, dimblock >> > (dev_m, ancho, alto);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Error en synchronize mov_up\n", cudaStatus);
goto Error;
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Error en mov_up");
goto Error;
}
cudaStatus = hipMemcpy(matriz, dev_m, ancho*alto * sizeof(int), hipMemcpyDeviceToHost); //copy the result back into the host variable
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Error en memcpy to host de mov_upK");
goto Error;
}
Error: //Free the device memory in case of error
hipFree(dev_m);
return cudaStatus;
}
/* add_down
* Kernel helper that merges downwards every pair of equal numbers.
*/
__device__ void add_down(int *matriz, int x, int y, int altura, int anchura)
{
if (x != altura - 1 && y < anchura) //Threads of the last row do nothing: their cells are only written by the other threads
{
if (matriz[x*anchura + y] != 0) //If the cell is not 0, handle its possible merge or shift
{
if (matriz[x*anchura + y] == matriz[(x + 1)*anchura + y]) //If it equals the cell below, count how many cells of this column hold the same number
{
int iguales = 0;
iguales++;
for (int i = 1; x + i <= altura; i++)
{
if (matriz[x*anchura + y] == matriz[(x + i)*anchura + y])
{
iguales++;
}
else {
break;
}
}
if (iguales % 2 == 0) //If the count is even they are merged; otherwise this number will be merged with another one and is not available
{
matriz[(x + 1)*anchura + y] = matriz[(x + 1)*anchura + y] * 2;
matriz[x*anchura + y] = 0;
}
}
else if (matriz[(x + 1)*anchura + y] == 0) //Check whether other threads have left a 0 behind so this cell can shift into it
{
matriz[(x + 1)*anchura + y] = matriz[x*anchura + y];
matriz[x*anchura + y] = 0;
}
}
}
}
/* stack_down
* Kernel helper that shifts every number downwards.
*/
__device__ void stack_down(int *matriz, int anchura, int altura, int x, int y) {
for (int i = altura - 1; i > 0; i--) //shift cell by cell, altura-1 times, so that even the first cell of a column can end up at the bottom
{
if ((x != altura - 1) && (matriz[x*anchura + y] != 0) && matriz[(x + 1)*anchura + y] == 0) //If the cell is in the last row, is 0, or the cell below is not 0, do nothing
{
matriz[(x + 1)*anchura + y] = matriz[x*anchura + y]; //Otherwise, shift the cell down
matriz[x*anchura + y] = 0;
}
__syncthreads();
}
}
/* mov_downK
* Kernel that moves the numbers downwards, merging them along the way
*/
__global__ void mov_downK(int *matriz, int anchura, int altura) {
int x = blockIdx.x;
int y = threadIdx.y;
stack_down(matriz, anchura, altura, x, y); //The calls are ordered like this to produce the expected move:
add_down(matriz, x, y, altura, anchura); //2 2 0 4 -> 4 4 0 0
__syncthreads();
stack_down(matriz, anchura, altura, x, y);
}
/* move_down
* Host helper that launches the mov_downK kernel
*/
hipError_t move_down(int *matriz, int ancho, int alto) {
hipError_t cudaStatus;
int *dev_m; //Device matrix where the results are gathered
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Error en setdevice");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_m, ancho*alto * sizeof(int)); //Allocate memory for the result matrix
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Error en Malloc");
goto Error;
}
cudaStatus = hipMemcpy(dev_m, matriz, ancho*alto * sizeof(int), hipMemcpyHostToDevice); //copy the initial data
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
//Set up the launch dimensions
dim3 dimgrid(alto, 1);
dim3 dimblock(1, ancho, 1);
mov_downK << < dimgrid, dimblock >> > (dev_m, ancho, alto);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Error en synchronize mov_down\n", cudaStatus);
goto Error;
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Error en mov_down");
goto Error;
}
cudaStatus = hipMemcpy(matriz, dev_m, ancho*alto * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Error en memcpy to host de mov_downK");
goto Error;
}
Error: //Free the device memory in case of error
hipFree(dev_m);
return cudaStatus;
}
/* add_left
* Kernel helper that merges to the left every pair of equal numbers.
*/
__device__ void add_left(int *matriz, int x, int y, int altura, int anchura)
{
if (y != 0 && y < anchura) //Threads of the leftmost column do nothing: their cells are only written by the other threads
{
if (matriz[x*anchura + y] != 0) //If the cell is not 0, handle its possible merge or shift
{
if (matriz[x*anchura + y] == matriz[x*anchura + (y - 1)]) //If it equals its left neighbour, count how many cells of this row hold the same number
{
int iguales = 0;
iguales++;
for (int i = 1; i <= y; i++)
{
if (matriz[x*anchura + y] == matriz[x*anchura + (y - i)])
{
iguales++;
}
else {
break;
}
}
if (iguales % 2 == 0) //If the count is even they are merged; otherwise this number will be merged with another one and is not available
{
matriz[x*anchura + (y - 1)] = matriz[x*anchura + (y - 1)] * 2;
matriz[x*anchura + y] = 0;
}
}
else if (matriz[x*anchura + (y - 1)] == 0) //Check whether other threads have left a 0 behind so this cell can shift into it
{
matriz[x*anchura + (y - 1)] = matriz[x*anchura + y];
matriz[x*anchura + y] = 0;
}
}
}
}
/* stack_left
* Kernel helper that shifts every number to the left.
*/
__device__ void stack_left(int *matriz, int anchura, int altura, int x, int y) {
for (int i = anchura - 1; i > 0; i--) //shift cell by cell, anchura-1 times, so that even the last cell of a row can end up at its start
{
if ((y != 0) && (matriz[x*anchura + y] != 0) && matriz[x*anchura + (y - 1)] == 0) //If the cell is in the first column, is 0, or its left neighbour is not 0, do nothing
{
matriz[x*anchura + (y - 1)] = matriz[x*anchura + y]; //Otherwise, shift the cell left
matriz[x*anchura + y] = 0;
}
__syncthreads(); //synchronize so that all threads of the block perform these steps at the same time
}
}
/* mov_leftK
* Kernel that moves the numbers to the left, merging them along the way
*/
__global__ void mov_leftK(int *matriz, int anchura, int altura) {
int x = blockIdx.x;
int y = threadIdx.y;
stack_left(matriz, anchura, altura, x, y); //The calls are ordered like this to produce the expected move:
add_left(matriz, x, y, altura, anchura); //2 2 0 4 -> 4 4 0 0
__syncthreads();
stack_left(matriz, anchura, altura, x, y);
}
/* move_left
* Host helper that launches the mov_leftK kernel
*/
hipError_t move_left(int *matriz, int ancho, int alto) {
hipError_t cudaStatus;
int *dev_m; //Device matrix where the results are gathered
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Error en setdevice");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_m, ancho*alto * sizeof(int)); //Allocate memory for the result matrix
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Error en Malloc");
goto Error;
}
cudaStatus = hipMemcpy(dev_m, matriz, ancho*alto * sizeof(int), hipMemcpyHostToDevice); //copy the initial data
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
//Set up the launch dimensions
dim3 dimgrid(alto, 1);
dim3 dimblock(1, ancho, 1);
mov_leftK << < dimgrid, dimblock >> > (dev_m, ancho, alto);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Error en synchronize mov_leftK\n", cudaStatus);
goto Error;
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Error en mov_leftK");
goto Error;
}
cudaStatus = hipMemcpy(matriz, dev_m, ancho*alto * sizeof(int), hipMemcpyDeviceToHost); //copy the result back into the host variable
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Error en memcpy to host de mov_leftK");
goto Error;
}
Error: //Free the device memory in case of error
hipFree(dev_m);
return cudaStatus;
}
/* add_right
* Kernel helper that merges to the right every pair of equal numbers.
*/
__device__ void add_right(int *matriz, int x, int y, int altura, int anchura)
{
if (y != anchura - 1 && y < anchura) //Threads of the rightmost column do nothing: their cells are only written by the other threads
{
if (matriz[x*anchura + y] != 0) //If the cell is not 0, handle its possible merge or shift
{
if (matriz[x*anchura + y] == matriz[x*anchura + (y + 1)])//If it equals its right neighbour, count how many cells of this row hold the same number
{
int iguales = 0;
iguales++;
for (int i = 1; y + i < anchura; i++)
{
if (matriz[x*anchura + y] == matriz[x*anchura + (y + i)])
{
iguales++;
}
else {
break;
}
}
if (iguales % 2 == 0) //If the count is even they are merged; otherwise this number will be merged with another one and is not available
{
matriz[x*anchura + (y + 1)] = matriz[x*anchura + (y + 1)] * 2;
matriz[x*anchura + y] = 0;
}
}
else if (matriz[x*anchura + (y + 1)] == 0) //Check whether other threads have left a 0 behind so this cell can shift into it
{
matriz[x*anchura + (y + 1)] = matriz[x*anchura + y];
matriz[x*anchura + y] = 0;
}
}
}
}
/* stack_right
* Kernel helper that shifts every number to the right.
*/
__device__ void stack_right(int *matriz, int anchura, int altura, int x, int y) {
for (int i = anchura - 1; i > 0; i--) //shift cell by cell, anchura-1 times, so that even the first cell of a row can end up at its end
{
if ((y != anchura - 1) && (matriz[x*anchura + y] != 0) && matriz[x*anchura + (y + 1)] == 0) //If the cell is in the last column, is 0, or its right neighbour is not 0, do nothing
{
matriz[x*anchura + (y + 1)] = matriz[x*anchura + y]; //Otherwise, shift the cell right
matriz[x*anchura + y] = 0;
}
__syncthreads(); //synchronize so that all threads of the block perform these steps at the same time
}
}
/* mov_rightK
* Kernel that moves the numbers to the right, merging them along the way
*/
__global__ void mov_rightK(int *matriz, int anchura, int altura) {
int x = blockIdx.x;
int y = threadIdx.y;
stack_right(matriz, anchura, altura, x, y); //The calls are ordered like this to produce the expected move:
add_right(matriz, x, y, altura, anchura); //2 2 0 4 -> 4 4 0 0
__syncthreads();
stack_right(matriz, anchura, altura, x, y);
}
/* move_right
* Host helper that launches the mov_rightK kernel
*/
hipError_t move_right(int *matriz, int ancho, int alto) {
hipError_t cudaStatus;
int *dev_m; //Device matrix where the results are gathered
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Error en setdevice");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_m, ancho*alto * sizeof(int)); //Allocate memory for the result matrix
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Error en Malloc");
goto Error;
}
cudaStatus = hipMemcpy(dev_m, matriz, ancho*alto * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
//Set up the launch dimensions
dim3 dimgrid(alto, 1);
dim3 dimblock(1, ancho, 1);
mov_rightK << < dimgrid, dimblock >> > (dev_m, ancho, alto);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Error en synchronize mov_right\n", cudaStatus);
goto Error;
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Error en mov_right");
goto Error;
}
cudaStatus = hipMemcpy(matriz, dev_m, ancho*alto * sizeof(int), hipMemcpyDeviceToHost); //copy the result back into the host variable
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Error en memcpy to host de mov_rightK");
goto Error;
}
Error: //Free the device memory in case of error
hipFree(dev_m);
return cudaStatus;
}
int main()
{
hipError_t cudaStatus;
srand(time(NULL));
int ancho;
int alto;
int numSemillas = 0;
int vidas = 5;
char modo;
char cargado;
char ia;
int *datos;
int *matriz;
int *especificaciones;
especificaciones = MostrarEspecificaciones();
printf("Desea activar la IA? (y/n): ");
cin >> ia;
printf("Desea comprobar si hay partidas guardadas?(y/n): ");
cin >> cargado;
if (cargado == 'y')
{
datos = cargar();
vidas = datos[0];
alto = datos[1];
ancho = datos[2];
int dificultad = datos[3];
if (dificultad == 0)
{
modo = 'B';
numSemillas = 15;
}
else
{
modo = 'A';
numSemillas = 8;
}
matriz = (int*)malloc(ancho*alto * sizeof(int));
for (int i = 0; i < alto*ancho; i++)
{
matriz[i] = datos[4 + i];
}
}
else
{
printf("Indique el ancho de la matriz: ");
cin >> ancho;
printf("Indique el alto de la matriz: ");
cin >> alto;
if (ancho > especificaciones[0] || alto > especificaciones[0])
{
printf("La matriz seleccionada es demasiado grande para tu tarjeta grafica. Lo siento.");
return 0;
}
printf("Indique la dificultad del juego (B->Bajo / A->Alto): ");
cin >> modo;
switch (modo)
{
case 'B':
numSemillas = 15;
break;
case 'A':
numSemillas = 8;
break;
default:
break;
}
matriz = (int*)malloc(ancho*alto * sizeof(int));
for (int i = 0; i < ancho*alto; i++) {
matriz[i] = 0;
}
}
if (ia == 'n')
{
while ((!checkFull(matriz, ancho*alto) || checkMove(matriz, ancho, alto)) && vidas > 0)
{
system("CLS");
if (!(!checkFull(matriz, ancho*alto) || checkMove(matriz, ancho, alto)) && vidas > 0)
{
for (int i = 0; i < ancho*alto; i++) {
matriz[i] = 0;
}
vidas--;
}
gestionSemillas(matriz, ancho, numSemillas, alto, modo);
char movimiento = 'p';
printf("Vidas restantes: %d\n", vidas);
printf("Tablero:\n");
showMatriz(matriz, ancho, alto);
printf("Hacia donde quieres mover?(w/a/s/d) Para guardar teclee g: ");
cin >> movimiento;
switch (movimiento)
{
case 'w':
cudaStatus = move_up(matriz, ancho, alto);
break;
case 'a':
cudaStatus = move_left(matriz, ancho, alto);
break;
case 's':
cudaStatus = move_down(matriz, ancho, alto);
break;
case 'd':
cudaStatus = move_right(matriz, ancho, alto);
break;
case 'g':
guardar(vidas, matriz, alto, ancho, modo);
printf("Partida guardada, hasta pronto!");
return 0;
default:
break;
}
if (!(!checkFull(matriz, ancho*alto) || checkMove(matriz, ancho, alto)) && vidas > 0)
{
for (int i = 0; i < ancho*alto; i++) {
matriz[i] = 0;
}
vidas--;
}
}
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
}
else {
while ((!checkFull(matriz, ancho*alto) || checkMove(matriz, ancho, alto)) && vidas > 0)
{
if (!(!checkFull(matriz, ancho*alto) || checkMove(matriz, ancho, alto)) && vidas > 0)
{
for (int i = 0; i < ancho*alto; i++) {
matriz[i] = 0;
}
vidas--;
}
system("CLS");
gestionSemillas(matriz, ancho, numSemillas, alto, modo);
char movimiento = 'p';
printf("Vidas restantes: %d\n", vidas);
printf("Tablero:\n");
showMatriz(matriz, ancho, alto);
int r = rand() % 4;
switch (r)
{
case 0:
printf("Moviendo hacia arriba\n");
cudaStatus = move_up(matriz, ancho, alto);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "move_up failed!");
return 1;
}
break;
case 1:
printf("Moviendo hacia izquierda\n");
cudaStatus = move_left(matriz, ancho, alto);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "move_left failed!");
return 1;
}
break;
case 2:
printf("Moviendo hacia abajo\n");
cudaStatus = move_down(matriz, ancho, alto);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "move_down failed!");
return 1;
}
break;
case 3:
printf("Moviendo hacia derecha\n");
cudaStatus = move_right(matriz, ancho, alto);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "move_right failed!");
return 1;
}
break;
default:
break;
}
//Sleep(100);
if (!(!checkFull(matriz, ancho*alto) || checkMove(matriz, ancho, alto)) && vidas > 0)
{
for (int i = 0; i < ancho*alto; i++) {
matriz[i] = 0;
}
vidas--;
}
}
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
}
return 0;
}
/* showMatriz
* Prints the matrix on screen.
*/
void showMatriz(int *matriz, int anchura, int altura)
{
for (int i = 0; i < altura; i++)
{
for (int j = 0; j < anchura; j++)
{
printf("%d\t", matriz[i*anchura + j]);
}
printf("\n");
}
}
/* generateSeeds
* Generates a number of seeds in the matrix, taking its dimensions and the selected difficulty into account.
* On low difficulty (B) 15 seeds are created with the values 2, 4 and 8. On high difficulty (A) 8 seeds are created with the values 2 and 4.
*/
void generateSeeds(int *matriz, int ancho, int alto, int cantidad, char modo)
{
int total = ancho * alto;
int num;
if (modo == 'B')
{
for (int i = 0; i < cantidad; i++)
{
int r = rand() % total;
while (matriz[r] != 0) {
r = rand() % total;
}
int opcion = rand() % 100;
if (opcion <= 50) {
matriz[r] = 2;
}
else if (opcion <= 80 && opcion > 50) {
matriz[r] = 4;
}
else {
matriz[r] = 8;
}
}
}
else if (modo == 'A')
{
for (int i = 0; i < cantidad; i++)
{
int r = rand() % total;
while (matriz[r] != 0) {
r = rand() % total;
}
int opcion = rand() % 100;
if (opcion <= 60) {
matriz[r] = 2;
}
else {
matriz[r] = 4;
}
}
}
}
/* checkMove
* Checks whether any move is still possible, so that the game does not end while moves remain
* even if the matrix is full. The method looks in every direction of the board to see whether a
* neighbouring number is equal (and can be merged) or is a 0 (and allows a shift).
*/
bool checkMove(int *matriz, int anchura, int altura)
{
for (int i = 0; i < anchura*(altura - 1); i++)
{
if (matriz[i] == matriz[i + anchura] || matriz[i + anchura] == 0)
{
return true;
}
}
for (int i = anchura; i < anchura*altura; i++)
{
if (matriz[i] == matriz[i - anchura] || matriz[i - anchura] == 0)
{
return true;
}
}
for (int i = 0; i < altura; i++)
{
for (int j = 0; j < anchura - 1; j++)
{
if (matriz[i*anchura + j] == matriz[i*anchura + j + 1] || matriz[i*anchura + j + 1] == 0)
{
return true;
}
}
}
for (int i = 0; i < altura; i++)
{
for (int j = 1; j < anchura; j++)
{
if (matriz[i*anchura + j] == matriz[i*anchura + j - 1] || matriz[i*anchura + j - 1] == 0)
{
return true;
}
}
}
return false;
}
/* checkFull
* Checks whether the matrix is full, i.e. whether it still contains any 0.
*/
int checkFull(int *matriz, int tamano)
{
int flag = 1;
for (int i = 0; i < tamano; i++)
{
if (matriz[i] == 0)
{
flag = 0;
}
}
return flag;
}
/* gestionSemillas
* Counts how many free cells remain in the matrix and then calls generateSeeds to create the required seeds,
* making sure that the number of seeds to generate never exceeds the free cells available.
*/
void gestionSemillas(int *matriz, int ancho, int numeroSemillas, int alto, char modo)
{
if (!checkFull(matriz, ancho*alto))
{
int n = 0;
for (int i = 0; i < ancho*alto; i++)
{
if (matriz[i] == 0)
n++;
}
if (modo == 'B')
{
if (n < 15)
{
generateSeeds(matriz, ancho, alto, n, modo);
}
else {
generateSeeds(matriz, ancho, alto, numeroSemillas, modo);
}
}
else if (modo == 'A')
{
if (n < 8)
{
generateSeeds(matriz, ancho, alto, n, modo);
}
else {
generateSeeds(matriz, ancho, alto, numeroSemillas, modo);
}
}
}
}
/* guardar
* Saves the game to an external file (.txt) so that it can be resumed later from the point where it was left.
*/
void guardar(int vidas, int *matriz, int altura, int anchura, char dificultad) {
ofstream archivo;
int dif;
archivo.open("2048_savedata.txt", ios::out); //Create or overwrite the file
//If the file cannot be written, report an error
if (archivo.fail())
{
cout << "Error al guardar la partida.\n";
exit(1);
}
if (dificultad == 'B')
{
dif = 0;
}
else
{
dif = 1;
}
archivo << vidas << endl; //Save the lives
archivo << altura << endl; //Save the height
archivo << anchura << endl; //Save the width
archivo << dif << endl; //Save the difficulty
//Save the matrix
for (int i = 0; i < (altura*anchura); i++)
{
archivo << matriz[i] << " ";
}
cout << "\nPartida guardada con exito." << endl;
archivo.close(); //Close the file
}
/* cargar
* Loads the game from the external save file into the matrix vector so the player can keep playing.
*/
int* cargar() {
ifstream archivo;
int i = 4, vidas, altura, anchura, dif;
int *partida;
archivo.open("2048_savedata.txt", ios::in); //Open the file in read mode
//If the save cannot be loaded, report an error
if (archivo.fail())
{
cout << "Error al abrir la partida guardada. El fichero no existe o est corrupto\n";
exit(1);
}
archivo >> vidas;
archivo >> altura;
archivo >> anchura;
archivo >> dif;
partida = (int*)malloc(4 * sizeof(int) + altura * anchura * sizeof(int)); //Allocate memory for the game data
partida[0] = vidas; //Store the lives
partida[1] = altura; //Store the height
partida[2] = anchura; //Store the width
partida[3] = dif; //Store the difficulty
//Store the matrix
while (!archivo.eof()) { //While the end of the file has not been reached
archivo >> partida[i];
i++;
}
archivo.close(); //Close the file
return partida;
}
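/* MostrarEspecificaciones
* Queries device 0 with hipGetDeviceProperties and returns a 2-element array:
* [0] = maxThreadsPerBlock and [1] = maxGridSize[0]; main() uses [0] to reject boards larger than the device supports.
*/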
int* MostrarEspecificaciones()
{
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
int * especificacion;
especificacion = (int*)malloc(2 * sizeof(int));
for (int i = 0; i < 2; i++) {
especificacion[i] = 0;
}
especificacion[0] = prop.maxThreadsPerBlock;
especificacion[1] = *prop.maxGridSize;
return especificacion;
}
| 2b29c212af4aeb6770a373233b4cba5e57b33271.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <windows.h>
using namespace std;
void showMatriz(int *matriz, int anchura, int altura);
void generateSeeds(int *matriz, int ancho, int alto, int cantidad, char modo);
void gestionSemillas(int *matriz, int ancho, int numeroSemillas, int alto, char modo);
int checkFull(int *matriz, int tamano);
bool checkMove(int *matriz, int ancho, int alto);
void guardar(int vidas, int *tablero, int altura, int anchura, char dificultad);
int* cargar();
int* MostrarEspecificaciones();
cudaError_t cudaStatus;
/* add_up
* Kernel helper that merges upwards every pair of equal numbers.
*/
__device__ void add_up(int *matriz, int x, int y, int altura, int anchura)
{
if (x != 0 && y < anchura) //Threads of the first row do nothing: their cells are only written by the other threads
{
if (matriz[x*anchura + y] != 0) //If the cell is not 0, handle its possible merge or shift
{
if (matriz[x*anchura + y] == matriz[(x - 1)*anchura + y]) //If it equals the cell above, count how many cells of this column hold the same number
{
int iguales = 0;
iguales++;
for (int i = 1; i <= x; i++)
{
if (matriz[x*anchura + y] == matriz[(x - i)*anchura + y])
{
iguales++;
}
else {
break;
}
}
if (iguales % 2 == 0) //If the count is even they are merged; otherwise this number will be merged with another one and is not available
{
matriz[(x - 1)*anchura + y] = matriz[(x - 1)*anchura + y] * 2;
matriz[x*anchura + y] = 0;
}
}
else if (matriz[(x - 1)*anchura + y] == 0) //Check whether other threads have left a 0 behind so this cell can shift into it
{
matriz[(x - 1)*anchura + y] = matriz[x*anchura + y];
matriz[x*anchura + y] = 0;
}
}
}
}
/* stack_up
* Kernel helper that shifts every number upwards.
*/
__device__ void stack_up(int *matriz, int anchura, int altura, int x, int y) {
for (int i = altura - 1; i > 0; i--) //shift cell by cell, altura-1 times, so that even the last cell of a column can end up at the top
{
if ((x != 0) && (matriz[x*anchura + y] != 0) && matriz[x*anchura + (y - anchura)] == 0) //If the cell is in the first row, is 0, or the cell above is not 0, do nothing
{
matriz[x*anchura + (y - anchura)] = matriz[x*anchura + y]; //Otherwise, shift the cell up
matriz[x*anchura + y] = 0;
}
__syncthreads(); //synchronize so that all threads of the block perform these steps at the same time
}
}
/* mov_upK
* Kernel that moves the numbers upwards, merging them along the way
*/
__global__ void mov_upK(int *matriz, int anchura, int altura) {
int x = blockIdx.x;
int y = threadIdx.y;
stack_up(matriz, anchura, altura, x, y); //The calls are ordered like this to produce the expected move:
add_up(matriz, x, y, altura, anchura); //2 2 0 4 -> 4 4 0 0
__syncthreads();
stack_up(matriz, anchura, altura, x, y);
}
/* move_up
* Host helper that launches the mov_upK kernel
*/
cudaError_t move_up(int *matriz, int ancho, int alto) {
cudaError_t cudaStatus;
int *dev_m; //Device matrix where the results are gathered
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Error en setdevice");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_m, ancho*alto * sizeof(int)); //Allocate memory for the result matrix
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Error en Malloc");
goto Error;
}
cudaStatus = cudaMemcpy(dev_m, matriz, ancho*alto * sizeof(int), cudaMemcpyHostToDevice); //copy the initial data
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
//Set up the launch dimensions
dim3 dimgrid(alto, 1);
dim3 dimblock(1, ancho, 1);
mov_upK << < dimgrid, dimblock >> > (dev_m, ancho, alto);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Error en synchronize mov_up\n", cudaStatus);
goto Error;
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Error en mov_up");
goto Error;
}
	cudaStatus = cudaMemcpy(matriz, dev_m, ancho*alto * sizeof(int), cudaMemcpyDeviceToHost); //Copy the result back into the host variable
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Error en memcpy to host de mov_upK");
goto Error;
}
Error: //Free the device memory (also reached on error)
cudaFree(dev_m);
return cudaStatus;
}
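/* Editor's sketch (added for illustration, not part of the original program): a minimal host-side
 * call of move_up on a hard-coded 4x4 board. The board values and the helper name are invented; it
 * only assumes move_up as defined above and the CUDA/stdio headers this file already relies on. */
static void ejemplo_move_up(void)
{
	int tablero[16] = { 2, 0, 0, 2,
	                    2, 0, 4, 0,
	                    0, 0, 4, 0,
	                    8, 0, 0, 2 };
	if (move_up(tablero, 4, 4) == cudaSuccess) //ancho = 4, alto = 4
	{
		for (int i = 0; i < 16; i++)
			printf("%d%c", tablero[i], (i % 4 == 3) ? '\n' : '\t');
	}
}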
/* add_down
 * Device helper used by the kernel to merge (add) every pair of equal numbers downwards.
 */
__device__ void add_down(int *matriz, int x, int y, int altura, int anchura)
{
	if (x != altura - 1 && y < anchura) //Threads of the last row do nothing: their cells are written by the other threads
{
		if (matriz[x*anchura + y] != 0) //If the cell is not 0, handle its possible merge or shift
{
			if (matriz[x*anchura + y] == matriz[(x + 1)*anchura + y]) //If it equals the cell below, count how many cells in this column hold the same number
{
int iguales = 0;
iguales++;
				for (int i = 1; x + i < altura; i++) //strict bound so the read stays inside the last row
{
if (matriz[x*anchura + y] == matriz[(x + i)*anchura + y])
{
iguales++;
}
else {
break;
}
}
				if (iguales % 2 == 0) //If the count is even they merge; otherwise this cell will be merged with another one and is not available
{
matriz[(x + 1)*anchura + y] = matriz[(x + 1)*anchura + y] * 2;
matriz[x*anchura + y] = 0;
}
}
			else if (matriz[(x + 1)*anchura + y] == 0) //Check whether other threads left a 0 below so this cell can move down
{
matriz[(x + 1)*anchura + y] = matriz[x*anchura + y];
matriz[x*anchura + y] = 0;
}
}
}
}
/* stack_down
 * Device helper that shifts every number downwards.
 */
__device__ void stack_down(int *matriz, int anchura, int altura, int x, int y) {
	for (int i = altura - 1; i > 0; i--) //Shift cell by cell altura-1 times, so the top element can end up at the bottom of the column
{
		if ((x != altura - 1) && (matriz[x*anchura + y] != 0) && matriz[(x + 1)*anchura + y] == 0) //Do nothing if the cell is in the last row, is 0, or the cell below is not 0
{
			matriz[(x + 1)*anchura + y] = matriz[x*anchura + y]; //Otherwise move the cell down
matriz[x*anchura + y] = 0;
}
__syncthreads();
}
}
/* mov_downK
 * Kernel that moves the numbers downwards, merging them along the way
 */
__global__ void mov_downK(int *matriz, int anchura, int altura) {
int x = blockIdx.x;
int y = threadIdx.y;
	stack_down(matriz, anchura, altura, x, y); //The calls are ordered like this to implement the move:
add_down(matriz, x, y, altura, anchura); //2 2 0 4 -> 4 4 0 0
__syncthreads();
stack_down(matriz, anchura, altura, x, y);
}
/* move_down
 * Host method that launches the mov_downK kernel
 */
cudaError_t move_down(int *matriz, int ancho, int alto) {
cudaError_t cudaStatus;
	int *dev_m; //Device matrix where the results are gathered
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Error en setdevice");
goto Error;
}
	cudaStatus = cudaMalloc((void**)&dev_m, ancho*alto * sizeof(int)); //Allocate memory for the result matrix
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Error en Malloc");
goto Error;
}
	cudaStatus = cudaMemcpy(dev_m, matriz, ancho*alto * sizeof(int), cudaMemcpyHostToDevice); //Copy the initial data
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
	//Set up the launch dimensions
dim3 dimgrid(alto, 1);
dim3 dimblock(1, ancho, 1);
mov_downK << < dimgrid, dimblock >> > (dev_m, ancho, alto);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Error en synchronize mov_down\n", cudaStatus);
goto Error;
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Error en mov_down");
goto Error;
}
cudaStatus = cudaMemcpy(matriz, dev_m, ancho*alto * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Error en memcpy to host de mov_downK");
goto Error;
}
Error: //Free the device memory (also reached on error)
cudaFree(dev_m);
return cudaStatus;
}
/* add_left
 * Device helper used by the kernel to merge (add) every pair of equal numbers to the left.
 */
__device__ void add_left(int *matriz, int x, int y, int altura, int anchura)
{
	if (y != 0 && y < anchura) //Threads of the leftmost column do nothing: their cells are written by the other threads
{
		if (matriz[x*anchura + y] != 0) //If the cell is not 0, handle its possible merge or shift
{
			if (matriz[x*anchura + y] == matriz[x*anchura + (y - 1)]) //If it equals its left neighbour, count how many cells in this row hold the same number
{
int iguales = 0;
iguales++;
for (int i = 1; i <= y; i++)
{
if (matriz[x*anchura + y] == matriz[x*anchura + (y - i)])
{
iguales++;
}
else {
break;
}
}
				if (iguales % 2 == 0) //If the count is even they merge; otherwise this cell will be merged with another one and is not available
{
matriz[x*anchura + (y - 1)] = matriz[x*anchura + (y - 1)] * 2;
matriz[x*anchura + y] = 0;
}
}
			else if (matriz[x*anchura + (y - 1)] == 0) //Check whether other threads left a 0 to the left so this cell can move left
{
matriz[x*anchura + (y - 1)] = matriz[x*anchura + y];
matriz[x*anchura + y] = 0;
}
}
}
}
/* stack_left
 * Device helper that shifts every number to the left.
 */
__device__ void stack_left(int *matriz, int anchura, int altura, int x, int y) {
	for (int i = anchura - 1; i > 0; i--) //Shift cell by cell anchura-1 times, so the rightmost element can end up at the start of the row
{
		if ((y != 0) && (matriz[x*anchura + y] != 0) && matriz[x*anchura + (y - 1)] == 0) //Do nothing if the cell is in the first column, is 0, or the cell to its left is not 0
{
			matriz[x*anchura + (y - 1)] = matriz[x*anchura + y]; //Otherwise move the cell left
matriz[x*anchura + y] = 0;
}
		__syncthreads(); //Synchronise so all threads of the block perform each shift step at the same time
}
}
/* mov_leftK
 * Kernel that moves the numbers to the left, merging them along the way
 */
__global__ void mov_leftK(int *matriz, int anchura, int altura) {
int x = blockIdx.x;
int y = threadIdx.y;
	stack_left(matriz, anchura, altura, x, y); //The calls are ordered like this to implement the move:
add_left(matriz, x, y, altura, anchura); //2 2 0 4 -> 4 4 0 0
__syncthreads();
stack_left(matriz, anchura, altura, x, y);
}
/* move_left
 * Host method that launches the mov_leftK kernel
 */
cudaError_t move_left(int *matriz, int ancho, int alto) {
cudaError_t cudaStatus;
	int *dev_m; //Device matrix where the results are gathered
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Error en setdevice");
goto Error;
}
	cudaStatus = cudaMalloc((void**)&dev_m, ancho*alto * sizeof(int)); //Allocate memory for the result matrix
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Error en Malloc");
goto Error;
}
	cudaStatus = cudaMemcpy(dev_m, matriz, ancho*alto * sizeof(int), cudaMemcpyHostToDevice); //Copy the initial data
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
	//Set up the launch dimensions
dim3 dimgrid(alto, 1);
dim3 dimblock(1, ancho, 1);
mov_leftK << < dimgrid, dimblock >> > (dev_m, ancho, alto);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Error en synchronize mov_leftK\n", cudaStatus);
goto Error;
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Error en mov_leftK");
goto Error;
}
	cudaStatus = cudaMemcpy(matriz, dev_m, ancho*alto * sizeof(int), cudaMemcpyDeviceToHost); //Copy the result back into the host variable
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Error en memcpy to host de mov_leftK");
goto Error;
}
Error: //Free the device memory (also reached on error)
cudaFree(dev_m);
return cudaStatus;
}
/* add_right
 * Device helper used by the kernel to merge (add) every pair of equal numbers to the right.
 */
__device__ void add_right(int *matriz, int x, int y, int altura, int anchura)
{
	if (y != anchura - 1 && y < anchura) //Threads of the rightmost column do nothing: their cells are written by the other threads
{
		if (matriz[x*anchura + y] != 0) //If the cell is not 0, handle its possible merge or shift
{
			if (matriz[x*anchura + y] == matriz[x*anchura + (y + 1)]) //If it equals its right neighbour, count how many cells in this row hold the same number
{
int iguales = 0;
iguales++;
for (int i = 1; y + i < anchura; i++)
{
if (matriz[x*anchura + y] == matriz[x*anchura + (y + i)])
{
iguales++;
}
else {
break;
}
}
				if (iguales % 2 == 0) //If the count is even they merge; otherwise this cell will be merged with another one and is not available
{
matriz[x*anchura + (y + 1)] = matriz[x*anchura + (y + 1)] * 2;
matriz[x*anchura + y] = 0;
}
}
			else if (matriz[x*anchura + (y + 1)] == 0) //Check whether other threads left a 0 to the right so this cell can move right
{
matriz[x*anchura + (y + 1)] = matriz[x*anchura + y];
matriz[x*anchura + y] = 0;
}
}
}
}
/* stack_right
 * Device helper that shifts every number to the right.
 */
__device__ void stack_right(int *matriz, int anchura, int altura, int x, int y) {
	for (int i = anchura - 1; i > 0; i--) //Shift cell by cell anchura-1 times, so the leftmost element can end up at the end of the row
{
		if ((y != anchura - 1) && (matriz[x*anchura + y] != 0) && matriz[x*anchura + (y + 1)] == 0) //Do nothing if the cell is in the last column, is 0, or the cell to its right is not 0
{
			matriz[x*anchura + (y + 1)] = matriz[x*anchura + y]; //Otherwise move the cell right
matriz[x*anchura + y] = 0;
}
		__syncthreads(); //Synchronise so all threads of the block perform each shift step at the same time
}
}
/* mov_rightK
 * Kernel that moves the numbers to the right, merging them along the way
 */
__global__ void mov_rightK(int *matriz, int anchura, int altura) {
int x = blockIdx.x;
int y = threadIdx.y;
	stack_right(matriz, anchura, altura, x, y); //The calls are ordered like this to implement the move:
add_right(matriz, x, y, altura, anchura); //2 2 0 4 -> 4 4 0 0
__syncthreads();
stack_right(matriz, anchura, altura, x, y);
}
/* move_right
 * Host method that launches the mov_rightK kernel
 */
cudaError_t move_right(int *matriz, int ancho, int alto) { //dev_m below is the device matrix where the results are gathered
cudaError_t cudaStatus;
int *dev_m;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Error en setdevice");
goto Error;
}
	cudaStatus = cudaMalloc((void**)&dev_m, ancho*alto * sizeof(int)); //Allocate memory for the result matrix
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Error en Malloc");
goto Error;
}
cudaStatus = cudaMemcpy(dev_m, matriz, ancho*alto * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
	//Set up the launch dimensions
dim3 dimgrid(alto, 1);
dim3 dimblock(1, ancho, 1);
mov_rightK << < dimgrid, dimblock >> > (dev_m, ancho, alto);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Error en synchronize mov_right\n", cudaStatus);
goto Error;
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Error en mov_right");
goto Error;
}
	cudaStatus = cudaMemcpy(matriz, dev_m, ancho*alto * sizeof(int), cudaMemcpyDeviceToHost); //Copy the result back into the host variable
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Error en memcpy to host de mov_rightK");
goto Error;
}
Error: //Free the device memory (also reached on error)
cudaFree(dev_m);
return cudaStatus;
}
int main()
{
cudaError_t cudaStatus;
srand(time(NULL));
int ancho;
int alto;
int numSemillas = 0;
int vidas = 5;
char modo;
char cargado;
char ia;
int *datos;
int *matriz;
int *especificaciones;
especificaciones = MostrarEspecificaciones();
printf("Desea activar la IA? (y/n): ");
cin >> ia;
printf("Desea comprobar si hay partidas guardadas?(y/n): ");
cin >> cargado;
if (cargado == 'y')
{
datos = cargar();
vidas = datos[0];
alto = datos[1];
ancho = datos[2];
int dificultad = datos[3];
if (dificultad == 0)
{
modo = 'B';
numSemillas = 15;
}
else
{
modo = 'A';
numSemillas = 8;
}
matriz = (int*)malloc(ancho*alto * sizeof(int));
for (int i = 0; i < alto*ancho; i++)
{
matriz[i] = datos[4 + i];
}
}
else
{
printf("Indique el ancho de la matriz: ");
cin >> ancho;
printf("Indique el alto de la matriz: ");
cin >> alto;
if (ancho > especificaciones[0] || alto > especificaciones[0])
{
printf("La matriz seleccionada es demasiado grande para tu tarjeta grafica. Lo siento.");
return 0;
}
printf("Indique la dificultad del juego (B->Bajo / A->Alto): ");
cin >> modo;
switch (modo)
{
case 'B':
numSemillas = 15;
break;
case 'A':
numSemillas = 8;
break;
default:
break;
}
matriz = (int*)malloc(ancho*alto * sizeof(int));
for (int i = 0; i < ancho*alto; i++) {
matriz[i] = 0;
}
}
if (ia == 'n')
{
while ((!checkFull(matriz, ancho*alto) || checkMove(matriz, ancho, alto)) && vidas > 0)
{
system("CLS");
if (!(!checkFull(matriz, ancho*alto) || checkMove(matriz, ancho, alto)) && vidas > 0)
{
for (int i = 0; i < ancho*alto; i++) {
matriz[i] = 0;
}
vidas--;
}
gestionSemillas(matriz, ancho, numSemillas, alto, modo);
char movimiento = 'p';
printf("Vidas restantes: %d\n", vidas);
printf("Tablero:\n");
showMatriz(matriz, ancho, alto);
printf("Hacia donde quieres mover?(w/a/s/d) Para guardar teclee g: ");
cin >> movimiento;
switch (movimiento)
{
case 'w':
cudaStatus = move_up(matriz, ancho, alto);
break;
case 'a':
cudaStatus = move_left(matriz, ancho, alto);
break;
case 's':
cudaStatus = move_down(matriz, ancho, alto);
break;
case 'd':
cudaStatus = move_right(matriz, ancho, alto);
break;
case 'g':
guardar(vidas, matriz, alto, ancho, modo);
printf("Partida guardada, hasta pronto!");
return 0;
default:
break;
}
if (!(!checkFull(matriz, ancho*alto) || checkMove(matriz, ancho, alto)) && vidas > 0)
{
for (int i = 0; i < ancho*alto; i++) {
matriz[i] = 0;
}
vidas--;
}
}
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
}
else {
while ((!checkFull(matriz, ancho*alto) || checkMove(matriz, ancho, alto)) && vidas > 0)
{
if (!(!checkFull(matriz, ancho*alto) || checkMove(matriz, ancho, alto)) && vidas > 0)
{
for (int i = 0; i < ancho*alto; i++) {
matriz[i] = 0;
}
vidas--;
}
system("CLS");
gestionSemillas(matriz, ancho, numSemillas, alto, modo);
char movimiento = 'p';
printf("Vidas restantes: %d\n", vidas);
printf("Tablero:\n");
showMatriz(matriz, ancho, alto);
int r = rand() % 4;
switch (r)
{
case 0:
printf("Moviendo hacia arriba\n");
cudaStatus = move_up(matriz, ancho, alto);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "move_up failed!");
return 1;
}
break;
case 1:
printf("Moviendo hacia izquierda\n");
cudaStatus = move_left(matriz, ancho, alto);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "move_left failed!");
return 1;
}
break;
case 2:
printf("Moviendo hacia abajo\n");
cudaStatus = move_down(matriz, ancho, alto);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "move_down failed!");
return 1;
}
break;
case 3:
printf("Moviendo hacia derecha\n");
cudaStatus = move_right(matriz, ancho, alto);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "move_right failed!");
return 1;
}
break;
default:
break;
}
//Sleep(100);
if (!(!checkFull(matriz, ancho*alto) || checkMove(matriz, ancho, alto)) && vidas > 0)
{
for (int i = 0; i < ancho*alto; i++) {
matriz[i] = 0;
}
vidas--;
}
}
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
}
return 0;
}
/* showMatriz
 * Prints the matrix on screen.
 */
void showMatriz(int *matriz, int anchura, int altura)
{
for (int i = 0; i < altura; i++)
{
for (int j = 0; j < anchura; j++)
{
printf("%d\t", matriz[i*anchura + j]);
}
printf("\n");
}
}
/* generateSeeds
 * Generates a given number of seeds in the matrix, taking its dimensions and the difficulty mode into account.
 * In low difficulty "B" it creates 15 seeds with the values 2, 4 and 8; in high difficulty "A" it creates 8 seeds with the values 2 and 4.
 */
void generateSeeds(int *matriz, int ancho, int alto, int cantidad, char modo)
{
int total = ancho * alto;
int num;
if (modo == 'B')
{
for (int i = 0; i < cantidad; i++)
{
int r = rand() % total;
while (matriz[r] != 0) {
r = rand() % total;
}
int opcion = rand() % 100;
if (opcion <= 50) {
matriz[r] = 2;
}
else if (opcion <= 80 && opcion > 50) {
matriz[r] = 4;
}
else {
matriz[r] = 8;
}
}
}
else if (modo == 'A')
{
for (int i = 0; i < cantidad; i++)
{
int r = rand() % total;
while (matriz[r] != 0) {
r = rand() % total;
}
int opcion = rand() % 100;
if (opcion <= 60) {
matriz[r] = 2;
}
else {
matriz[r] = 4;
}
}
}
}
/* checkMove
 * Checks whether any move is still possible, so that the game does not end while the board is full but a merge is still available.
 * For every cell it looks in the four cartesian directions for a neighbour with the same number that could be merged, or a 0 the cell could move into.
 */
bool checkMove(int *matriz, int anchura, int altura)
{
for (int i = 0; i < anchura*(altura - 1); i++)
{
if (matriz[i] == matriz[i + anchura] || matriz[i + anchura] == 0)
{
return true;
}
}
for (int i = anchura; i < anchura*altura; i++)
{
if (matriz[i] == matriz[i - anchura] || matriz[i - anchura] == 0)
{
return true;
}
}
for (int i = 0; i < altura; i++)
{
for (int j = 0; j < anchura - 1; j++)
{
			if (matriz[i*anchura + j] == matriz[i*anchura + j + 1] || matriz[i*anchura + j + 1] == 0)
{
return true;
}
}
}
for (int i = 0; i < altura; i++)
{
for (int j = 1; j < anchura; j++)
{
			if (matriz[i*anchura + j] == matriz[i*anchura + j - 1] || matriz[i*anchura + j - 1] == 0)
{
return true;
}
}
}
return false;
}
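/* Editor's note (added for clarity, not part of the original source): with the column index j used
 * above, checkMove returns true as soon as any cell has an equal or empty neighbour. For example, on
 * the full 2x2 board {2, 4, 2, 8} the first loop finds matriz[0] == matriz[0 + anchura] (the two 2s),
 * so the game can continue, while the full board {2, 4, 8, 16} has no equal neighbours and no zeros,
 * so checkMove returns false.
 */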
/* checkFull
 * Checks whether the matrix is full, i.e. whether it still contains any 0.
 */
int checkFull(int *matriz, int tamano)
{
int flag = 1;
for (int i = 0; i < tamano; i++)
{
if (matriz[i] == 0)
{
flag = 0;
}
}
return flag;
}
/* gestionSemillas
 * Counts how many free cells the matrix has and then calls generateSeeds to create the required seeds,
 * always making sure the number of seeds to generate does not exceed the free cells available.
 */
void gestionSemillas(int *matriz, int ancho, int numeroSemillas, int alto, char modo)
{
if (!checkFull(matriz, ancho*alto))
{
int n = 0;
for (int i = 0; i < ancho*alto; i++)
{
if (matriz[i] == 0)
n++;
}
if (modo == 'B')
{
if (n < 15)
{
generateSeeds(matriz, ancho, alto, n, modo);
}
else {
generateSeeds(matriz, ancho, alto, numeroSemillas, modo);
}
}
else if (modo == 'A')
{
if (n < 8)
{
generateSeeds(matriz, ancho, alto, n, modo);
}
else {
generateSeeds(matriz, ancho, alto, numeroSemillas, modo);
}
}
}
}
/* guardar
 * Saves the game to an external (.txt) file so that it can be resumed later from the point where it was left.
 */
void guardar(int vidas, int *matriz, int altura, int anchura, char dificultad) {
ofstream archivo;
int dif;
archivo.open("2048_savedata.txt", ios::out); //Creamos o reemplazamos el archivo
	//If the game cannot be saved, report an error
if (archivo.fail())
{
cout << "Error al guardar la partida.\n";
exit(1);
}
if (dificultad == 'B')
{
dif = 0;
}
else
{
dif = 1;
}
	archivo << vidas << endl; //Save the lives
	archivo << altura << endl; //Save the height
	archivo << anchura << endl; //Save the width
	archivo << dif << endl; //Save the difficulty
	//Save the board
for (int i = 0; i < (altura*anchura); i++)
{
archivo << matriz[i] << " ";
}
cout << "\nPartida guardada con exito." << endl;
	archivo.close(); //Close the file
}
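/* Editor's note (added for illustration, not part of the original source): for vidas = 3 and a 2x2
 * board {2, 4, 0, 8} saved in easy mode ('B'), 2048_savedata.txt would contain, in this order:
 *   3
 *   2
 *   2
 *   0
 *   2 4 0 8
 * i.e. lives, height, width, difficulty (0 = 'B', 1 = 'A') and finally the board row by row.
 */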
/* cargar
 * Loads the game from the external (.txt) save file into the board vector so play can continue.
 */
int* cargar() {
ifstream archivo;
int i = 4, vidas, altura, anchura, dif;
int *partida;
archivo.open("2048_savedata.txt", ios::in); //Abrimos el archivo en modo lectura
//Si no se puede cargar ERROR
if (archivo.fail())
{
cout << "Error al abrir la partida guardada. El fichero no existe o está corrupto\n";
exit(1);
}
archivo >> vidas;
archivo >> altura;
archivo >> anchura;
archivo >> dif;
	partida = (int*)malloc(4 * sizeof(int) + altura * anchura * sizeof(int)); //Allocate memory for the saved-game data
	partida[0] = vidas; //Store the lives
	partida[1] = altura; //Store the height
	partida[2] = anchura; //Store the width
	partida[3] = dif; //Store the difficulty
	//Read the board
	while (i < 4 + altura * anchura) { //Read exactly the expected number of values, so nothing is written past the buffer
		archivo >> partida[i];
		i++;
	}
	archivo.close(); //Close the file
return partida;
}
int* MostrarEspecificaciones()
{
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
int * especificacion;
especificacion = (int*)malloc(2 * sizeof(int));
for (int i = 0; i < 2; i++) {
especificacion[i] = 0;
}
especificacion[0] = prop.maxThreadsPerBlock;
especificacion[1] = *prop.maxGridSize;
return especificacion;
}
|
36dea9005881fb030da8fbbf85ba7facee48c089.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Util.h"
#define THREADS_PER_BLOCK 1024
int N;
int comm_size, proc_Id;
int dim, num_rows; // dim of data, number of rows the proc has
vector<int> indices, ptrs;
vector<int> data;              // non-zero values of the sparse matrix (CSR)
vector<int> vec; // vector
long long int *own_output, *output;
vector<int> own_mat_indices, own_mat_ptrs;
vector<int> own_mat_data;  // this rank's slice of the matrix (CSR values)
string out_file = "Output_";
__device__ long long int multRow(int noOfElems, int *colIndices,
int *nonZeroElems, int *vecTOR) {
long long int sum = 0;
for (int j = 0; j < noOfElems; j++) {
long long int num1 = nonZeroElems[j];
long long int num2 = vecTOR[colIndices[j]];
sum += (num1) * (num2);
}
return sum;
}
__global__ void multKernel(int *firstElemsRows, int *colIndices,
int *nonZeroElems, int numRows, int *vecTOR,
long long int *output) {
int currRow = blockIdx.x * blockDim.x + threadIdx.x;
if (currRow < numRows) {
int rowStart = firstElemsRows[currRow];
int rowEnd = firstElemsRows[currRow + 1];
output[currRow] = multRow(rowEnd - rowStart, colIndices + rowStart,
nonZeroElems + rowStart, vecTOR);
}
}
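// Editor's sketch (added for illustration, not part of the original file): a host-side reference of
// the same CSR row product computed by multKernel, handy for checking small inputs. The parameter
// names mirror the kernel arguments; nothing in the original program calls this helper.
static long long int multRowHost(int rowStart, int rowEnd, const int *colIndices,
                                 const int *nonZeroElems, const int *vecTOR) {
    long long int sum = 0;
    for (int j = rowStart; j < rowEnd; j++)
        sum += (long long int)nonZeroElems[j] * (long long int)vecTOR[colIndices[j]];
    return sum;
}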
void getInput(char *in_file) {
if (proc_Id == 0) {
// get input in proc 0
ifstream f_in;
f_in.open(in_file);
// headers, dim and stuff
string junk, not_junk;
string temp;
int data_item;
int x, y;
f_in >> junk >> not_junk;
f_in >> junk >> dim >> not_junk;
f_in >> temp;
int xold = -1;
while (temp[0] != 'B') {
x = atoi(temp.c_str());
f_in >> y >> data_item;
data.push_back(data_item);
indices.push_back(y);
if (x != xold) {
int diff = x - xold - 1;
while (diff--) {
ptrs.push_back(indices.size() - 1);
}
ptrs.push_back(indices.size() - 1);
xold = x;
}
f_in >> temp;
}
int endIndex = ptrs.size();
int differ = dim - endIndex;
while (differ--) ptrs.push_back(indices.size());
vec.resize(dim);
for (int i = 0; i < dim; i++) {
f_in >> data_item;
vec[i] = data_item;
}
f_in.close();
// pick up left over rows for proc 0
num_rows += dim % comm_size;
}
// tell everyone about their load
MPI_Bcast(&dim, 1, MPI_INT, 0, MPI_COMM_WORLD);
// send the vector
vec.resize(dim);
MPI_Bcast(&vec[0], dim, MPI_INT, 0, MPI_COMM_WORLD);
int chunk = dim / comm_size;
num_rows += chunk;
// prepare the output vector
// send the matrix rows
if (proc_Id != 0) {
own_mat_ptrs.resize(num_rows + 1);
MPI_Recv(&own_mat_ptrs[0], own_mat_ptrs.size(), MPI_INT, 0, proc_Id,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
own_mat_data.resize(own_mat_ptrs.back());
own_mat_indices.resize(own_mat_ptrs.back());
MPI_Recv(&own_mat_indices[0], own_mat_indices.size(), MPI_INT, 0,
proc_Id, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&own_mat_data[0], own_mat_data.size(), MPI_INT, 0, proc_Id,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
} else {
int next_index = num_rows;
int next = ptrs[next_index];
int temp, old_index;
vector<int> temp_ptr, temp_indices;
vector<int> temp_data;
// own stuff
own_mat_indices = sub(indices, 0, next);
own_mat_data = sub(data, 0, next);
own_mat_ptrs = sub(ptrs, 0, num_rows);
own_mat_ptrs.push_back(own_mat_indices.size());
// send load to others
for (int i = 1; i < comm_size - 1; i++) {
old_index = next_index;
next_index += chunk;
temp = next;
next = ptrs[next_index];
temp_data = sub(data, temp, next);
temp_indices = sub(indices, temp, next);
temp_ptr = sub(ptrs, old_index, next_index);
mapped_subtract(temp_ptr, temp);
temp_ptr.push_back(temp_indices.size());
MPI_Send(&temp_ptr[0], temp_ptr.size(), MPI_INT, i, i,
MPI_COMM_WORLD);
MPI_Send(&temp_indices[0], temp_indices.size(), MPI_INT, i, i,
MPI_COMM_WORLD);
MPI_Send(&temp_data[0], temp_data.size(), MPI_INT, i, i,
MPI_COMM_WORLD);
}
// final process' load
temp_data = sub(data, next, data.size());
temp_indices = sub(indices, next, indices.size());
temp_ptr = sub(ptrs, next_index, ptrs.size());
mapped_subtract(temp_ptr, next);
temp_ptr.push_back(temp_indices.size());
MPI_Send(&temp_ptr[0], temp_ptr.size(), MPI_INT, comm_size - 1,
comm_size - 1, MPI_COMM_WORLD);
// cout << "sending to last2 "<< temp_indices.size() << endl;
MPI_Send(&temp_indices[0], temp_indices.size(), MPI_INT, comm_size - 1,
comm_size - 1, MPI_COMM_WORLD);
// cout << "sending to last3 "<< temp_data.size() << endl;
MPI_Send(&temp_data[0], temp_data.size(), MPI_INT, comm_size - 1,
comm_size - 1, MPI_COMM_WORLD);
}
}
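// Editor's note (added for clarity, not part of the original file): the split above gives every rank
// chunk = dim / comm_size rows and lets rank 0 absorb the remainder. For example, with dim = 10 and
// comm_size = 3, chunk = 3, so rank 0 handles 3 + (10 % 3) = 4 rows and ranks 1 and 2 handle 3 each.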
void getOutput() {
if (proc_Id == 0) {
output = new long long int[dim];
memcpy(output, own_output, num_rows * sizeof(long long int));
int len, totallen = num_rows;
for (int i = 1; i < comm_size; i++) {
MPI_Recv(&len, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(output + totallen, len, MPI_LONG_LONG_INT, i, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
totallen += len;
}
} else {
        // each non-root rank reports its row count and its slice to rank 0 exactly once
        MPI_Send(&num_rows, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
        MPI_Send(own_output, num_rows, MPI_LONG_LONG_INT, 0, 0,
                 MPI_COMM_WORLD);
}
}
void wrapperForCuda() {
int dimension = dim;
// Get the number of rows being handled by the current process
int numRows = num_rows;
// Each partition's rows[i], colsIndices[i] and values[i]
int *currPartitionFirstElemsRows;
currPartitionFirstElemsRows = &own_mat_ptrs[0];
int *currPartitionColIndices;
currPartitionColIndices = &own_mat_indices[0];
int *currPartitionNonZeroElems;
currPartitionNonZeroElems = &own_mat_data[0];
    // Common vector for all processes
int *vecTOR;
vecTOR = &vec[0];
// Device copies for computation
int *devCurrPartitionFirstElemsRows;
int *devCurrPartitionColIndices;
int *devCurrPartitionNonZeroElems;
int *devVec;
long long int *devFinalVec;
int size1 = own_mat_ptrs.size() * sizeof(int);
int size2 = own_mat_indices.size() * sizeof(int);
int size3 = own_mat_data.size() * sizeof(int);
// Current process's computed output
own_output = (long long int *)malloc(sizeof(long long int) * numRows);
// once
hipMalloc((void **)&devFinalVec, numRows * sizeof(long long int));
hipMalloc((void **)&devVec, dimension * sizeof(int));
N = numRows;
hipMalloc((void **)&devCurrPartitionFirstElemsRows, size1);
hipMalloc((void **)&devCurrPartitionColIndices, size2);
hipMalloc((void **)&devCurrPartitionNonZeroElems, size3);
hipMemcpy(devCurrPartitionFirstElemsRows, currPartitionFirstElemsRows,
size1, hipMemcpyHostToDevice);
hipMemcpy(devCurrPartitionColIndices, currPartitionColIndices, size2,
hipMemcpyHostToDevice);
hipMemcpy(devCurrPartitionNonZeroElems, currPartitionNonZeroElems, size3,
hipMemcpyHostToDevice);
hipMemcpy(devVec, vecTOR, dimension * sizeof(int), hipMemcpyHostToDevice);
// Tuning for the problem size
int blocks;
int thrds;
if (num_rows < THREADS_PER_BLOCK) {
blocks = 1;
thrds = num_rows;
} else {
thrds = THREADS_PER_BLOCK;
blocks = (num_rows / thrds) + 1;
}
hipLaunchKernelGGL(( multKernel), dim3(blocks), dim3(thrds), 0, 0,
devCurrPartitionFirstElemsRows, devCurrPartitionColIndices,
devCurrPartitionNonZeroElems, numRows, devVec, devFinalVec);
hipMemcpy(own_output, devFinalVec, numRows * sizeof(long long int),
hipMemcpyDeviceToHost);
}
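// Editor's note (added for clarity, not part of the original file): blocks = num_rows / thrds + 1
// launches one extra, mostly idle block whenever num_rows is an exact multiple of THREADS_PER_BLOCK;
// the currRow < numRows guard in multKernel keeps that harmless. The usual rounding-up idiom would be
//   int blocks = (num_rows + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;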
void computeForEachProcess() { wrapperForCuda(); }
void fileWrite(char *name) {
if (proc_Id == 0) {
ofstream f_out;
f_out.open(name);
for (int i = 0; i < dim; i++) {
f_out << output[i] << '\n';
}
f_out.close();
}
}
int main(int argc, char *argv[]) {
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
MPI_Comm_rank(MPI_COMM_WORLD, &proc_Id);
num_rows = 0;
string in_file = argv[1];
getInput(argv[1]);
//--- Till this point every process has a copy of the vector and a CSR
//representation of its slice of the matrix
computeForEachProcess();
// Gathering outputs from each process
getOutput();
fileWrite(argv[2]);
MPI_Finalize();
}
| 36dea9005881fb030da8fbbf85ba7facee48c089.cu | #include "Util.h"
#define THREADS_PER_BLOCK 1024
int N;
int comm_size, proc_Id;
int dim, num_rows; // dim of data, number of rows the proc has
vector<int> indices, ptrs;
vector<int> data;              // non-zero values of the sparse matrix (CSR)
vector<int> vec; // vector
long long int *own_output, *output;
vector<int> own_mat_indices, own_mat_ptrs;
vector<int> own_mat_data;  // this rank's slice of the matrix (CSR values)
string out_file = "Output_";
__device__ long long int multRow(int noOfElems, int *colIndices,
int *nonZeroElems, int *vecTOR) {
long long int sum = 0;
for (int j = 0; j < noOfElems; j++) {
long long int num1 = nonZeroElems[j];
long long int num2 = vecTOR[colIndices[j]];
sum += (num1) * (num2);
}
return sum;
}
__global__ void multKernel(int *firstElemsRows, int *colIndices,
int *nonZeroElems, int numRows, int *vecTOR,
long long int *output) {
int currRow = blockIdx.x * blockDim.x + threadIdx.x;
if (currRow < numRows) {
int rowStart = firstElemsRows[currRow];
int rowEnd = firstElemsRows[currRow + 1];
output[currRow] = multRow(rowEnd - rowStart, colIndices + rowStart,
nonZeroElems + rowStart, vecTOR);
}
}
void getInput(char *in_file) {
if (proc_Id == 0) {
// get input in proc 0
ifstream f_in;
f_in.open(in_file);
// headers, dim and stuff
string junk, not_junk;
string temp;
int data_item;
int x, y;
f_in >> junk >> not_junk;
f_in >> junk >> dim >> not_junk;
f_in >> temp;
int xold = -1;
while (temp[0] != 'B') {
x = atoi(temp.c_str());
f_in >> y >> data_item;
data.push_back(data_item);
indices.push_back(y);
if (x != xold) {
int diff = x - xold - 1;
while (diff--) {
ptrs.push_back(indices.size() - 1);
}
ptrs.push_back(indices.size() - 1);
xold = x;
}
f_in >> temp;
}
int endIndex = ptrs.size();
int differ = dim - endIndex;
while (differ--) ptrs.push_back(indices.size());
vec.resize(dim);
for (int i = 0; i < dim; i++) {
f_in >> data_item;
vec[i] = data_item;
}
f_in.close();
// pick up left over rows for proc 0
num_rows += dim % comm_size;
}
// tell everyone about their load
MPI_Bcast(&dim, 1, MPI_INT, 0, MPI_COMM_WORLD);
// send the vector
vec.resize(dim);
MPI_Bcast(&vec[0], dim, MPI_INT, 0, MPI_COMM_WORLD);
int chunk = dim / comm_size;
num_rows += chunk;
// prepare the output vector
// send the matrix rows
if (proc_Id != 0) {
own_mat_ptrs.resize(num_rows + 1);
MPI_Recv(&own_mat_ptrs[0], own_mat_ptrs.size(), MPI_INT, 0, proc_Id,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
own_mat_data.resize(own_mat_ptrs.back());
own_mat_indices.resize(own_mat_ptrs.back());
MPI_Recv(&own_mat_indices[0], own_mat_indices.size(), MPI_INT, 0,
proc_Id, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&own_mat_data[0], own_mat_data.size(), MPI_INT, 0, proc_Id,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
} else {
int next_index = num_rows;
int next = ptrs[next_index];
int temp, old_index;
vector<int> temp_ptr, temp_indices;
vector<int> temp_data;
// own stuff
own_mat_indices = sub(indices, 0, next);
own_mat_data = sub(data, 0, next);
own_mat_ptrs = sub(ptrs, 0, num_rows);
own_mat_ptrs.push_back(own_mat_indices.size());
// send load to others
for (int i = 1; i < comm_size - 1; i++) {
old_index = next_index;
next_index += chunk;
temp = next;
next = ptrs[next_index];
temp_data = sub(data, temp, next);
temp_indices = sub(indices, temp, next);
temp_ptr = sub(ptrs, old_index, next_index);
mapped_subtract(temp_ptr, temp);
temp_ptr.push_back(temp_indices.size());
MPI_Send(&temp_ptr[0], temp_ptr.size(), MPI_INT, i, i,
MPI_COMM_WORLD);
MPI_Send(&temp_indices[0], temp_indices.size(), MPI_INT, i, i,
MPI_COMM_WORLD);
MPI_Send(&temp_data[0], temp_data.size(), MPI_INT, i, i,
MPI_COMM_WORLD);
}
// final process' load
temp_data = sub(data, next, data.size());
temp_indices = sub(indices, next, indices.size());
temp_ptr = sub(ptrs, next_index, ptrs.size());
mapped_subtract(temp_ptr, next);
temp_ptr.push_back(temp_indices.size());
MPI_Send(&temp_ptr[0], temp_ptr.size(), MPI_INT, comm_size - 1,
comm_size - 1, MPI_COMM_WORLD);
// cout << "sending to last2 "<< temp_indices.size() << endl;
MPI_Send(&temp_indices[0], temp_indices.size(), MPI_INT, comm_size - 1,
comm_size - 1, MPI_COMM_WORLD);
// cout << "sending to last3 "<< temp_data.size() << endl;
MPI_Send(&temp_data[0], temp_data.size(), MPI_INT, comm_size - 1,
comm_size - 1, MPI_COMM_WORLD);
}
}
void getOutput() {
if (proc_Id == 0) {
output = new long long int[dim];
memcpy(output, own_output, num_rows * sizeof(long long int));
int len, totallen = num_rows;
for (int i = 1; i < comm_size; i++) {
MPI_Recv(&len, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(output + totallen, len, MPI_LONG_LONG_INT, i, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
totallen += len;
}
} else {
        // each non-root rank reports its row count and its slice to rank 0 exactly once
        MPI_Send(&num_rows, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
        MPI_Send(own_output, num_rows, MPI_LONG_LONG_INT, 0, 0,
                 MPI_COMM_WORLD);
}
}
void wrapperForCuda() {
int dimension = dim;
// Get the number of rows being handled by the current process
int numRows = num_rows;
// Each partition's rows[i], colsIndices[i] and values[i]
int *currPartitionFirstElemsRows;
currPartitionFirstElemsRows = &own_mat_ptrs[0];
int *currPartitionColIndices;
currPartitionColIndices = &own_mat_indices[0];
int *currPartitionNonZeroElems;
currPartitionNonZeroElems = &own_mat_data[0];
    // Common vector for all processes
int *vecTOR;
vecTOR = &vec[0];
// Device copies for computation
int *devCurrPartitionFirstElemsRows;
int *devCurrPartitionColIndices;
int *devCurrPartitionNonZeroElems;
int *devVec;
long long int *devFinalVec;
int size1 = own_mat_ptrs.size() * sizeof(int);
int size2 = own_mat_indices.size() * sizeof(int);
int size3 = own_mat_data.size() * sizeof(int);
// Current process's computed output
own_output = (long long int *)malloc(sizeof(long long int) * numRows);
// once
cudaMalloc((void **)&devFinalVec, numRows * sizeof(long long int));
cudaMalloc((void **)&devVec, dimension * sizeof(int));
N = numRows;
cudaMalloc((void **)&devCurrPartitionFirstElemsRows, size1);
cudaMalloc((void **)&devCurrPartitionColIndices, size2);
cudaMalloc((void **)&devCurrPartitionNonZeroElems, size3);
cudaMemcpy(devCurrPartitionFirstElemsRows, currPartitionFirstElemsRows,
size1, cudaMemcpyHostToDevice);
cudaMemcpy(devCurrPartitionColIndices, currPartitionColIndices, size2,
cudaMemcpyHostToDevice);
cudaMemcpy(devCurrPartitionNonZeroElems, currPartitionNonZeroElems, size3,
cudaMemcpyHostToDevice);
cudaMemcpy(devVec, vecTOR, dimension * sizeof(int), cudaMemcpyHostToDevice);
// Tuning for the problem size
int blocks;
int thrds;
if (num_rows < THREADS_PER_BLOCK) {
blocks = 1;
thrds = num_rows;
} else {
thrds = THREADS_PER_BLOCK;
blocks = (num_rows / thrds) + 1;
}
multKernel<<<blocks, thrds>>>(
devCurrPartitionFirstElemsRows, devCurrPartitionColIndices,
devCurrPartitionNonZeroElems, numRows, devVec, devFinalVec);
cudaMemcpy(own_output, devFinalVec, numRows * sizeof(long long int),
cudaMemcpyDeviceToHost);
}
void computeForEachProcess() { wrapperForCuda(); }
void fileWrite(char *name) {
if (proc_Id == 0) {
ofstream f_out;
f_out.open(name);
for (int i = 0; i < dim; i++) {
f_out << output[i] << '\n';
}
f_out.close();
}
}
int main(int argc, char *argv[]) {
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
MPI_Comm_rank(MPI_COMM_WORLD, &proc_Id);
num_rows = 0;
string in_file = argv[1];
getInput(argv[1]);
//--- Till this point every process has a copy of the vector and a CSR
//representation of its slice of the matrix
computeForEachProcess();
// Gathering outputs from each process
getOutput();
fileWrite(argv[2]);
MPI_Finalize();
}
|
79904bf59b4148448135311a637364888e338777.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2019-2022 by XGBoost Contributors
*
* \file data.cu
* \brief Handles setting metainfo from array interface.
*/
#include "../common/cuda_context.cuh"
#include "../common/device_helpers.cuh"
#include "../common/linalg_op.cuh"
#include "array_interface.h"
#include "device_adapter_hip.cuh"
#include "simple_dmatrix.h"
#include "validation.h"
#include "xgboost/data.h"
#include "xgboost/json.h"
#include "xgboost/logging.h"
namespace xgboost {
namespace {
auto SetDeviceToPtr(void const* ptr) {
hipPointerAttribute_t attr;
dh::safe_cuda(hipPointerGetAttributes(&attr, ptr));
int32_t ptr_device = attr.device;
dh::safe_cuda(hipSetDevice(ptr_device));
return ptr_device;
}
template <typename T, int32_t D>
void CopyTensorInfoImpl(CUDAContext const* ctx, Json arr_interface, linalg::Tensor<T, D>* p_out) {
ArrayInterface<D> array(arr_interface);
if (array.n == 0) {
p_out->SetDevice(0);
p_out->Reshape(array.shape);
return;
}
CHECK_EQ(array.valid.Capacity(), 0)
<< "Meta info like label or weight can not have missing value.";
auto ptr_device = SetDeviceToPtr(array.data);
p_out->SetDevice(ptr_device);
if (array.is_contiguous && array.type == ToDType<T>::kType) {
p_out->ModifyInplace([&](HostDeviceVector<T>* data, common::Span<size_t, D> shape) {
// set shape
std::copy(array.shape, array.shape + D, shape.data());
// set data
data->Resize(array.n);
dh::safe_cuda(hipMemcpyAsync(data->DevicePointer(), array.data, array.n * sizeof(T),
hipMemcpyDefault, ctx->Stream()));
});
return;
}
p_out->Reshape(array.shape);
auto t = p_out->View(ptr_device);
linalg::ElementWiseTransformDevice(
t,
[=] __device__(size_t i, T) {
return linalg::detail::Apply(TypedIndex<T, D>{array},
linalg::UnravelIndex<D>(i, array.shape));
},
ctx->Stream());
}
void CopyGroupInfoImpl(ArrayInterface<1> column, std::vector<bst_group_t>* out) {
CHECK(column.type != ArrayInterfaceHandler::kF4 && column.type != ArrayInterfaceHandler::kF8)
<< "Expected integer for group info.";
auto ptr_device = SetDeviceToPtr(column.data);
CHECK_EQ(ptr_device, dh::CurrentDevice());
dh::TemporaryArray<bst_group_t> temp(column.Shape(0));
auto d_tmp = temp.data().get();
dh::LaunchN(column.Shape(0),
[=] __device__(size_t idx) { d_tmp[idx] = TypedIndex<size_t, 1>{column}(idx); });
auto length = column.Shape(0);
out->resize(length + 1);
out->at(0) = 0;
thrust::copy(temp.data(), temp.data() + length, out->begin() + 1);
std::partial_sum(out->begin(), out->end(), out->begin());
}
void CopyQidImpl(ArrayInterface<1> array_interface, std::vector<bst_group_t>* p_group_ptr) {
auto &group_ptr_ = *p_group_ptr;
auto it = dh::MakeTransformIterator<uint32_t>(
thrust::make_counting_iterator(0ul), [array_interface] __device__(size_t i) {
return TypedIndex<uint32_t, 1>{array_interface}(i);
});
dh::caching_device_vector<bool> flag(1);
auto d_flag = dh::ToSpan(flag);
auto d = SetDeviceToPtr(array_interface.data);
dh::LaunchN(1, [=] __device__(size_t) { d_flag[0] = true; });
dh::LaunchN(array_interface.Shape(0) - 1, [=] __device__(size_t i) {
auto typed = TypedIndex<uint32_t, 1>{array_interface};
if (typed(i) > typed(i + 1)) {
d_flag[0] = false;
}
});
bool non_dec = true;
dh::safe_cuda(hipMemcpy(&non_dec, flag.data().get(), sizeof(bool),
hipMemcpyDeviceToHost));
CHECK(non_dec) << "`qid` must be sorted in increasing order along with data.";
size_t bytes = 0;
dh::caching_device_vector<uint32_t> out(array_interface.Shape(0));
dh::caching_device_vector<uint32_t> cnt(array_interface.Shape(0));
HostDeviceVector<int> d_num_runs_out(1, 0, d);
hipcub::DeviceRunLengthEncode::Encode(
nullptr, bytes, it, out.begin(), cnt.begin(),
d_num_runs_out.DevicePointer(), array_interface.Shape(0));
dh::caching_device_vector<char> tmp(bytes);
hipcub::DeviceRunLengthEncode::Encode(
tmp.data().get(), bytes, it, out.begin(), cnt.begin(),
d_num_runs_out.DevicePointer(), array_interface.Shape(0));
auto h_num_runs_out = d_num_runs_out.HostSpan()[0];
group_ptr_.clear();
group_ptr_.resize(h_num_runs_out + 1, 0);
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::inclusive_scan(thrust::hip::par(alloc), cnt.begin(),
cnt.begin() + h_num_runs_out, cnt.begin());
thrust::copy(cnt.begin(), cnt.begin() + h_num_runs_out,
group_ptr_.begin() + 1);
}
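// Editor's note (added for clarity, not part of the original file): CopyQidImpl turns the sorted qid
// column into group boundaries via run-length encoding followed by an inclusive scan. For example,
// qid = [0, 0, 1, 1, 1, 7] encodes to run counts [2, 3, 1], which scan to [2, 5, 6] and give
// group_ptr_ = [0, 2, 5, 6].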
} // namespace
void MetaInfo::SetInfoFromCUDA(Context const& ctx, StringView key, Json array) {
// multi-dim float info
if (key == "base_margin") {
CopyTensorInfoImpl(ctx.CUDACtx(), array, &base_margin_);
return;
} else if (key == "label") {
CopyTensorInfoImpl(ctx.CUDACtx(), array, &labels);
auto ptr = labels.Data()->ConstDevicePointer();
auto valid = thrust::none_of(thrust::device, ptr, ptr + labels.Size(), data::LabelsCheck{});
CHECK(valid) << "Label contains NaN, infinity or a value too large.";
return;
}
// uint info
if (key == "group") {
ArrayInterface<1> array_interface{array};
CopyGroupInfoImpl(array_interface, &group_ptr_);
data::ValidateQueryGroup(group_ptr_);
return;
} else if (key == "qid") {
ArrayInterface<1> array_interface{array};
CopyQidImpl(array_interface, &group_ptr_);
data::ValidateQueryGroup(group_ptr_);
return;
}
// float info
linalg::Tensor<float, 1> t;
CopyTensorInfoImpl(ctx.CUDACtx(), array, &t);
if (key == "weight") {
this->weights_ = std::move(*t.Data());
auto ptr = weights_.ConstDevicePointer();
auto valid = thrust::none_of(thrust::device, ptr, ptr + weights_.Size(), data::WeightsCheck{});
CHECK(valid) << "Weights must be positive values.";
} else if (key == "label_lower_bound") {
this->labels_lower_bound_ = std::move(*t.Data());
} else if (key == "label_upper_bound") {
this->labels_upper_bound_ = std::move(*t.Data());
} else if (key == "feature_weights") {
this->feature_weights = std::move(*t.Data());
auto d_feature_weights = feature_weights.ConstDeviceSpan();
auto valid =
thrust::none_of(ctx.CUDACtx()->CTP(), d_feature_weights.data(),
d_feature_weights.data() + d_feature_weights.size(), data::WeightsCheck{});
CHECK(valid) << "Feature weight must be greater than 0.";
} else {
LOG(FATAL) << "Unknown key for MetaInfo: " << key;
}
}
template <typename AdapterT>
DMatrix* DMatrix::Create(AdapterT* adapter, float missing, int nthread,
const std::string& cache_prefix, DataSplitMode data_split_mode) {
CHECK_EQ(cache_prefix.size(), 0)
<< "Device memory construction is not currently supported with external "
"memory.";
return new data::SimpleDMatrix(adapter, missing, nthread, data_split_mode);
}
template DMatrix* DMatrix::Create<data::CudfAdapter>(
data::CudfAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix, DataSplitMode data_split_mode);
template DMatrix* DMatrix::Create<data::CupyAdapter>(
data::CupyAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix, DataSplitMode data_split_mode);
} // namespace xgboost
| 79904bf59b4148448135311a637364888e338777.cu | /**
* Copyright 2019-2022 by XGBoost Contributors
*
* \file data.cu
* \brief Handles setting metainfo from array interface.
*/
#include "../common/cuda_context.cuh"
#include "../common/device_helpers.cuh"
#include "../common/linalg_op.cuh"
#include "array_interface.h"
#include "device_adapter.cuh"
#include "simple_dmatrix.h"
#include "validation.h"
#include "xgboost/data.h"
#include "xgboost/json.h"
#include "xgboost/logging.h"
namespace xgboost {
namespace {
auto SetDeviceToPtr(void const* ptr) {
cudaPointerAttributes attr;
dh::safe_cuda(cudaPointerGetAttributes(&attr, ptr));
int32_t ptr_device = attr.device;
dh::safe_cuda(cudaSetDevice(ptr_device));
return ptr_device;
}
template <typename T, int32_t D>
void CopyTensorInfoImpl(CUDAContext const* ctx, Json arr_interface, linalg::Tensor<T, D>* p_out) {
ArrayInterface<D> array(arr_interface);
if (array.n == 0) {
p_out->SetDevice(0);
p_out->Reshape(array.shape);
return;
}
CHECK_EQ(array.valid.Capacity(), 0)
<< "Meta info like label or weight can not have missing value.";
auto ptr_device = SetDeviceToPtr(array.data);
p_out->SetDevice(ptr_device);
if (array.is_contiguous && array.type == ToDType<T>::kType) {
p_out->ModifyInplace([&](HostDeviceVector<T>* data, common::Span<size_t, D> shape) {
// set shape
std::copy(array.shape, array.shape + D, shape.data());
// set data
data->Resize(array.n);
dh::safe_cuda(cudaMemcpyAsync(data->DevicePointer(), array.data, array.n * sizeof(T),
cudaMemcpyDefault, ctx->Stream()));
});
return;
}
p_out->Reshape(array.shape);
auto t = p_out->View(ptr_device);
linalg::ElementWiseTransformDevice(
t,
[=] __device__(size_t i, T) {
return linalg::detail::Apply(TypedIndex<T, D>{array},
linalg::UnravelIndex<D>(i, array.shape));
},
ctx->Stream());
}
void CopyGroupInfoImpl(ArrayInterface<1> column, std::vector<bst_group_t>* out) {
CHECK(column.type != ArrayInterfaceHandler::kF4 && column.type != ArrayInterfaceHandler::kF8)
<< "Expected integer for group info.";
auto ptr_device = SetDeviceToPtr(column.data);
CHECK_EQ(ptr_device, dh::CurrentDevice());
dh::TemporaryArray<bst_group_t> temp(column.Shape(0));
auto d_tmp = temp.data().get();
dh::LaunchN(column.Shape(0),
[=] __device__(size_t idx) { d_tmp[idx] = TypedIndex<size_t, 1>{column}(idx); });
auto length = column.Shape(0);
out->resize(length + 1);
out->at(0) = 0;
thrust::copy(temp.data(), temp.data() + length, out->begin() + 1);
std::partial_sum(out->begin(), out->end(), out->begin());
}
void CopyQidImpl(ArrayInterface<1> array_interface, std::vector<bst_group_t>* p_group_ptr) {
auto &group_ptr_ = *p_group_ptr;
auto it = dh::MakeTransformIterator<uint32_t>(
thrust::make_counting_iterator(0ul), [array_interface] __device__(size_t i) {
return TypedIndex<uint32_t, 1>{array_interface}(i);
});
dh::caching_device_vector<bool> flag(1);
auto d_flag = dh::ToSpan(flag);
auto d = SetDeviceToPtr(array_interface.data);
dh::LaunchN(1, [=] __device__(size_t) { d_flag[0] = true; });
dh::LaunchN(array_interface.Shape(0) - 1, [=] __device__(size_t i) {
auto typed = TypedIndex<uint32_t, 1>{array_interface};
if (typed(i) > typed(i + 1)) {
d_flag[0] = false;
}
});
bool non_dec = true;
dh::safe_cuda(cudaMemcpy(&non_dec, flag.data().get(), sizeof(bool),
cudaMemcpyDeviceToHost));
CHECK(non_dec) << "`qid` must be sorted in increasing order along with data.";
size_t bytes = 0;
dh::caching_device_vector<uint32_t> out(array_interface.Shape(0));
dh::caching_device_vector<uint32_t> cnt(array_interface.Shape(0));
HostDeviceVector<int> d_num_runs_out(1, 0, d);
cub::DeviceRunLengthEncode::Encode(
nullptr, bytes, it, out.begin(), cnt.begin(),
d_num_runs_out.DevicePointer(), array_interface.Shape(0));
dh::caching_device_vector<char> tmp(bytes);
cub::DeviceRunLengthEncode::Encode(
tmp.data().get(), bytes, it, out.begin(), cnt.begin(),
d_num_runs_out.DevicePointer(), array_interface.Shape(0));
auto h_num_runs_out = d_num_runs_out.HostSpan()[0];
group_ptr_.clear();
group_ptr_.resize(h_num_runs_out + 1, 0);
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::inclusive_scan(thrust::cuda::par(alloc), cnt.begin(),
cnt.begin() + h_num_runs_out, cnt.begin());
thrust::copy(cnt.begin(), cnt.begin() + h_num_runs_out,
group_ptr_.begin() + 1);
}
} // namespace
void MetaInfo::SetInfoFromCUDA(Context const& ctx, StringView key, Json array) {
// multi-dim float info
if (key == "base_margin") {
CopyTensorInfoImpl(ctx.CUDACtx(), array, &base_margin_);
return;
} else if (key == "label") {
CopyTensorInfoImpl(ctx.CUDACtx(), array, &labels);
auto ptr = labels.Data()->ConstDevicePointer();
auto valid = thrust::none_of(thrust::device, ptr, ptr + labels.Size(), data::LabelsCheck{});
CHECK(valid) << "Label contains NaN, infinity or a value too large.";
return;
}
// uint info
if (key == "group") {
ArrayInterface<1> array_interface{array};
CopyGroupInfoImpl(array_interface, &group_ptr_);
data::ValidateQueryGroup(group_ptr_);
return;
} else if (key == "qid") {
ArrayInterface<1> array_interface{array};
CopyQidImpl(array_interface, &group_ptr_);
data::ValidateQueryGroup(group_ptr_);
return;
}
// float info
linalg::Tensor<float, 1> t;
CopyTensorInfoImpl(ctx.CUDACtx(), array, &t);
if (key == "weight") {
this->weights_ = std::move(*t.Data());
auto ptr = weights_.ConstDevicePointer();
auto valid = thrust::none_of(thrust::device, ptr, ptr + weights_.Size(), data::WeightsCheck{});
CHECK(valid) << "Weights must be positive values.";
} else if (key == "label_lower_bound") {
this->labels_lower_bound_ = std::move(*t.Data());
} else if (key == "label_upper_bound") {
this->labels_upper_bound_ = std::move(*t.Data());
} else if (key == "feature_weights") {
this->feature_weights = std::move(*t.Data());
auto d_feature_weights = feature_weights.ConstDeviceSpan();
auto valid =
thrust::none_of(ctx.CUDACtx()->CTP(), d_feature_weights.data(),
d_feature_weights.data() + d_feature_weights.size(), data::WeightsCheck{});
CHECK(valid) << "Feature weight must be greater than 0.";
} else {
LOG(FATAL) << "Unknown key for MetaInfo: " << key;
}
}
template <typename AdapterT>
DMatrix* DMatrix::Create(AdapterT* adapter, float missing, int nthread,
const std::string& cache_prefix, DataSplitMode data_split_mode) {
CHECK_EQ(cache_prefix.size(), 0)
<< "Device memory construction is not currently supported with external "
"memory.";
return new data::SimpleDMatrix(adapter, missing, nthread, data_split_mode);
}
template DMatrix* DMatrix::Create<data::CudfAdapter>(
data::CudfAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix, DataSplitMode data_split_mode);
template DMatrix* DMatrix::Create<data::CupyAdapter>(
data::CupyAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix, DataSplitMode data_split_mode);
} // namespace xgboost
|
2ff0379f273f0e09f61e3b3908d20893cc5ffd7d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "parametric_rectified_linear_layer_tester_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "../parametric_rectified_linear_layer.h"
#include "../nn_types.h"
namespace nnforge
{
namespace cuda
{
__global__ void parametric_rectified_linear_kernel(
float * __restrict output,
const float * __restrict input,
const float * __restrict data,
int elem_count_per_feature_map,
int feature_map_count,
int entry_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
int feature_map_id = blockDim.y * blockIdx.y + threadIdx.y;
int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
if ((elem_id < elem_count_per_feature_map) && (feature_map_id < feature_map_count) && (entry_id < entry_count))
{
float a = __load_nc(data + feature_map_id);
int offset = (entry_id * feature_map_count + feature_map_id) * elem_count_per_feature_map + elem_id;
float input_val = input[offset];
float output_val = input_val * (input_val >= 0.0F ? 1.0F : a);
output[offset] = output_val;
}
}
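		// Editor's note (added for clarity, not part of the original layer): the kernel above applies the
		// PReLU rule out = in for in >= 0 and out = a * in otherwise, with one slope `a` per feature map.
		// For example, with a = 0.25 an input feature map {-4, -1, 0, 3} maps to {-1, -0.25, 0, 3}.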
parametric_rectified_linear_layer_tester_cuda::parametric_rectified_linear_layer_tester_cuda()
{
}
parametric_rectified_linear_layer_tester_cuda::~parametric_rectified_linear_layer_tester_cuda()
{
}
void parametric_rectified_linear_layer_tester_cuda::enqueue_forward_propagation(
hipStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count);
hipLaunchKernelGGL(( parametric_rectified_linear_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*output_buffer,
*input_buffers[0],
*data[0],
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count);
}
int parametric_rectified_linear_layer_tester_cuda::get_input_index_layer_can_write() const
{
return 0;
}
}
}
| 2ff0379f273f0e09f61e3b3908d20893cc5ffd7d.cu | /*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "parametric_rectified_linear_layer_tester_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "../parametric_rectified_linear_layer.h"
#include "../nn_types.h"
namespace nnforge
{
namespace cuda
{
__global__ void parametric_rectified_linear_kernel(
float * __restrict output,
const float * __restrict input,
const float * __restrict data,
int elem_count_per_feature_map,
int feature_map_count,
int entry_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
int feature_map_id = blockDim.y * blockIdx.y + threadIdx.y;
int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
if ((elem_id < elem_count_per_feature_map) && (feature_map_id < feature_map_count) && (entry_id < entry_count))
{
float a = __load_nc(data + feature_map_id);
int offset = (entry_id * feature_map_count + feature_map_id) * elem_count_per_feature_map + elem_id;
float input_val = input[offset];
float output_val = input_val * (input_val >= 0.0F ? 1.0F : a);
output[offset] = output_val;
}
}
parametric_rectified_linear_layer_tester_cuda::parametric_rectified_linear_layer_tester_cuda()
{
}
parametric_rectified_linear_layer_tester_cuda::~parametric_rectified_linear_layer_tester_cuda()
{
}
void parametric_rectified_linear_layer_tester_cuda::enqueue_forward_propagation(
cudaStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count);
parametric_rectified_linear_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*output_buffer,
*input_buffers[0],
*data[0],
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count);
}
int parametric_rectified_linear_layer_tester_cuda::get_input_index_layer_can_write() const
{
return 0;
}
}
}
|
2760dc635e77357c08473f43b4423fcd7c9636d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHTensorMath.h"
#include "THHGeneral.h"
#include "THHBlas.h"
#include "THHTensorCopy.h"
#include "THHTensorRandom.h"
#include "THHApply.cuh"
#include "THHReduce.cuh"
#include <thrust/device_ptr.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/inner_product.h>
#if TORCH_HIP_VERSION >= 7000
#include <thrust/system/hip/execution_policy.h>
#endif
struct TensorPowOp {
TensorPowOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = powf(*in, val);
}
__device__ __forceinline__ void operator()(float* v) {
*v = powf(*v, val);
}
const float val;
};
void THCudaTensor_pow(THCState *state, THCudaTensor *self_, THCudaTensor *src, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
if (self_ == src) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorPowOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THCudaTensor_pointwiseApply2(state, self_, src, TensorPowOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
struct TensorTPowOp {
TensorTPowOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = powf(val, *in);
}
__device__ __forceinline__ void operator()(float* v) {
*v = powf(val, *v);
}
const float val;
};
void THCudaTensor_tpow(THCState *state, THCudaTensor *self_, float value, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
if (self_ == src) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorTPowOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THCudaTensor_pointwiseApply2(state, self_, src, TensorTPowOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
struct TensorATan2Op {
__device__ __forceinline__ void operator()(float* out, float* a, float* b) {
*out = atan2f(*a, *b);
}
};
void THCudaTensor_atan2(THCState *state, THCudaTensor *self_, THCudaTensor *tx, THCudaTensor *ty)
{
THAssert(THCudaTensor_checkGPU(state, 3, self_, tx, ty));
THArgCheck(THCudaTensor_nElement(state, tx) ==
THCudaTensor_nElement(state, ty), 3, "sizes do not match");
THCudaTensor_resizeAs(state, self_, tx);
if (!THCudaTensor_pointwiseApply3(state, self_, tx, ty, TensorATan2Op())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
struct TensorClampOp {
TensorClampOp(float min, float max) : minValue(min), maxValue(max) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = max(min(*in, maxValue), minValue);
}
__device__ __forceinline__ void operator()(float* v) {
*v = max(min(*v, maxValue), minValue);
}
const float minValue;
const float maxValue;
};
void THCudaTensor_clamp(THCState *state, THCudaTensor *self_, THCudaTensor *src, float min_value,
float max_value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
if (self_ == src) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorClampOp(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THCudaTensor_pointwiseApply2(state, self_, src, TensorClampOp(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
struct TensorSignOp {
__device__ __forceinline__ void operator()(float* out, float* in) {
float orig = *in;
*out = (orig > 0) - (orig < 0);
}
__device__ __forceinline__ void operator()(float* v) {
float orig = *v;
*v = (orig > 0) - (orig < 0);
}
};
void THCudaTensor_sign(THCState *state, THCudaTensor *self_, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
if (self_ == src) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorSignOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THCudaTensor_pointwiseApply2(state, self_, src, TensorSignOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
float THCudaTensor_meanall(THCState *state, THCudaTensor *self)
{
THAssert(THCudaTensor_checkGPU(state, 1, self));
THArgCheck(self->nDimension > 0, 1, "empty Tensor");
return THCudaTensor_sumall(state, self)/THCudaTensor_nElement(state, self);
}
void
THCudaTensor_mean(THCState *state, THCudaTensor *self, THCudaTensor *src, long dim)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
THCudaTensor_sum(state, self, src, dim);
THCudaTensor_div(state, self, self, THCudaTensor_size(state, src, dim));
}
struct square_functor
{
const float mean;
square_functor(float mean_) : mean(mean_) {}
__host__ __device__ float operator()(const float& x) const
{
return (x-mean)*(x-mean);
}
};
float THCudaTensor_varall(THCState *state, THCudaTensor *self)
{
THAssert(THCudaTensor_checkGPU(state, 1, self));
self = THCudaTensor_newContiguous(state, self);
long size = THCudaTensor_nElement(state, self);
thrust::device_ptr<float> self_data(THCudaTensor_data(state, self));
float mean = THCudaTensor_meanall(state, self);
float result =
thrust::transform_reduce(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par.on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, square_functor(mean),
(float)0, thrust::plus<float>());
result = result/(THCudaTensor_nElement(state, self)-1);
THCudaTensor_free(state, self);
return result;
}
float THCudaTensor_stdall(THCState *state, THCudaTensor *self)
{
THAssert(THCudaTensor_checkGPU(state, 1, self));
return sqrt(THCudaTensor_varall(state, self));
}
// Given the sum of values and the sum of squares, compute the variance or standard deviation.
template<bool flag, bool apply_sqrt>
__forceinline__ __device__ float THCudaTensor_computeVar(float sum, float sum2, unsigned row_size) {
if (flag) {
sum /= row_size;
sum2 /= row_size;
sum2 -= sum * sum;
sum2 = (sum2 < 0 ? 0 : sum2);
}
else {
sum /= row_size;
sum2 /= row_size - 1;
sum2 -= ((float)row_size) / ((float)(row_size - 1)) * sum * sum;
sum2 = (sum2 < 0 ? 0 : sum2);
}
if (apply_sqrt)
return sqrt(sum2);
else
return sum2;
}
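/* A small worked check of the identities above, with sum = sum(x) and sum2 = sum(x^2):
 * for a row x = {1, 2, 3} (row_size n = 3, sum = 6, sum2 = 14)
 *   flag set   (biased):    14/3 - (6/3)^2        = 2/3
 *   flag clear (unbiased):  14/2 - (3/2)*(6/3)^2  = 1
 * matching sum((x - mean)^2)/n and sum((x - mean)^2)/(n - 1) respectively; the
 * clamp to zero only guards against tiny negative results from rounding. */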
/* Compute the variance (or standard deviation) along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to compute the variance;
* - if flag is set, normalize by `row_size` instead of `row_size - 1`
* - if apply_sqrt is set, compute the standard deviation instead of variance
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<bool flag, bool apply_sqrt>
__global__ void THCudaTensor_kernel_varOuterDim(float *tgt, float *src_, unsigned num_orows, unsigned num_irows, unsigned row_size)
{
for (unsigned orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (unsigned irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
float *src = src_ + orow * row_size * num_irows + irow;
float sum = 0, sum2 = 0;
for (unsigned col = 0; col < row_size; ++col) {
float val = *src;
sum += val;
sum2 += val * val;
src += num_irows;
}
tgt[orow * num_irows + irow] = THCudaTensor_computeVar<flag, apply_sqrt>(sum, sum2, row_size);
}
}
}
template<bool apply_sqrt>
__host__ void THCudaTensor_varOuterDim(THCState *state, THCudaTensor *tgt, THCudaTensor *src, long dimension, int flag)
{
unsigned ndim = THCudaTensor_nDimension(state, src);
// Treat all outer dimensions (i.e. dim < dimension) as one.
unsigned num_orows = 1;
for (unsigned dim = 0; dim < dimension; dim++) {
num_orows *= THCudaTensor_size(state, src, dim);
}
unsigned row_size = THCudaTensor_size(state, src, dimension);
// Treat all inner dimensions (i.e. dim > dimension) as one.
unsigned num_irows = 1;
for (unsigned dim = dimension + 1; dim < ndim; dim++) {
num_irows *= THCudaTensor_size(state, src, dim);
}
dim3 threads(min(512, num_irows));
unsigned maxGridDim = 1024;
dim3 grid(min(maxGridDim, num_orows), min(maxGridDim, THCCeilDiv(num_irows, threads.x)));
if (flag) {
hipLaunchKernelGGL(( THCudaTensor_kernel_varOuterDim<true, apply_sqrt>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_orows, num_irows, row_size);
} else {
hipLaunchKernelGGL(( THCudaTensor_kernel_varOuterDim<false, apply_sqrt>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_orows, num_irows, row_size);
}
hipError_t errcode = hipGetLastError();
if (errcode != hipSuccess) {
THError(hipGetErrorString(errcode));
}
}
/* Compute the variance (or standard deviation) of the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
* - if flag is set, normalize by `row_size` instead of `row_size - 1`
* - if apply_sqrt is set, compute the standard deviation instead of variance
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
template<bool flag, bool apply_sqrt>
__global__ void THCudaTensor_kernel_varInnermostDim(float *tgt, float *src_, unsigned num_rows, unsigned row_size)
{
__shared__ float ssum[32][16];
__shared__ float ssum2[32][16];
for (unsigned block_row = blockIdx.x * blockDim.y; block_row < num_rows; block_row += blockDim.y * gridDim.x) {
unsigned row = block_row + threadIdx.y;
float sum = 0, sum2 = 0;
if (row < num_rows) {
float *src = src_ + row * row_size;
// Sequential reduction within a thread.
for (unsigned col = threadIdx.x; col < row_size; col += blockDim.x) {
float val = src[col];
sum += val;
sum2 += val * val;
}
}
ssum[threadIdx.y][threadIdx.x] = sum;
ssum2[threadIdx.y][threadIdx.x] = sum2;
__syncthreads();
// Reduce intermediate values to single value.
for (unsigned s = 8; s > 1; s >>= 1) {
if (row < num_rows && threadIdx.x < s) {
ssum[threadIdx.y][threadIdx.x] += ssum[threadIdx.y][threadIdx.x + s];
ssum2[threadIdx.y][threadIdx.x] += ssum2[threadIdx.y][threadIdx.x + s];
}
__syncthreads();
}
if (row < num_rows && threadIdx.x == 0) {
sum = ssum[threadIdx.y][0] + ssum[threadIdx.y][1];
sum2 = ssum2[threadIdx.y][0] + ssum2[threadIdx.y][1];
tgt[row] = THCudaTensor_computeVar<flag, apply_sqrt>(sum, sum2, row_size);
}
__syncthreads();
}
}
template<bool apply_sqrt>
__host__ void THCudaTensor_varInnermostDim(THCState *state, THCudaTensor *tgt, THCudaTensor *src, int flag)
{
unsigned ndim = THCudaTensor_nDimension(state, src);
// Treat all outer dimensions as a single dimension.
unsigned num_rows = 1;
for (unsigned dim = 0; dim < ndim - 1; dim++) {
num_rows *= THCudaTensor_size(state, src, dim);
}
unsigned row_size = THCudaTensor_size(state, src, ndim - 1);
// From limited testing, 16x32 seemed a good compromise for handling both long and short dimensions.
dim3 threads(16, 32);
dim3 grid(min(1024, THCCeilDiv(num_rows, threads.y)));
if (flag) {
hipLaunchKernelGGL(( THCudaTensor_kernel_varInnermostDim<true, apply_sqrt>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_rows, row_size);
} else {
hipLaunchKernelGGL(( THCudaTensor_kernel_varInnermostDim<false, apply_sqrt>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_rows, row_size);
}
hipError_t errcode = hipGetLastError();
if (errcode != hipSuccess) {
THError(hipGetErrorString(errcode));
}
}
void THCudaTensor_var(THCState *state, THCudaTensor *self_, THCudaTensor *src, long dimension, int flag)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
THLongStorage *dim = THCudaTensor_newSizeOf(state, src);
THLongStorage_set(dim, dimension, 1);
THCudaTensor_resize(state, self_, dim, NULL);
THLongStorage_free(dim);
THCudaTensor *self = THCudaTensor_newContiguous(state, self_);
src = THCudaTensor_newContiguous(state, src);
if (dimension == THCudaTensor_nDimension(state, src) - 1) {
THCudaTensor_varInnermostDim<false>(state, self, src, flag);
} else {
THCudaTensor_varOuterDim<false>(state, self, src, dimension, flag);
}
THCudaTensor_free(state, src);
THCudaTensor_freeCopyTo(state, self, self_);
}
void THCudaTensor_std(THCState *state, THCudaTensor *self_, THCudaTensor *src, long dimension, int flag)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
THLongStorage *dim = THCudaTensor_newSizeOf(state, src);
THLongStorage_set(dim, dimension, 1);
THCudaTensor_resize(state, self_, dim, NULL);
THLongStorage_free(dim);
THCudaTensor *self = THCudaTensor_newContiguous(state, self_);
src = THCudaTensor_newContiguous(state, src);
if (dimension == THCudaTensor_nDimension(state, src) - 1) {
THCudaTensor_varInnermostDim<true>(state, self, src, flag);
} else {
THCudaTensor_varOuterDim<true>(state, self, src, dimension, flag);
}
THCudaTensor_free(state, src);
THCudaTensor_freeCopyTo(state, self, self_);
}
struct norm_functor
{
const float exponent;
norm_functor(float exponent_) : exponent(exponent_) {}
__host__ __device__ float operator()(const float& x) const
{
return pow(fabs(x), exponent);
}
};
struct partial_not_equal_functor
{
const float rhs;
partial_not_equal_functor(float rhs) : rhs(rhs) {}
__host__ __device__ bool operator()(const float &lhs) const {return lhs != rhs;}
};
float THCudaTensor_normall(THCState *state, THCudaTensor *self, float value)
{
THAssert(THCudaTensor_checkGPU(state, 1, self));
self = THCudaTensor_newContiguous(state, self);
long size = THCudaTensor_nElement(state, self);
thrust::device_ptr<float> self_data(THCudaTensor_data(state, self));
float result;
if(value == 0.0f) {
result = thrust::transform_reduce(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par.on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, partial_not_equal_functor(0.0f),
(float)0, thrust::plus<float>());
} else {
result = thrust::transform_reduce(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par.on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, norm_functor(value),
(float)0, thrust::plus<float>());
result = pow(result, (float)1.0/value);
}
THCudaTensor_free(state, self);
return result;
}
void THCudaTensor_norm(THCState *state, THCudaTensor* self, THCudaTensor* src, float value, long dimension)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
if (value == 0.0f) {
THCudaTensor_reduceDim(state, self, src,
partial_not_equal_functor(0.0f), thrust::plus<float>(),
0.0f, dimension);
} else {
THCudaTensor_reduceDim(state, self, src,
norm_functor(value), thrust::plus<float>(),
0.0f, dimension);
THCudaTensor_pow(state, self, self, 1/value);
}
THCudaCheck(hipGetLastError());
}
__global__ void THCudaTensor_kernel_renorm(float *data, const float value, const long size, const float maxnorm)
{
__shared__ float buffer[32];
long tx = threadIdx.x;
long bx = blockIdx.x;
long step = blockDim.x;
float *row = data + size*bx;
buffer[tx] = 0;
// get norm of axis
for (long i=tx; i<size; i+=step)
{
buffer[tx] += pow(fabs(row[i]), value);
}
// add (reduce)
for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1)
{
__syncthreads();
if (tx < stride)
buffer[tx] += buffer[tx+stride];
}
// clip norms
__syncthreads();
float norm = pow(buffer[0], 1/value);
if (norm > maxnorm)
{
norm = maxnorm / (norm + 1e-7);
// renormalize
for (long i=tx; i<size; i+=step)
{
row[i] *= norm;
}
}
}
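/* Shape of the reduction above, assuming the 32-thread launch used below: each
 * thread accumulates a strided partial sum of |row[i]|^value into buffer[tx],
 * the halving loop folds the 32 partials 32 -> 16 -> 8 -> 4 -> 2 -> 1, and
 * buffer[0] then holds the row's p-norm raised to `value`, so
 * norm = pow(buffer[0], 1/value) is the actual p-norm that gets clipped. */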
void THCudaTensor_renorm(THCState *state, THCudaTensor* self, THCudaTensor* src, float value, long dimension, float maxnorm)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
THCudaTensor *self_;
THCudaTensor *src_ = THCudaTensor_newTranspose(state, src, dimension, 0);
THCudaTensor *data = THCudaTensor_newClone(state, src_);
long size = THCudaTensor_nElement(state, data)/data->size[0];
THArgCheck(dimension >= 0 && dimension < THCudaTensor_nDimension(state, src), 3, "invalid dimension");
THArgCheck(value > 0, 2, "non-positive-norm not supported");
THArgCheck(THCudaTensor_nDimension(state, src) > 1, 1, "need at least 2 dimensions");
dim3 grid(data->size[0]);
dim3 threads(32);
hipLaunchKernelGGL(( THCudaTensor_kernel_renorm), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state), THCudaTensor_data(state, data), value, size, maxnorm);
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
THCudaTensor_free(state, src_);
self_ = THCudaTensor_newTranspose(state, data, dimension, 0);
THCudaTensor_resizeAs(state, self, self_);
THCudaTensor_freeCopyTo(state, self_, self);
THCudaTensor_free(state, data);
}
struct dist_functor
{
const float exponent;
dist_functor(float exponent_) : exponent(exponent_) {}
__host__ __device__ float operator()(const float& x, const float& y) const
{
return pow(fabs(x-y), exponent);
}
};
float THCudaTensor_dist(THCState *state, THCudaTensor *self, THCudaTensor *src, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
self = THCudaTensor_newContiguous(state, self);
long size = THCudaTensor_nElement(state, self);
src = THCudaTensor_newContiguous(state, src);
thrust::device_ptr<float> self_data(THCudaTensor_data(state, self));
thrust::device_ptr<float> src_data(THCudaTensor_data(state, src));
float result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par.on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, (float) 0,
thrust::plus<float>(), dist_functor(value));
THCudaTensor_free(state, src);
THCudaTensor_free(state, self);
return pow(result, (float)1.0/value);
}
void THCudaTensor_rand(THCState *state, THCudaTensor *r_, THLongStorage *size)
{
THAssert(THCudaTensor_checkGPU(state, 1, r_));
THCudaTensor_resize(state, r_, size, NULL);
THCudaTensor_uniform(state, r_, 0, 1);
}
void THCudaTensor_randn(THCState *state, THCudaTensor *r_, THLongStorage *size)
{
THAssert(THCudaTensor_checkGPU(state, 1, r_));
THCudaTensor_resize(state, r_, size, NULL);
THCudaTensor_normal(state, r_, 0, 1);
}
| 2760dc635e77357c08473f43b4423fcd7c9636d0.cu | #include "THCTensorMath.h"
#include "THCGeneral.h"
#include "THCBlas.h"
#include "THCTensorCopy.h"
#include "THCTensorRandom.h"
#include "THCApply.cuh"
#include "THCReduce.cuh"
#include <thrust/device_ptr.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/inner_product.h>
#if CUDA_VERSION >= 7000
#include <thrust/system/cuda/execution_policy.h>
#endif
struct TensorPowOp {
TensorPowOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = powf(*in, val);
}
__device__ __forceinline__ void operator()(float* v) {
*v = powf(*v, val);
}
const float val;
};
void THCudaTensor_pow(THCState *state, THCudaTensor *self_, THCudaTensor *src, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
if (self_ == src) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorPowOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THCudaTensor_pointwiseApply2(state, self_, src, TensorPowOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
struct TensorTPowOp {
TensorTPowOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = powf(val, *in);
}
__device__ __forceinline__ void operator()(float* v) {
*v = powf(val, *v);
}
const float val;
};
void THCudaTensor_tpow(THCState *state, THCudaTensor *self_, float value, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
if (self_ == src) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorTPowOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THCudaTensor_pointwiseApply2(state, self_, src, TensorTPowOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
struct TensorATan2Op {
__device__ __forceinline__ void operator()(float* out, float* a, float* b) {
*out = atan2f(*a, *b);
}
};
void THCudaTensor_atan2(THCState *state, THCudaTensor *self_, THCudaTensor *tx, THCudaTensor *ty)
{
THAssert(THCudaTensor_checkGPU(state, 3, self_, tx, ty));
THArgCheck(THCudaTensor_nElement(state, tx) ==
THCudaTensor_nElement(state, ty), 3, "sizes do not match");
THCudaTensor_resizeAs(state, self_, tx);
if (!THCudaTensor_pointwiseApply3(state, self_, tx, ty, TensorATan2Op())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
struct TensorClampOp {
TensorClampOp(float min, float max) : minValue(min), maxValue(max) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = max(min(*in, maxValue), minValue);
}
__device__ __forceinline__ void operator()(float* v) {
*v = max(min(*v, maxValue), minValue);
}
const float minValue;
const float maxValue;
};
void THCudaTensor_clamp(THCState *state, THCudaTensor *self_, THCudaTensor *src, float min_value,
float max_value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
if (self_ == src) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorClampOp(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THCudaTensor_pointwiseApply2(state, self_, src, TensorClampOp(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
struct TensorSignOp {
__device__ __forceinline__ void operator()(float* out, float* in) {
float orig = *in;
*out = (orig > 0) - (orig < 0);
}
__device__ __forceinline__ void operator()(float* v) {
float orig = *v;
*v = (orig > 0) - (orig < 0);
}
};
void THCudaTensor_sign(THCState *state, THCudaTensor *self_, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
if (self_ == src) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorSignOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THCudaTensor_pointwiseApply2(state, self_, src, TensorSignOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
float THCudaTensor_meanall(THCState *state, THCudaTensor *self)
{
THAssert(THCudaTensor_checkGPU(state, 1, self));
THArgCheck(self->nDimension > 0, 1, "empty Tensor");
return THCudaTensor_sumall(state, self)/THCudaTensor_nElement(state, self);
}
void
THCudaTensor_mean(THCState *state, THCudaTensor *self, THCudaTensor *src, long dim)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
THCudaTensor_sum(state, self, src, dim);
THCudaTensor_div(state, self, self, THCudaTensor_size(state, src, dim));
}
struct square_functor
{
const float mean;
square_functor(float mean_) : mean(mean_) {}
__host__ __device__ float operator()(const float& x) const
{
return (x-mean)*(x-mean);
}
};
float THCudaTensor_varall(THCState *state, THCudaTensor *self)
{
THAssert(THCudaTensor_checkGPU(state, 1, self));
self = THCudaTensor_newContiguous(state, self);
long size = THCudaTensor_nElement(state, self);
thrust::device_ptr<float> self_data(THCudaTensor_data(state, self));
float mean = THCudaTensor_meanall(state, self);
float result =
thrust::transform_reduce(
#if CUDA_VERSION >= 7000
thrust::cuda::par.on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, square_functor(mean),
(float)0, thrust::plus<float>());
result = result/(THCudaTensor_nElement(state, self)-1);
THCudaTensor_free(state, self);
return result;
}
float THCudaTensor_stdall(THCState *state, THCudaTensor *self)
{
THAssert(THCudaTensor_checkGPU(state, 1, self));
return sqrt(THCudaTensor_varall(state, self));
}
// Given the sum of values and the sum of squares, compute the variance or standard deviation.
template<bool flag, bool apply_sqrt>
__forceinline__ __device__ float THCudaTensor_computeVar(float sum, float sum2, unsigned row_size) {
if (flag) {
sum /= row_size;
sum2 /= row_size;
sum2 -= sum * sum;
sum2 = (sum2 < 0 ? 0 : sum2);
}
else {
sum /= row_size;
sum2 /= row_size - 1;
sum2 -= ((float)row_size) / ((float)(row_size - 1)) * sum * sum;
sum2 = (sum2 < 0 ? 0 : sum2);
}
if (apply_sqrt)
return sqrt(sum2);
else
return sum2;
}
/* Compute the variance (or standard deviation) along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to compute the variance;
* - if flag is set, normalize by `row_size` instead of `row_size - 1`
* - if apply_sqrt is set, compute the standard deviation instead of variance
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<bool flag, bool apply_sqrt>
__global__ void THCudaTensor_kernel_varOuterDim(float *tgt, float *src_, unsigned num_orows, unsigned num_irows, unsigned row_size)
{
for (unsigned orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (unsigned irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
float *src = src_ + orow * row_size * num_irows + irow;
float sum = 0, sum2 = 0;
for (unsigned col = 0; col < row_size; ++col) {
float val = *src;
sum += val;
sum2 += val * val;
src += num_irows;
}
tgt[orow * num_irows + irow] = THCudaTensor_computeVar<flag, apply_sqrt>(sum, sum2, row_size);
}
}
}
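/* For illustration, reducing a 2x3x4 tensor over dimension 1 with the kernel
 * above gives num_orows = 2, row_size = 3, num_irows = 4. The thread handling
 * (orow = 1, irow = 2) starts at src_ + 1*3*4 + 2 = src_ + 14 and steps by
 * num_irows, reading offsets 14, 18, 22, then writes tgt[1*4 + 2] = tgt[6]. */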
template<bool apply_sqrt>
__host__ void THCudaTensor_varOuterDim(THCState *state, THCudaTensor *tgt, THCudaTensor *src, long dimension, int flag)
{
unsigned ndim = THCudaTensor_nDimension(state, src);
// Treat all outer dimensions (i.e. dim < dimension) as one.
unsigned num_orows = 1;
for (unsigned dim = 0; dim < dimension; dim++) {
num_orows *= THCudaTensor_size(state, src, dim);
}
unsigned row_size = THCudaTensor_size(state, src, dimension);
// Treat all inner dimensions (i.e. dim > dimension) as one.
unsigned num_irows = 1;
for (unsigned dim = dimension + 1; dim < ndim; dim++) {
num_irows *= THCudaTensor_size(state, src, dim);
}
dim3 threads(min(512, num_irows));
unsigned maxGridDim = 1024;
dim3 grid(min(maxGridDim, num_orows), min(maxGridDim, THCCeilDiv(num_irows, threads.x)));
if (flag) {
THCudaTensor_kernel_varOuterDim<true, apply_sqrt><<<grid, threads, 0, THCState_getCurrentStream(state)>>>(
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_orows, num_irows, row_size);
} else {
THCudaTensor_kernel_varOuterDim<false, apply_sqrt><<<grid, threads, 0, THCState_getCurrentStream(state)>>>(
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_orows, num_irows, row_size);
}
cudaError errcode = cudaGetLastError();
if (errcode != cudaSuccess) {
THError(cudaGetErrorString(errcode));
}
}
/* Compute the variance (or standard deviation) of the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
* - if flag is set, normalize by `row_size` instead of `row_size - 1`
* - if apply_sqrt is set, compute the standard deviation instead of variance
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
template<bool flag, bool apply_sqrt>
__global__ void THCudaTensor_kernel_varInnermostDim(float *tgt, float *src_, unsigned num_rows, unsigned row_size)
{
__shared__ float ssum[32][16];
__shared__ float ssum2[32][16];
for (unsigned block_row = blockIdx.x * blockDim.y; block_row < num_rows; block_row += blockDim.y * gridDim.x) {
unsigned row = block_row + threadIdx.y;
float sum = 0, sum2 = 0;
if (row < num_rows) {
float *src = src_ + row * row_size;
// Sequential reduction within a thread.
for (unsigned col = threadIdx.x; col < row_size; col += blockDim.x) {
float val = src[col];
sum += val;
sum2 += val * val;
}
}
ssum[threadIdx.y][threadIdx.x] = sum;
ssum2[threadIdx.y][threadIdx.x] = sum2;
__syncthreads();
// Reduce intermediate values to single value.
for (unsigned s = 8; s > 1; s >>= 1) {
if (row < num_rows && threadIdx.x < s) {
ssum[threadIdx.y][threadIdx.x] += ssum[threadIdx.y][threadIdx.x + s];
ssum2[threadIdx.y][threadIdx.x] += ssum2[threadIdx.y][threadIdx.x + s];
}
__syncthreads();
}
if (row < num_rows && threadIdx.x == 0) {
sum = ssum[threadIdx.y][0] + ssum[threadIdx.y][1];
sum2 = ssum2[threadIdx.y][0] + ssum2[threadIdx.y][1];
tgt[row] = THCudaTensor_computeVar<flag, apply_sqrt>(sum, sum2, row_size);
}
__syncthreads();
}
}
template<bool apply_sqrt>
__host__ void THCudaTensor_varInnermostDim(THCState *state, THCudaTensor *tgt, THCudaTensor *src, int flag)
{
unsigned ndim = THCudaTensor_nDimension(state, src);
// Treat all outer dimensions as a single dimension.
unsigned num_rows = 1;
for (unsigned dim = 0; dim < ndim - 1; dim++) {
num_rows *= THCudaTensor_size(state, src, dim);
}
unsigned row_size = THCudaTensor_size(state, src, ndim - 1);
// From limited testing, 16x32 seemed a good compromise for handling both long and short dimensions.
dim3 threads(16, 32);
dim3 grid(min(1024, THCCeilDiv(num_rows, threads.y)));
if (flag) {
THCudaTensor_kernel_varInnermostDim<true, apply_sqrt><<<grid, threads, 0, THCState_getCurrentStream(state)>>>(
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_rows, row_size);
} else {
THCudaTensor_kernel_varInnermostDim<false, apply_sqrt><<<grid, threads, 0, THCState_getCurrentStream(state)>>>(
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_rows, row_size);
}
cudaError errcode = cudaGetLastError();
if (errcode != cudaSuccess) {
THError(cudaGetErrorString(errcode));
}
}
void THCudaTensor_var(THCState *state, THCudaTensor *self_, THCudaTensor *src, long dimension, int flag)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
THLongStorage *dim = THCudaTensor_newSizeOf(state, src);
THLongStorage_set(dim, dimension, 1);
THCudaTensor_resize(state, self_, dim, NULL);
THLongStorage_free(dim);
THCudaTensor *self = THCudaTensor_newContiguous(state, self_);
src = THCudaTensor_newContiguous(state, src);
if (dimension == THCudaTensor_nDimension(state, src) - 1) {
THCudaTensor_varInnermostDim<false>(state, self, src, flag);
} else {
THCudaTensor_varOuterDim<false>(state, self, src, dimension, flag);
}
THCudaTensor_free(state, src);
THCudaTensor_freeCopyTo(state, self, self_);
}
void THCudaTensor_std(THCState *state, THCudaTensor *self_, THCudaTensor *src, long dimension, int flag)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
THLongStorage *dim = THCudaTensor_newSizeOf(state, src);
THLongStorage_set(dim, dimension, 1);
THCudaTensor_resize(state, self_, dim, NULL);
THLongStorage_free(dim);
THCudaTensor *self = THCudaTensor_newContiguous(state, self_);
src = THCudaTensor_newContiguous(state, src);
if (dimension == THCudaTensor_nDimension(state, src) - 1) {
THCudaTensor_varInnermostDim<true>(state, self, src, flag);
} else {
THCudaTensor_varOuterDim<true>(state, self, src, dimension, flag);
}
THCudaTensor_free(state, src);
THCudaTensor_freeCopyTo(state, self, self_);
}
struct norm_functor
{
const float exponent;
norm_functor(float exponent_) : exponent(exponent_) {}
__host__ __device__ float operator()(const float& x) const
{
return pow(fabs(x), exponent);
}
};
struct partial_not_equal_functor
{
const float rhs;
partial_not_equal_functor(float rhs) : rhs(rhs) {}
__host__ __device__ bool operator()(const float &lhs) const {return lhs != rhs;}
};
float THCudaTensor_normall(THCState *state, THCudaTensor *self, float value)
{
THAssert(THCudaTensor_checkGPU(state, 1, self));
self = THCudaTensor_newContiguous(state, self);
long size = THCudaTensor_nElement(state, self);
thrust::device_ptr<float> self_data(THCudaTensor_data(state, self));
float result;
if(value == 0.0f) {
result = thrust::transform_reduce(
#if CUDA_VERSION >= 7000
thrust::cuda::par.on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, partial_not_equal_functor(0.0f),
(float)0, thrust::plus<float>());
} else {
result = thrust::transform_reduce(
#if CUDA_VERSION >= 7000
thrust::cuda::par.on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, norm_functor(value),
(float)0, thrust::plus<float>());
result = pow(result, (float)1.0/value);
}
THCudaTensor_free(state, self);
return result;
}
void THCudaTensor_norm(THCState *state, THCudaTensor* self, THCudaTensor* src, float value, long dimension)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
if (value == 0.0f) {
THCudaTensor_reduceDim(state, self, src,
partial_not_equal_functor(0.0f), thrust::plus<float>(),
0.0f, dimension);
} else {
THCudaTensor_reduceDim(state, self, src,
norm_functor(value), thrust::plus<float>(),
0.0f, dimension);
THCudaTensor_pow(state, self, self, 1/value);
}
THCudaCheck(cudaGetLastError());
}
__global__ void THCudaTensor_kernel_renorm(float *data, const float value, const long size, const float maxnorm)
{
__shared__ float buffer[32];
long tx = threadIdx.x;
long bx = blockIdx.x;
long step = blockDim.x;
float *row = data + size*bx;
buffer[tx] = 0;
// get norm of axis
for (long i=tx; i<size; i+=step)
{
buffer[tx] += pow(fabs(row[i]), value);
}
// add (reduce)
for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1)
{
__syncthreads();
if (tx < stride)
buffer[tx] += buffer[tx+stride];
}
// clip norms
__syncthreads();
float norm = pow(buffer[0], 1/value);
if (norm > maxnorm)
{
norm = maxnorm / (norm + 1e-7);
// renormalize
for (long i=tx; i<size; i+=step)
{
row[i] *= norm;
}
}
}
void THCudaTensor_renorm(THCState *state, THCudaTensor* self, THCudaTensor* src, float value, long dimension, float maxnorm)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
THCudaTensor *self_;
THCudaTensor *src_ = THCudaTensor_newTranspose(state, src, dimension, 0);
THCudaTensor *data = THCudaTensor_newClone(state, src_);
long size = THCudaTensor_nElement(state, data)/data->size[0];
THArgCheck(dimension >= 0 && dimension < THCudaTensor_nDimension(state, src), 3, "invalid dimension");
THArgCheck(value > 0, 2, "non-positive-norm not supported");
THArgCheck(THCudaTensor_nDimension(state, src) > 1, 1, "need at least 2 dimensions");
dim3 grid(data->size[0]);
dim3 threads(32);
THCudaTensor_kernel_renorm<<<grid, threads, 0, THCState_getCurrentStream(state)>>>(THCudaTensor_data(state, data), value, size, maxnorm);
cudaError errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
THCudaTensor_free(state, src_);
self_ = THCudaTensor_newTranspose(state, data, dimension, 0);
THCudaTensor_resizeAs(state, self, self_);
THCudaTensor_freeCopyTo(state, self_, self);
THCudaTensor_free(state, data);
}
struct dist_functor
{
const float exponent;
dist_functor(float exponent_) : exponent(exponent_) {}
__host__ __device__ float operator()(const float& x, const float& y) const
{
return pow(fabs(x-y), exponent);
}
};
float THCudaTensor_dist(THCState *state, THCudaTensor *self, THCudaTensor *src, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
self = THCudaTensor_newContiguous(state, self);
long size = THCudaTensor_nElement(state, self);
src = THCudaTensor_newContiguous(state, src);
thrust::device_ptr<float> self_data(THCudaTensor_data(state, self));
thrust::device_ptr<float> src_data(THCudaTensor_data(state, src));
float result = thrust::inner_product(
#if CUDA_VERSION >= 7000
thrust::cuda::par.on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, (float) 0,
thrust::plus<float>(), dist_functor(value));
THCudaTensor_free(state, src);
THCudaTensor_free(state, self);
return pow(result, (float)1.0/value);
}
void THCudaTensor_rand(THCState *state, THCudaTensor *r_, THLongStorage *size)
{
THAssert(THCudaTensor_checkGPU(state, 1, r_));
THCudaTensor_resize(state, r_, size, NULL);
THCudaTensor_uniform(state, r_, 0, 1);
}
void THCudaTensor_randn(THCState *state, THCudaTensor *r_, THLongStorage *size)
{
THAssert(THCudaTensor_checkGPU(state, 1, r_));
THCudaTensor_resize(state, r_, size, NULL);
THCudaTensor_normal(state, r_, 0, 1);
}
|
c99b2cc0a7687acdbbf79857025981c27da2ec38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include<sys/wait.h>
#include<unistd.h>
#include<time.h>
__global__ void matrixVectorMultCuda(int* row_ptr, int* col_ind, float* values, float* x, int row_size, int col_size, int value_size, int *threadsID_ptr_Mapper, int *numberOfIndexesToProcess);
__host__ void outer_VecMatMult_Started (struct SparseMatrix* sparse_Matrix, float *x, int row_size, int col_size, int value_size, int NUMBER_OF_CUDA_THREADS ,int NUMBER_OF_REPETITION , int OUTPUT_FLAG);
struct SparseMatrix{
int *row_ptr;
int *col_ind;
float *values;
};
struct SparseMatrix* readSparseMatrix (char* arg, int &row_size, int &col_size, int &value_size){
FILE * file;
int i,j, int_tmp;
float float_tmp;
struct SparseMatrix *matrix = (struct SparseMatrix *) malloc(sizeof(struct SparseMatrix));
if ((file = fopen(arg, "r+")) == NULL)
{
printf("ERROR: file open failed\n");
//return(NULL);
}
for(int k = 0; k<3; k++){
fscanf(file,"%d", &int_tmp);
switch( k )
{
case 0:
row_size = int_tmp;
break;
case 1:
col_size = int_tmp;
break;
case 2:
value_size = int_tmp;
break;
}
}
matrix->row_ptr = (int*) malloc(sizeof(int)*value_size);
matrix->col_ind = (int*) malloc(sizeof(int)*value_size);
matrix->values = (float*) malloc(sizeof(float)*value_size);
for (i = 0; i < (value_size); i++){
for (j = 0; j < (3); j++){
switch( j ){
case 0:
fscanf(file,"%d", &int_tmp);
matrix->row_ptr[i] = int_tmp-1;
break;
case 1:
fscanf(file,"%d", &int_tmp);
matrix->col_ind[i] = int_tmp-1;
break;
case 2:
fscanf(file,"%f", &float_tmp);
matrix->values[i] = float_tmp;
break;
}
}
}
return matrix;
}
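/* For illustration, a file consistent with the reader above (header "rows cols nnz",
 * then 1-indexed "row col value" triplets) could look like:
 *   3 3 2
 *   1 2 5.0
 *   3 1 2.5
 * which readSparseMatrix stores 0-indexed as row_ptr = {0, 2}, col_ind = {1, 0},
 * values = {5.0, 2.5}. */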
__global__ void matrixVectorMultCuda(int* row_ptr, int* col_ind, float* values, float* x,int row_size , int col_size, int value_size, int *threadsID_ptr_Mapper, int * numberOfIndexesToProcess){
//printf("ThreadID: %i\n", col_size);
int tid=threadIdx.x+blockIdx.x*blockDim.x;
float sum=0;
	//each thread searches the triplet list for its assigned row ids;
	//thread 0 is responsible for rows 0 .. (rows-per-thread - 1) of the sparse matrix.
	//since the given sparse-matrix format only stores non-zeros, this for loop
	//also runs in O(n), where n is the non-zero value count;
if(tid<row_size){
for(int r_i=0; r_i<numberOfIndexesToProcess[tid];r_i++){
int tid_x = r_i+threadsID_ptr_Mapper[tid];
sum=0;
//printf("TID[%i]- %i - ",tid,tid_x );
for(int i = 0; i<value_size; i++){
if(row_ptr[i]==tid_x){
sum += (float) (x[tid_x]*values[i]);
}
}
//printf("[%e]\n",sum );
__syncthreads();
x[tid_x]=sum;
}
}
//printf("%i\n",tid);
	// the __syncthreads() inside the loop above is intended to keep threads from
	// overwriting entries of x that other threads in the block are still reading.
}
__host__ void outer_VecMatMult_Started(struct SparseMatrix* sparse_Matrix, float *x, int row_size, int col_size, int value_size,
int NUMBER_OF_CUDA_THREADS ,int NUMBER_OF_REPETITION , int OUTPUT_FLAG){
int *dev_row_ptr, *dev_col_ind, *dev_threadsID_ptr_Mapper,*dev_numberOfIndexesToProcess;
float * dev_values, *dev_x;
int size = sizeof(int)*value_size;
int size_f = sizeof(float)*value_size;
int size_f_x = sizeof(float)* row_size;
if(OUTPUT_FLAG==1){
printf("Initial Matrix:\n");
printf(" Values Array: [ %e",x[0]);
for(int i=1; i<value_size;i++){
printf(", %e",sparse_Matrix->values[i]);
}
printf("]\n Col_Ind Array: [ %i",sparse_Matrix->col_ind[0]);
for(int i=1; i<value_size;i++){
printf(", %i",sparse_Matrix->col_ind[i]);
}
printf("]\n Row_Ptr Array: [ %i",sparse_Matrix->row_ptr[0]);
for(int i=1; i<value_size;i++){
printf(", %i",sparse_Matrix->row_ptr[i]);
}
printf("]\nVector: [ %e",x[0] );
for(int i=1; i<value_size;i++){
printf(", %e",x[i]);
}
printf("]\n");
}
int partitionSize=sizeof(int)*NUMBER_OF_CUDA_THREADS;
int *threadsID_ptr_Mapper =(int*) malloc(partitionSize);
int *numberOfIndexesToProcess = (int*) malloc(partitionSize);
int threadSize,threadSizeRemaining;
if(NUMBER_OF_CUDA_THREADS>row_size){
threadSize = 1;
threadSizeRemaining = 0;
threadsID_ptr_Mapper[0] = 0;
numberOfIndexesToProcess[0] = threadSize;
for(int i = 1; i< NUMBER_OF_CUDA_THREADS; i++){
threadsID_ptr_Mapper[i] = threadsID_ptr_Mapper[i-1] + threadSize;
numberOfIndexesToProcess[i] = threadSize;
//printf("size[%i]: %i - %i\n", i, threadsID_ptr_Mapper[i] ,numberOfIndexesToProcess[i]);
}
}
else{
threadSize = value_size/NUMBER_OF_CUDA_THREADS;
threadSizeRemaining = value_size%NUMBER_OF_CUDA_THREADS;
threadsID_ptr_Mapper[0] = 0;
numberOfIndexesToProcess[0] = threadSize;
for(int i = 1; i< NUMBER_OF_CUDA_THREADS; i++){
if(i<NUMBER_OF_CUDA_THREADS-1){
threadsID_ptr_Mapper[i] = threadsID_ptr_Mapper[i-1] + threadSize;
numberOfIndexesToProcess[i] = threadSize;
}
else{
threadsID_ptr_Mapper[i] = threadsID_ptr_Mapper[i-1] + threadSize;
numberOfIndexesToProcess[i] = threadSize+threadSizeRemaining ;
}
//printf("size[%i]: %i - %i\n", i, threadsID_ptr_Mapper[i] ,numberOfIndexesToProcess[i]);
}
}
hipMalloc((void**)&dev_numberOfIndexesToProcess, partitionSize);
hipMalloc((void**)&dev_threadsID_ptr_Mapper, partitionSize);
hipMalloc((void**)&dev_row_ptr, size);
hipMalloc((void**)&dev_col_ind, size);
hipMalloc((void**)&dev_values, size_f);
hipMalloc((void**)&dev_x, size_f_x);
hipMemcpy(dev_row_ptr, sparse_Matrix->row_ptr, size,hipMemcpyHostToDevice);
hipMemcpy(dev_col_ind, sparse_Matrix->col_ind, size,hipMemcpyHostToDevice);
	hipMemcpy(dev_values, sparse_Matrix->values, size_f,hipMemcpyHostToDevice);
hipMemcpy(dev_threadsID_ptr_Mapper, threadsID_ptr_Mapper, partitionSize,hipMemcpyHostToDevice);
hipMemcpy(dev_numberOfIndexesToProcess, numberOfIndexesToProcess, partitionSize,hipMemcpyHostToDevice);
dim3 threadsPerBlock(1, NUMBER_OF_CUDA_THREADS);
for(int i=0; i<NUMBER_OF_REPETITION; i++){
hipMemcpy(dev_x, x, size_f_x,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( matrixVectorMultCuda), dim3(1),dim3(NUMBER_OF_CUDA_THREADS), 0, 0, dev_row_ptr,dev_col_ind,dev_values,dev_x,row_size,col_size,value_size,dev_threadsID_ptr_Mapper,dev_numberOfIndexesToProcess);
hipMemcpy(x, dev_x, size_f_x,hipMemcpyDeviceToHost);
//printf("Iteration %i out of %i Completed\n",i+1,NUMBER_OF_REPETITION);
}
if(OUTPUT_FLAG==1||OUTPUT_FLAG==2){
printf("Resulting Vector: [ %e",x[0] );
for(int i=1; i<value_size;i++){
printf(", %e",x[i]);
}
printf("]\n");
}
hipFree(dev_numberOfIndexesToProcess);
hipFree(dev_threadsID_ptr_Mapper);
hipFree(dev_row_ptr);
hipFree(dev_col_ind);
hipFree(dev_values);
hipFree(dev_x);
free(threadsID_ptr_Mapper);threadsID_ptr_Mapper=NULL;
free(numberOfIndexesToProcess);numberOfIndexesToProcess=NULL;
}
int main(int argc, char *argv[]){
clock_t start = clock(), diff;
int row_size,col_size,value_size;
struct SparseMatrix * sparse_Matrix = readSparseMatrix (argv[4],row_size,col_size,value_size);
float *x = (float *) malloc(sizeof(float)*row_size);
for(int i = 0 ; i < row_size ; i++){
x[i]=1;
}
/*printf("%i\n",row_size);
printf("%i\n",col_size);
printf("%i\n",value_size);
printf("%i\n",sparse_Matrix->row_ptr[value_size-2]);
//Reading Finished*/
//Cuda zone
/* initialization */
int NUMBER_OF_CUDA_THREADS = atoi(argv[1]);
if(NUMBER_OF_CUDA_THREADS>1024){
printf("The number of allowed cuda threads is 1024!\nSetting the threads number automatically to 1024.\n------ Resetting the program! ------\n");
NUMBER_OF_CUDA_THREADS = 1024;
sleep(1);
}
int NUMBER_OF_REPETITION = atoi(argv[2]);
int OUTPUT_FLAG = atoi(argv[3]);
outer_VecMatMult_Started(sparse_Matrix, x, row_size, col_size, value_size, NUMBER_OF_CUDA_THREADS ,NUMBER_OF_REPETITION , OUTPUT_FLAG);
free(sparse_Matrix);
sparse_Matrix= NULL;
diff = clock() - start;
int msec = diff * 1000 / CLOCKS_PER_SEC;
printf("----\nTime taken: %d seconds %d milliseconds\n", msec/1000, msec%1000);
return 0;
}
| c99b2cc0a7687acdbbf79857025981c27da2ec38.cu | #include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include<sys/wait.h>
#include<unistd.h>
#include<time.h>
__global__ void matrixVectorMultCuda(int* row_ptr, int* col_ind, float* values, float* x, int row_size, int col_size, int value_size, int *threadsID_ptr_Mapper, int *numberOfIndexesToProcess);
__host__ void outer_VecMatMult_Started (struct SparseMatrix* sparse_Matrix, float *x, int row_size, int col_size, int value_size, int NUMBER_OF_CUDA_THREADS ,int NUMBER_OF_REPETITION , int OUTPUT_FLAG);
struct SparseMatrix{
int *row_ptr;
int *col_ind;
float *values;
};
struct SparseMatrix* readSparseMatrix (char* arg, int &row_size, int &col_size, int &value_size){
FILE * file;
int i,j, int_tmp;
float float_tmp;
struct SparseMatrix *matrix = (struct SparseMatrix *) malloc(sizeof(struct SparseMatrix));
if ((file = fopen(arg, "r+")) == NULL)
{
printf("ERROR: file open failed\n");
//return(NULL);
}
for(int k = 0; k<3; k++){
fscanf(file,"%d", &int_tmp);
switch( k )
{
case 0:
row_size = int_tmp;
break;
case 1:
col_size = int_tmp;
break;
case 2:
value_size = int_tmp;
break;
}
}
matrix->row_ptr = (int*) malloc(sizeof(int)*value_size);
matrix->col_ind = (int*) malloc(sizeof(int)*value_size);
matrix->values = (float*) malloc(sizeof(float)*value_size);
for (i = 0; i < (value_size); i++){
for (j = 0; j < (3); j++){
switch( j ){
case 0:
fscanf(file,"%d", &int_tmp);
matrix->row_ptr[i] = int_tmp-1;
break;
case 1:
fscanf(file,"%d", &int_tmp);
matrix->col_ind[i] = int_tmp-1;
break;
case 2:
fscanf(file,"%f", &float_tmp);
matrix->values[i] = float_tmp;
break;
}
}
}
return matrix;
}
__global__ void matrixVectorMultCuda(int* row_ptr, int* col_ind, float* values, float* x,int row_size , int col_size, int value_size, int *threadsID_ptr_Mapper, int * numberOfIndexesToProcess){
//printf("ThreadID: %i\n", col_size);
int tid=threadIdx.x+blockIdx.x*blockDim.x;
float sum=0;
	//each thread searches the triplet list for its assigned row ids;
	//thread 0 is responsible for rows 0 .. (rows-per-thread - 1) of the sparse matrix.
	//since the given sparse-matrix format only stores non-zeros, this for loop
	//also runs in O(n), where n is the non-zero value count;
if(tid<row_size){
for(int r_i=0; r_i<numberOfIndexesToProcess[tid];r_i++){
int tid_x = r_i+threadsID_ptr_Mapper[tid];
sum=0;
//printf("TID[%i]- %i - ",tid,tid_x );
for(int i = 0; i<value_size; i++){
if(row_ptr[i]==tid_x){
sum += (float) (x[tid_x]*values[i]);
}
}
//printf("[%e]\n",sum );
__syncthreads();
x[tid_x]=sum;
}
}
//printf("%i\n",tid);
	// the __syncthreads() inside the loop above is intended to keep threads from
	// overwriting entries of x that other threads in the block are still reading.
}
__host__ void outer_VecMatMult_Started(struct SparseMatrix* sparse_Matrix, float *x, int row_size, int col_size, int value_size,
int NUMBER_OF_CUDA_THREADS ,int NUMBER_OF_REPETITION , int OUTPUT_FLAG){
int *dev_row_ptr, *dev_col_ind, *dev_threadsID_ptr_Mapper,*dev_numberOfIndexesToProcess;
float * dev_values, *dev_x;
int size = sizeof(int)*value_size;
int size_f = sizeof(float)*value_size;
int size_f_x = sizeof(float)* row_size;
if(OUTPUT_FLAG==1){
printf("Initial Matrix:\n");
printf(" Values Array: [ %e",x[0]);
for(int i=1; i<value_size;i++){
printf(", %e",sparse_Matrix->values[i]);
}
printf("]\n Col_Ind Array: [ %i",sparse_Matrix->col_ind[0]);
for(int i=1; i<value_size;i++){
printf(", %i",sparse_Matrix->col_ind[i]);
}
printf("]\n Row_Ptr Array: [ %i",sparse_Matrix->row_ptr[0]);
for(int i=1; i<value_size;i++){
printf(", %i",sparse_Matrix->row_ptr[i]);
}
printf("]\nVector: [ %e",x[0] );
for(int i=1; i<value_size;i++){
printf(", %e",x[i]);
}
printf("]\n");
}
int partitionSize=sizeof(int)*NUMBER_OF_CUDA_THREADS;
int *threadsID_ptr_Mapper =(int*) malloc(partitionSize);
int *numberOfIndexesToProcess = (int*) malloc(partitionSize);
int threadSize,threadSizeRemaining;
if(NUMBER_OF_CUDA_THREADS>row_size){
threadSize = 1;
threadSizeRemaining = 0;
threadsID_ptr_Mapper[0] = 0;
numberOfIndexesToProcess[0] = threadSize;
for(int i = 1; i< NUMBER_OF_CUDA_THREADS; i++){
threadsID_ptr_Mapper[i] = threadsID_ptr_Mapper[i-1] + threadSize;
numberOfIndexesToProcess[i] = threadSize;
//printf("size[%i]: %i - %i\n", i, threadsID_ptr_Mapper[i] ,numberOfIndexesToProcess[i]);
}
}
else{
threadSize = value_size/NUMBER_OF_CUDA_THREADS;
threadSizeRemaining = value_size%NUMBER_OF_CUDA_THREADS;
threadsID_ptr_Mapper[0] = 0;
numberOfIndexesToProcess[0] = threadSize;
for(int i = 1; i< NUMBER_OF_CUDA_THREADS; i++){
if(i<NUMBER_OF_CUDA_THREADS-1){
threadsID_ptr_Mapper[i] = threadsID_ptr_Mapper[i-1] + threadSize;
numberOfIndexesToProcess[i] = threadSize;
}
else{
threadsID_ptr_Mapper[i] = threadsID_ptr_Mapper[i-1] + threadSize;
numberOfIndexesToProcess[i] = threadSize+threadSizeRemaining ;
}
//printf("size[%i]: %i - %i\n", i, threadsID_ptr_Mapper[i] ,numberOfIndexesToProcess[i]);
}
}
cudaMalloc((void**)&dev_numberOfIndexesToProcess, partitionSize);
cudaMalloc((void**)&dev_threadsID_ptr_Mapper, partitionSize);
cudaMalloc((void**)&dev_row_ptr, size);
cudaMalloc((void**)&dev_col_ind, size);
cudaMalloc((void**)&dev_values, size_f);
cudaMalloc((void**)&dev_x, size_f_x);
cudaMemcpy(dev_row_ptr, sparse_Matrix->row_ptr, size,cudaMemcpyHostToDevice);
cudaMemcpy(dev_col_ind, sparse_Matrix->col_ind, size,cudaMemcpyHostToDevice);
	cudaMemcpy(dev_values, sparse_Matrix->values, size_f,cudaMemcpyHostToDevice);
cudaMemcpy(dev_threadsID_ptr_Mapper, threadsID_ptr_Mapper, partitionSize,cudaMemcpyHostToDevice);
cudaMemcpy(dev_numberOfIndexesToProcess, numberOfIndexesToProcess, partitionSize,cudaMemcpyHostToDevice);
dim3 threadsPerBlock(1, NUMBER_OF_CUDA_THREADS);
for(int i=0; i<NUMBER_OF_REPETITION; i++){
cudaMemcpy(dev_x, x, size_f_x,cudaMemcpyHostToDevice);
matrixVectorMultCuda<<<1,NUMBER_OF_CUDA_THREADS>>>(dev_row_ptr,dev_col_ind,dev_values,dev_x,row_size,col_size,value_size,dev_threadsID_ptr_Mapper,dev_numberOfIndexesToProcess);
cudaMemcpy(x, dev_x, size_f_x,cudaMemcpyDeviceToHost);
//printf("Iteration %i out of %i Completed\n",i+1,NUMBER_OF_REPETITION);
}
if(OUTPUT_FLAG==1||OUTPUT_FLAG==2){
printf("Resulting Vector: [ %e",x[0] );
for(int i=1; i<value_size;i++){
printf(", %e",x[i]);
}
printf("]\n");
}
cudaFree(dev_numberOfIndexesToProcess);
cudaFree(dev_threadsID_ptr_Mapper);
cudaFree(dev_row_ptr);
cudaFree(dev_col_ind);
cudaFree(dev_values);
cudaFree(dev_x);
free(threadsID_ptr_Mapper);threadsID_ptr_Mapper=NULL;
free(numberOfIndexesToProcess);numberOfIndexesToProcess=NULL;
}
int main(int argc, char *argv[]){
clock_t start = clock(), diff;
int row_size,col_size,value_size;
struct SparseMatrix * sparse_Matrix = readSparseMatrix (argv[4],row_size,col_size,value_size);
float *x = (float *) malloc(sizeof(float)*row_size);
for(int i = 0 ; i < row_size ; i++){
x[i]=1;
}
/*printf("%i\n",row_size);
printf("%i\n",col_size);
printf("%i\n",value_size);
printf("%i\n",sparse_Matrix->row_ptr[value_size-2]);
//Reading Finished*/
//Cuda zone
/* initialization */
int NUMBER_OF_CUDA_THREADS = atoi(argv[1]);
if(NUMBER_OF_CUDA_THREADS>1024){
printf("The number of allowed cuda threads is 1024!\nSetting the threads number automatically to 1024.\n------ Resetting the program! ------\n");
NUMBER_OF_CUDA_THREADS = 1024;
sleep(1);
}
int NUMBER_OF_REPETITION = atoi(argv[2]);
int OUTPUT_FLAG = atoi(argv[3]);
outer_VecMatMult_Started(sparse_Matrix, x, row_size, col_size, value_size, NUMBER_OF_CUDA_THREADS ,NUMBER_OF_REPETITION , OUTPUT_FLAG);
free(sparse_Matrix);
sparse_Matrix= NULL;
diff = clock() - start;
int msec = diff * 1000 / CLOCKS_PER_SEC;
printf("----\nTime taken: %d seconds %d milliseconds\n", msec/1000, msec%1000);
return 0;
}
|
4da55be620a975df96f57a91e1c47700e400c1a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "THZCApply.cuh"
// #include <thrust/complex.h>
// typedef thrust::complex<float> ccx;
static inline int curGPU() {
int curDev;
THZCudaCheck(hipGetDevice(&curDev));
return curDev;
}
THZC_API void
THZCudaTensor_copy(THCState* state, THZCudaTensor* dst, THZCudaTensor* src) {
long totalElements = THZCudaTensor_nElement(state, dst);
THArgCheck(totalElements == THZCudaTensor_nElement(state, src), 2,
"sizes do not match");
if (THZCudaTensor_nDimension(state, dst) == 0) {
// Zero-dim tensor; copy nothing
return;
}
// We can memcpy the memory if:
// -both tensors are contiguous; or,
// -there is only one element to copy; or,
// -FIXME: if both tensors have matching size and stride arrays, and no
// holes within (in other words, there is some permutation that can be applied
// to the size/strides such that the resulting tensor is contiguous).
bool srcContig = THZCudaTensor_isContiguous(state, src);
bool dstContig = THZCudaTensor_isContiguous(state, dst);
bool memcpyEligible = (srcContig && dstContig) || (totalElements == 1);
int oldDev = curGPU();
int srcDev = THZCudaTensor_getDevice(state, src);
int dstDev = THZCudaTensor_getDevice(state, dst);
// empirically, running the kernel on the device that holds the
// non-contiguous tensor is faster by 5-10x
int copyDev = dstContig ? srcDev : dstDev;
int remoteDev = dstContig ? dstDev : srcDev;
if (srcDev == dstDev) {
if (oldDev != srcDev) {
THZCudaCheck(hipSetDevice(srcDev));
}
} else {
// synchronize remote device before copy
hipEvent_t dataReady;
THZCudaCheck(hipSetDevice(remoteDev));
THZCudaCheck(hipEventCreate(&dataReady));
THZCudaCheck(hipEventRecord(
dataReady,
THCState_getDeviceStream(state, remoteDev, THCState_getCurrentStreamIndex(state))));
THZCudaCheck(hipSetDevice(copyDev));
THZCudaCheck(hipStreamWaitEvent(
THCState_getDeviceStream(state, copyDev, THCState_getCurrentStreamIndex(state)),
dataReady, 0));
THZCudaCheck(hipEventDestroy(dataReady));
}
if (memcpyEligible) {
THZCudaCheck(hipMemcpyAsync(THZCudaTensor_data(state, dst),
THZCudaTensor_data(state, src),
totalElements * sizeof(cux),
hipMemcpyDeviceToDevice,
THCState_getCurrentStream(state)));
} else {
bool succ =
THZCudaTensor_pointwiseApply2(state, dst, src, ZCopyOp<ccx>());
THArgCheck(succ, 2, CUTORCH_DIM_WARNING);
}
if (srcDev != dstDev) {
// synchronize remote device after copy
hipEvent_t doneCopying;
THZCudaCheck(hipEventCreate(&doneCopying));
THZCudaCheck(hipEventRecord(
doneCopying,
THCState_getDeviceStream(state, copyDev, THCState_getCurrentStreamIndex(state))));
THZCudaCheck(hipSetDevice(remoteDev));
THZCudaCheck(hipStreamWaitEvent(
THCState_getDeviceStream(state, remoteDev, THCState_getCurrentStreamIndex(state)),
doneCopying, 0));
THZCudaCheck(hipEventDestroy(doneCopying));
}
if (curGPU() != oldDev) {
THZCudaCheck(hipSetDevice(oldDev));
}
hipError_t errcode = hipGetLastError();
if (errcode != hipSuccess) {
THError(hipGetErrorString(errcode));
}
}
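/* Ordering recap for the cross-GPU path above: an event recorded on the remote
 * device's current stream gates the copy device's stream before the transfer
 * (or pointwise kernel) runs, and a second event recorded after the copy gates
 * the remote stream again, so neither side touches the buffers out of order. */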
THZC_API void
THZCudaTensor_copyIm(THCState* state, THZCudaTensor* dst, THCudaTensor* src) {
long totalElements = THZCudaTensor_nElement(state, dst);
THArgCheck(totalElements == THCudaTensor_nElement(state, src), 2,
"sizes do not match");
if (THZCudaTensor_nDimension(state, dst) == 0) {
// Zero-dim tensor; copy nothing
return;
}
// We can memcpy the memory if:
// -both tensors are contiguous; or,
// -there is only one element to copy; or,
// -FIXME: if both tensors have matching size and stride arrays, and no
// holes within (in other words, there is some permutation that can be applied
// to the size/strides such that the resulting tensor is contiguous).
bool srcContig = THCudaTensor_isContiguous(state, src);
bool dstContig = THZCudaTensor_isContiguous(state, dst);
// bool memcpyEligible = (srcContig && dstContig) || (totalElements == 1);
int oldDev = curGPU();
int srcDev = THCudaTensor_getDevice(state, src);
int dstDev = THZCudaTensor_getDevice(state, dst);
// empirically, running the kernel on the device that holds the
// non-contiguous tensor is faster by 5-10x
int copyDev = dstContig ? srcDev : dstDev;
int remoteDev = dstContig ? dstDev : srcDev;
if (srcDev == dstDev) {
if (oldDev != srcDev) {
THZCudaCheck(hipSetDevice(srcDev));
}
} else {
// synchronize remote device before copy
hipEvent_t dataReady;
THZCudaCheck(hipSetDevice(remoteDev));
THZCudaCheck(hipEventCreate(&dataReady));
THZCudaCheck(hipEventRecord(
dataReady,
THCState_getDeviceStream(state, remoteDev, THCState_getCurrentStreamIndex(state))));
THZCudaCheck(hipSetDevice(copyDev));
THZCudaCheck(hipStreamWaitEvent(
THCState_getDeviceStream(state, copyDev, THCState_getCurrentStreamIndex(state)),
dataReady, 0));
THZCudaCheck(hipEventDestroy(dataReady));
}
bool succ = THZCudaTensor_pointwiseApply2ZF(state, dst, src, CopyImOp());
THArgCheck(succ, 2, CUTORCH_DIM_WARNING);
if (srcDev != dstDev) {
// synchronize remote device after copy
hipEvent_t doneCopying;
THZCudaCheck(hipEventCreate(&doneCopying));
THZCudaCheck(hipEventRecord(
doneCopying,
THCState_getDeviceStream(state, copyDev, THCState_getCurrentStreamIndex(state))));
THZCudaCheck(hipSetDevice(remoteDev));
THZCudaCheck(hipStreamWaitEvent(
THCState_getDeviceStream(state, remoteDev, THCState_getCurrentStreamIndex(state)),
doneCopying, 0));
THZCudaCheck(hipEventDestroy(doneCopying));
}
if (curGPU() != oldDev) {
THZCudaCheck(hipSetDevice(oldDev));
}
hipError_t errcode = hipGetLastError();
if (errcode != hipSuccess) {
THError(hipGetErrorString(errcode));
}
}
THZC_API void
THZCudaTensor_copyRe(THCState* state, THZCudaTensor* dst, THCudaTensor* src) {
long totalElements = THZCudaTensor_nElement(state, dst);
THArgCheck(totalElements == THCudaTensor_nElement(state, src), 2,
"sizes do not match");
if (THZCudaTensor_nDimension(state, dst) == 0) {
// Zero-dim tensor; copy nothing
return;
}
// We can memcpy the memory if:
// -both tensors are contiguous; or,
// -there is only one element to copy; or,
// -FIXME: if both tensors have matching size and stride arrays, and no
// holes within (in other words, there is some permutation that can be applied
// to the size/strides such that the resulting tensor is contiguous).
bool srcContig = THCudaTensor_isContiguous(state, src);
bool dstContig = THZCudaTensor_isContiguous(state, dst);
// bool memcpyEligible = (srcContig && dstContig) || (totalElements == 1);
int oldDev = curGPU();
int srcDev = THCudaTensor_getDevice(state, src);
int dstDev = THZCudaTensor_getDevice(state, dst);
// empirically, running the kernel on the device that holds the
// non-contiguous tensor is faster by 5-10x
int copyDev = dstContig ? srcDev : dstDev;
int remoteDev = dstContig ? dstDev : srcDev;
if (srcDev == dstDev) {
if (oldDev != srcDev) {
THZCudaCheck(hipSetDevice(srcDev));
}
} else {
// synchronize remote device before copy
hipEvent_t dataReady;
THZCudaCheck(hipSetDevice(remoteDev));
THZCudaCheck(hipEventCreate(&dataReady));
THZCudaCheck(hipEventRecord(
dataReady,
THCState_getDeviceStream(state, remoteDev, THCState_getCurrentStreamIndex(state))));
THZCudaCheck(hipSetDevice(copyDev));
THZCudaCheck(hipStreamWaitEvent(
THCState_getDeviceStream(state, copyDev, THCState_getCurrentStreamIndex(state)),
dataReady, 0));
THZCudaCheck(hipEventDestroy(dataReady));
}
bool succ = THZCudaTensor_pointwiseApply2ZF(state, dst, src, CopyReOp());
THArgCheck(succ, 2, CUTORCH_DIM_WARNING);
if (srcDev != dstDev) {
// synchronize remote device after copy
hipEvent_t doneCopying;
THZCudaCheck(hipEventCreate(&doneCopying));
THZCudaCheck(hipEventRecord(
doneCopying,
THCState_getDeviceStream(state, copyDev, THCState_getCurrentStreamIndex(state))));
THZCudaCheck(hipSetDevice(remoteDev));
THZCudaCheck(hipStreamWaitEvent(
THCState_getDeviceStream(state, remoteDev, THCState_getCurrentStreamIndex(state)),
doneCopying, 0));
THZCudaCheck(hipEventDestroy(doneCopying));
}
if (curGPU() != oldDev) {
THZCudaCheck(hipSetDevice(oldDev));
}
hipError_t errcode = hipGetLastError();
if (errcode != hipSuccess) {
THError(hipGetErrorString(errcode));
}
}
| 4da55be620a975df96f57a91e1c47700e400c1a2.cu | #include "THZCApply.cuh"
// #include <thrust/complex.h>
// typedef thrust::complex<float> ccx;
static inline int curGPU() {
int curDev;
THZCudaCheck(cudaGetDevice(&curDev));
return curDev;
}
THZC_API void
THZCudaTensor_copy(THCState* state, THZCudaTensor* dst, THZCudaTensor* src) {
long totalElements = THZCudaTensor_nElement(state, dst);
THArgCheck(totalElements == THZCudaTensor_nElement(state, src), 2,
"sizes do not match");
if (THZCudaTensor_nDimension(state, dst) == 0) {
// Zero-dim tensor; copy nothing
return;
}
// We can memcpy the memory if:
// -both tensors are contiguous; or,
// -there is only one element to copy; or,
// -FIXME: if both tensors have matching size and stride arrays, and no
// holes within (in other words, there is some permutation that can be applied
// to the size/strides such that the resulting tensor is contiguous).
bool srcContig = THZCudaTensor_isContiguous(state, src);
bool dstContig = THZCudaTensor_isContiguous(state, dst);
bool memcpyEligible = (srcContig && dstContig) || (totalElements == 1);
int oldDev = curGPU();
int srcDev = THZCudaTensor_getDevice(state, src);
int dstDev = THZCudaTensor_getDevice(state, dst);
// empirically, running the kernel on the device that holds the
// non-contiguous tensor is faster by 5-10x
int copyDev = dstContig ? srcDev : dstDev;
int remoteDev = dstContig ? dstDev : srcDev;
if (srcDev == dstDev) {
if (oldDev != srcDev) {
THZCudaCheck(cudaSetDevice(srcDev));
}
} else {
// synchronize remote device before copy
cudaEvent_t dataReady;
THZCudaCheck(cudaSetDevice(remoteDev));
THZCudaCheck(cudaEventCreate(&dataReady));
THZCudaCheck(cudaEventRecord(
dataReady,
THCState_getDeviceStream(state, remoteDev, THCState_getCurrentStreamIndex(state))));
THZCudaCheck(cudaSetDevice(copyDev));
THZCudaCheck(cudaStreamWaitEvent(
THCState_getDeviceStream(state, copyDev, THCState_getCurrentStreamIndex(state)),
dataReady, 0));
THZCudaCheck(cudaEventDestroy(dataReady));
}
if (memcpyEligible) {
THZCudaCheck(cudaMemcpyAsync(THZCudaTensor_data(state, dst),
THZCudaTensor_data(state, src),
totalElements * sizeof(cux),
cudaMemcpyDeviceToDevice,
THCState_getCurrentStream(state)));
} else {
bool succ =
THZCudaTensor_pointwiseApply2(state, dst, src, ZCopyOp<ccx>());
THArgCheck(succ, 2, CUTORCH_DIM_WARNING);
}
if (srcDev != dstDev) {
// synchronize remote device after copy
cudaEvent_t doneCopying;
THZCudaCheck(cudaEventCreate(&doneCopying));
THZCudaCheck(cudaEventRecord(
doneCopying,
THCState_getDeviceStream(state, copyDev, THCState_getCurrentStreamIndex(state))));
THZCudaCheck(cudaSetDevice(remoteDev));
THZCudaCheck(cudaStreamWaitEvent(
THCState_getDeviceStream(state, remoteDev, THCState_getCurrentStreamIndex(state)),
doneCopying, 0));
THZCudaCheck(cudaEventDestroy(doneCopying));
}
if (curGPU() != oldDev) {
THZCudaCheck(cudaSetDevice(oldDev));
}
cudaError errcode = cudaGetLastError();
if (errcode != cudaSuccess) {
THError(cudaGetErrorString(errcode));
}
}
THZC_API void
THZCudaTensor_copyIm(THCState* state, THZCudaTensor* dst, THCudaTensor* src) {
long totalElements = THZCudaTensor_nElement(state, dst);
THArgCheck(totalElements == THCudaTensor_nElement(state, src), 2,
"sizes do not match");
if (THZCudaTensor_nDimension(state, dst) == 0) {
// Zero-dim tensor; copy nothing
return;
}
// We can memcpy the memory if:
// -both tensors are contiguous; or,
// -there is only one element to copy; or,
// -FIXME: if both tensors have matching size and stride arrays, and no
// holes within (in other words, there is some permutation that can be applied
// to the size/strides such that the resulting tensor is contiguous).
bool srcContig = THCudaTensor_isContiguous(state, src);
bool dstContig = THZCudaTensor_isContiguous(state, dst);
// bool memcpyEligible = (srcContig && dstContig) || (totalElements == 1);
int oldDev = curGPU();
int srcDev = THCudaTensor_getDevice(state, src);
int dstDev = THZCudaTensor_getDevice(state, dst);
// empirically, running the kernel on the device that holds the
// non-contiguous tensor is faster by 5-10x
int copyDev = dstContig ? srcDev : dstDev;
int remoteDev = dstContig ? dstDev : srcDev;
if (srcDev == dstDev) {
if (oldDev != srcDev) {
THZCudaCheck(cudaSetDevice(srcDev));
}
} else {
// synchronize remote device before copy
cudaEvent_t dataReady;
THZCudaCheck(cudaSetDevice(remoteDev));
THZCudaCheck(cudaEventCreate(&dataReady));
THZCudaCheck(cudaEventRecord(
dataReady,
THCState_getDeviceStream(state, remoteDev, THCState_getCurrentStreamIndex(state))));
THZCudaCheck(cudaSetDevice(copyDev));
THZCudaCheck(cudaStreamWaitEvent(
THCState_getDeviceStream(state, copyDev, THCState_getCurrentStreamIndex(state)),
dataReady, 0));
THZCudaCheck(cudaEventDestroy(dataReady));
}
bool succ = THZCudaTensor_pointwiseApply2ZF(state, dst, src, CopyImOp());
THArgCheck(succ, 2, CUTORCH_DIM_WARNING);
if (srcDev != dstDev) {
// synchronize remote device after copy
cudaEvent_t doneCopying;
THZCudaCheck(cudaEventCreate(&doneCopying));
THZCudaCheck(cudaEventRecord(
doneCopying,
THCState_getDeviceStream(state, copyDev, THCState_getCurrentStreamIndex(state))));
THZCudaCheck(cudaSetDevice(remoteDev));
THZCudaCheck(cudaStreamWaitEvent(
THCState_getDeviceStream(state, remoteDev, THCState_getCurrentStreamIndex(state)),
doneCopying, 0));
THZCudaCheck(cudaEventDestroy(doneCopying));
}
if (curGPU() != oldDev) {
THZCudaCheck(cudaSetDevice(oldDev));
}
cudaError errcode = cudaGetLastError();
if (errcode != cudaSuccess) {
THError(cudaGetErrorString(errcode));
}
}
THZC_API void
THZCudaTensor_copyRe(THCState* state, THZCudaTensor* dst, THCudaTensor* src) {
long totalElements = THZCudaTensor_nElement(state, dst);
THArgCheck(totalElements == THCudaTensor_nElement(state, src), 2,
"sizes do not match");
if (THZCudaTensor_nDimension(state, dst) == 0) {
// Zero-dim tensor; copy nothing
return;
}
// We can memcpy the memory if:
// -both tensors are contiguous; or,
// -there is only one element to copy; or,
// -FIXME: if both tensors have matching size and stride arrays, and no
// holes within (in other words, there is some permutation that can be applied
// to the size/strides such that the resulting tensor is contiguous).
bool srcContig = THCudaTensor_isContiguous(state, src);
bool dstContig = THZCudaTensor_isContiguous(state, dst);
// bool memcpyEligible = (srcContig && dstContig) || (totalElements == 1);
int oldDev = curGPU();
int srcDev = THCudaTensor_getDevice(state, src);
int dstDev = THZCudaTensor_getDevice(state, dst);
// empirically, running the kernel on the device that holds the
// non-contiguous tensor is faster by 5-10x
int copyDev = dstContig ? srcDev : dstDev;
int remoteDev = dstContig ? dstDev : srcDev;
if (srcDev == dstDev) {
if (oldDev != srcDev) {
THZCudaCheck(cudaSetDevice(srcDev));
}
} else {
// synchronize remote device before copy
cudaEvent_t dataReady;
THZCudaCheck(cudaSetDevice(remoteDev));
THZCudaCheck(cudaEventCreate(&dataReady));
THZCudaCheck(cudaEventRecord(
dataReady,
THCState_getDeviceStream(state, remoteDev, THCState_getCurrentStreamIndex(state))));
THZCudaCheck(cudaSetDevice(copyDev));
THZCudaCheck(cudaStreamWaitEvent(
THCState_getDeviceStream(state, copyDev, THCState_getCurrentStreamIndex(state)),
dataReady, 0));
THZCudaCheck(cudaEventDestroy(dataReady));
}
bool succ = THZCudaTensor_pointwiseApply2ZF(state, dst, src, CopyReOp());
THArgCheck(succ, 2, CUTORCH_DIM_WARNING);
if (srcDev != dstDev) {
// synchronize remote device after copy
cudaEvent_t doneCopying;
THZCudaCheck(cudaEventCreate(&doneCopying));
THZCudaCheck(cudaEventRecord(
doneCopying,
THCState_getDeviceStream(state, copyDev, THCState_getCurrentStreamIndex(state))));
THZCudaCheck(cudaSetDevice(remoteDev));
THZCudaCheck(cudaStreamWaitEvent(
THCState_getDeviceStream(state, remoteDev, THCState_getCurrentStreamIndex(state)),
doneCopying, 0));
THZCudaCheck(cudaEventDestroy(doneCopying));
}
if (curGPU() != oldDev) {
THZCudaCheck(cudaSetDevice(oldDev));
}
cudaError errcode = cudaGetLastError();
if (errcode != cudaSuccess) {
THError(cudaGetErrorString(errcode));
}
}
|
update_kernel.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 30-May-2011 22:03:11
//
// user function
__device__
#include "update.h"
// CUDA kernel function
__global__ void op_cuda_update(
double *arg0,
double *arg1,
double *arg2,
double *arg3,
double *arg4,
int offset_s,
int set_size ) {
double arg0_l[4];
double arg1_l[4];
double arg2_l[4];
double arg4_l[1];
for (int d=0; d<1; d++) arg4_l[d]=ZERO_double;
int tid = threadIdx.x%OP_WARPSIZE;
extern __shared__ char shared[];
char *arg_s = shared + offset_s*(threadIdx.x/OP_WARPSIZE);
// process set elements
for (int n=threadIdx.x+blockIdx.x*blockDim.x;
n<set_size; n+=blockDim.x*gridDim.x) {
int offset = n - tid;
int nelems = MIN(OP_WARPSIZE,set_size-offset);
// copy data into shared memory, then into local
for (int m=0; m<4; m++)
((double *)arg_s)[tid+m*nelems] = arg0[tid+m*nelems+offset*4];
for (int m=0; m<4; m++)
arg0_l[m] = ((double *)arg_s)[m+tid*4];
for (int m=0; m<4; m++)
((double *)arg_s)[tid+m*nelems] = arg2[tid+m*nelems+offset*4];
for (int m=0; m<4; m++)
arg2_l[m] = ((double *)arg_s)[m+tid*4];
// user-supplied kernel call
update( arg0_l,
arg1_l,
arg2_l,
arg3+n,
arg4_l );
// copy back into shared memory, then to device
for (int m=0; m<4; m++)
((double *)arg_s)[m+tid*4] = arg1_l[m];
for (int m=0; m<4; m++)
arg1[tid+m*nelems+offset*4] = ((double *)arg_s)[tid+m*nelems];
for (int m=0; m<4; m++)
((double *)arg_s)[m+tid*4] = arg2_l[m];
for (int m=0; m<4; m++)
arg2[tid+m*nelems+offset*4] = ((double *)arg_s)[tid+m*nelems];
}
// global reductions
for(int d=0; d<1; d++)
op_reduction<OP_INC>(&arg4[d+blockIdx.x*1],arg4_l[d]);
}
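// --- Editorial note, not part of the op2.m-generated file ------------------
// The staging loops above keep global memory traffic coalesced: the warp's
// nelems set elements occupy 4*nelems consecutive doubles starting at
// arg0[offset*4]. The first loop copies that range into shared memory with
// four nelems-wide coalesced passes (index tid + m*nelems); the second loop
// then lets thread tid gather the 4 components of its own element from
// arg_s[tid*4 + m]. The write-back loops apply the inverse mapping so the
// stores are coalesced as well.
// ----------------------------------------------------------------------------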
// host stub function
void op_par_loop_update(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4 ){
double *arg4h = (double *)arg4.data;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: update \n");
}
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers(&cpu_t1, &wall_t1);
// set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_4
int nthread = OP_BLOCK_SIZE_4;
#else
// int nthread = OP_block_size;
int nthread = 128;
#endif
int nblocks = 200;
// transfer global reduction data to GPU
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg4.data = OP_reduct_h + reduct_bytes;
arg4.data_d = OP_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
((double *)arg4.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
mvReductArraysToDevice(reduct_bytes);
// work out shared memory requirements per element
int nshared = 0;
nshared = MAX(nshared,sizeof(double)*4);
nshared = MAX(nshared,sizeof(double)*4);
nshared = MAX(nshared,sizeof(double)*4);
// execute plan
int offset_s = nshared*OP_WARPSIZE;
nshared = MAX(nshared*nthread,reduct_size*nthread);
hipLaunchKernelGGL(( op_cuda_update), dim3(nblocks),dim3(nthread),nshared, 0, (double *) arg0.data_d,
(double *) arg1.data_d,
(double *) arg2.data_d,
(double *) arg3.data_d,
(double *) arg4.data_d,
offset_s,
set->size );
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("op_cuda_update execution failed\n");
// transfer global reduction data back to CPU
mvReductArraysToHost(reduct_bytes);
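  // (editorial note) op_reduction<OP_INC> left one partial sum per block in
  // arg4; the loop below finishes the reduction on the host by summing the
  // maxblocks partials into arg4h.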
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
arg4h[d] = arg4h[d] + ((double *)arg4.data)[d+b*1];
// update kernel record
op_timers(&cpu_t2, &wall_t2);
op_timing_realloc(4);
OP_kernels[4].name = name;
OP_kernels[4].count += 1;
OP_kernels[4].time += wall_t2 - wall_t1;
OP_kernels[4].transfer += (double)set->size * arg0.size;
OP_kernels[4].transfer += (double)set->size * arg1.size;
OP_kernels[4].transfer += (double)set->size * arg2.size * 2.0f;
OP_kernels[4].transfer += (double)set->size * arg3.size;
}
| update_kernel.cu | //
// auto-generated by op2.m on 30-May-2011 22:03:11
//
// user function
__device__
#include "update.h"
// CUDA kernel function
__global__ void op_cuda_update(
double *arg0,
double *arg1,
double *arg2,
double *arg3,
double *arg4,
int offset_s,
int set_size ) {
double arg0_l[4];
double arg1_l[4];
double arg2_l[4];
double arg4_l[1];
for (int d=0; d<1; d++) arg4_l[d]=ZERO_double;
int tid = threadIdx.x%OP_WARPSIZE;
extern __shared__ char shared[];
char *arg_s = shared + offset_s*(threadIdx.x/OP_WARPSIZE);
// process set elements
for (int n=threadIdx.x+blockIdx.x*blockDim.x;
n<set_size; n+=blockDim.x*gridDim.x) {
int offset = n - tid;
int nelems = MIN(OP_WARPSIZE,set_size-offset);
// copy data into shared memory, then into local
for (int m=0; m<4; m++)
((double *)arg_s)[tid+m*nelems] = arg0[tid+m*nelems+offset*4];
for (int m=0; m<4; m++)
arg0_l[m] = ((double *)arg_s)[m+tid*4];
for (int m=0; m<4; m++)
((double *)arg_s)[tid+m*nelems] = arg2[tid+m*nelems+offset*4];
for (int m=0; m<4; m++)
arg2_l[m] = ((double *)arg_s)[m+tid*4];
// user-supplied kernel call
update( arg0_l,
arg1_l,
arg2_l,
arg3+n,
arg4_l );
// copy back into shared memory, then to device
for (int m=0; m<4; m++)
((double *)arg_s)[m+tid*4] = arg1_l[m];
for (int m=0; m<4; m++)
arg1[tid+m*nelems+offset*4] = ((double *)arg_s)[tid+m*nelems];
for (int m=0; m<4; m++)
((double *)arg_s)[m+tid*4] = arg2_l[m];
for (int m=0; m<4; m++)
arg2[tid+m*nelems+offset*4] = ((double *)arg_s)[tid+m*nelems];
}
// global reductions
for(int d=0; d<1; d++)
op_reduction<OP_INC>(&arg4[d+blockIdx.x*1],arg4_l[d]);
}
// host stub function
void op_par_loop_update(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4 ){
double *arg4h = (double *)arg4.data;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: update \n");
}
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers(&cpu_t1, &wall_t1);
// set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_4
int nthread = OP_BLOCK_SIZE_4;
#else
// int nthread = OP_block_size;
int nthread = 128;
#endif
int nblocks = 200;
// transfer global reduction data to GPU
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg4.data = OP_reduct_h + reduct_bytes;
arg4.data_d = OP_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
((double *)arg4.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
mvReductArraysToDevice(reduct_bytes);
// work out shared memory requirements per element
int nshared = 0;
nshared = MAX(nshared,sizeof(double)*4);
nshared = MAX(nshared,sizeof(double)*4);
nshared = MAX(nshared,sizeof(double)*4);
// execute plan
int offset_s = nshared*OP_WARPSIZE;
nshared = MAX(nshared*nthread,reduct_size*nthread);
op_cuda_update<<<nblocks,nthread,nshared>>>( (double *) arg0.data_d,
(double *) arg1.data_d,
(double *) arg2.data_d,
(double *) arg3.data_d,
(double *) arg4.data_d,
offset_s,
set->size );
cutilSafeCall(cudaThreadSynchronize());
cutilCheckMsg("op_cuda_update execution failed\n");
// transfer global reduction data back to CPU
mvReductArraysToHost(reduct_bytes);
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
arg4h[d] = arg4h[d] + ((double *)arg4.data)[d+b*1];
// update kernel record
op_timers(&cpu_t2, &wall_t2);
op_timing_realloc(4);
OP_kernels[4].name = name;
OP_kernels[4].count += 1;
OP_kernels[4].time += wall_t2 - wall_t1;
OP_kernels[4].transfer += (double)set->size * arg0.size;
OP_kernels[4].transfer += (double)set->size * arg1.size;
OP_kernels[4].transfer += (double)set->size * arg2.size * 2.0f;
OP_kernels[4].transfer += (double)set->size * arg3.size;
}
|
04dcedab71a4797272f6395e58f11665733c096b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
texture<float, 1, hipReadModeElementType> tex_sin;
texture<float, 1, hipReadModeElementType> tex_sin2;
texture<float, 1, hipReadModeElementType> tex_sin3;
__global__ void interp( float* out, float *out2, int N ) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if( idx < N ) {
// interpolated texture lookup
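    // (editorial note) the coordinate passed to tex1D is idx/N; assuming the
    // textures were created with normalized coordinates and linear filtering
    // (configured outside this kernel), the hardware returns a value
    // interpolated between the two neighbouring texels.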
out[idx] = tex1D( tex_sin3 , (float) (idx) / N );
out2[idx] = tex1D( tex_sin, (float) (idx) / N );
// float a1 = tex1Dfetch( tex_sin2, idx/4 );
// out2[idx] = a1;
}
} | 04dcedab71a4797272f6395e58f11665733c096b.cu | #include "includes.h"
texture<float, 1, cudaReadModeElementType> tex_sin;
texture<float, 1, cudaReadModeElementType> tex_sin2;
texture<float, 1, cudaReadModeElementType> tex_sin3;
__global__ void interp( float* out, float *out2, int N ) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if( idx < N ) {
// interpolated texture lookup
out[idx] = tex1D( tex_sin3 , (float) (idx) / N );
out2[idx] = tex1D( tex_sin, (float) (idx) / N );
// float a1 = tex1Dfetch( tex_sin2, idx/4 );
// out2[idx] = a1;
}
} |
5b1500bca7fbc123f6f192e2295a3867d5591005.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zlobpcg_shift.cu, normal z -> c, Sun Nov 20 20:20:39 2016
*/
#include "magmasparse_internal.h"
__global__ void
magma_clobpcg_shift_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaFloatComplex * x )
{
int idx = threadIdx.x; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
if ( row<num_rows) {
magmaFloatComplex tmp = x[idx];
__syncthreads();
if ( idx > shift-1 ) {
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
        / x1[0] x2[0] x3[0] \
        | x1[1] x2[1] x3[1] |
    x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
        | x1[3] x2[3] x3[3] |
        \ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
shift magma_int_t
shift number
@param[in,out]
x magmaFloatComplex_ptr
input/output vector x
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
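// Editorial illustration (not part of the original MAGMA source): with
// num_vecs = 3 and shift = 1 a row of x changes as
//
//     before: [ x1[i]  x2[i]  x3[i] ]
//     after : [ x2[i]  x3[i]  x3[i] ]
//
// every entry whose column index is > shift-1 moves shift slots to the left,
// so the surviving residuals occupy the first num_vecs - shift columns and
// the trailing columns keep their old (stale) values.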
extern "C" magma_int_t
magma_clobpcg_shift(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaFloatComplex_ptr x,
magma_queue_t queue )
{
magma_int_t num_threads = num_vecs;
    // one thread per vector entry: each block handles one row containing the num_vecs entries
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int Ms = num_threads * sizeof( magmaFloatComplex );
if ( Ms > 1024*8 )
printf("error: too much shared memory requested.\n");
dim3 block( num_threads, 1, 1 );
int dimgrid1 = int( sqrt( float( num_rows )));
int dimgrid2 = magma_ceildiv( num_rows, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
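    // (editorial note) the grid is laid out in 2D so that more than 65535
    // row-blocks can be launched; the kernel reconstructs the row index as
    // blockIdx.y * gridDim.x + blockIdx.x.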
hipLaunchKernelGGL(( magma_clobpcg_shift_kernel), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
num_rows, num_vecs, shift, x );
return MAGMA_SUCCESS;
}
| 5b1500bca7fbc123f6f192e2295a3867d5591005.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zlobpcg_shift.cu, normal z -> c, Sun Nov 20 20:20:39 2016
*/
#include "magmasparse_internal.h"
__global__ void
magma_clobpcg_shift_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaFloatComplex * x )
{
int idx = threadIdx.x; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
if ( row<num_rows) {
magmaFloatComplex tmp = x[idx];
__syncthreads();
if ( idx > shift-1 ) {
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
        / x1[0] x2[0] x3[0] \
        | x1[1] x2[1] x3[1] |
    x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
        | x1[3] x2[3] x3[3] |
        \ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
shift magma_int_t
shift number
@param[in,out]
x magmaFloatComplex_ptr
input/output vector x
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
extern "C" magma_int_t
magma_clobpcg_shift(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaFloatComplex_ptr x,
magma_queue_t queue )
{
magma_int_t num_threads = num_vecs;
    // one thread per vector entry: each block handles one row containing the num_vecs entries
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int Ms = num_threads * sizeof( magmaFloatComplex );
if ( Ms > 1024*8 )
printf("error: too much shared memory requested.\n");
dim3 block( num_threads, 1, 1 );
int dimgrid1 = int( sqrt( float( num_rows )));
int dimgrid2 = magma_ceildiv( num_rows, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
magma_clobpcg_shift_kernel<<< grid, block, Ms, queue->cuda_stream() >>>
( num_rows, num_vecs, shift, x );
return MAGMA_SUCCESS;
}
|
7ebaeb2cf9e7d85746bb1e9231ca871fd822c964.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The translation unit for reduction `standard deviation`
#include "reduction_functions_hip.cuh"
#include "compound.cuh"
// @param[in] ddof Delta Degrees of Freedom used for `std`, `var`.
// The divisor used in calculations is N - ddof, where N represents the number of elements.
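// For example (editorial note): with N = 10 elements and ddof = 1 (Bessel's
// correction) the variance divisor is 9, i.e. var = sum((x_i - mean)^2) / 9,
// and the standard deviation returned here is sqrt(var).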
gdf_scalar cudf::reduction::standard_deviation(gdf_column const& col, gdf_dtype const output_dtype, gdf_size_type ddof, hipStream_t stream)
{
using reducer = cudf::reduction::compound::element_type_dispatcher<cudf::reduction::op::standard_deviation>;
return cudf::type_dispatcher(col.dtype, reducer(), col, output_dtype, ddof, stream);
}
| 7ebaeb2cf9e7d85746bb1e9231ca871fd822c964.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The translation unit for reduction `standard deviation`
#include "reduction_functions.cuh"
#include "compound.cuh"
// @param[in] ddof Delta Degrees of Freedom used for `std`, `var`.
// The divisor used in calculations is N - ddof, where N represents the number of elements.
gdf_scalar cudf::reduction::standard_deviation(gdf_column const& col, gdf_dtype const output_dtype, gdf_size_type ddof, cudaStream_t stream)
{
using reducer = cudf::reduction::compound::element_type_dispatcher<cudf::reduction::op::standard_deviation>;
return cudf::type_dispatcher(col.dtype, reducer(), col, output_dtype, ddof, stream);
}
|
f158520aa289da08e6d056bea439e4c4cf7dc333.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/*
* This is a simple test program to measure the memcopy bandwidth of the GPU.
* It can measure device to device copy bandwidth, host to device copy bandwidth
* for pageable and pinned memory, and device to host copy bandwidth for pageable
* and pinned memory.
*
* Usage:
* ./bandwidthTest [option]...
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil_inline.h>
#include <hip/hip_runtime.h>
// defines, project
#define MEMCOPY_ITERATIONS 10
#define DEFAULT_SIZE ( 32 * ( 1 << 20 ) ) //32 M
#define DEFAULT_INCREMENT (1 << 22) //4 M
#define CACHE_CLEAR_SIZE (1 << 24) //16 M
//shmoo mode defines
#define SHMOO_MEMSIZE_MAX (1 << 26) //64 M
#define SHMOO_MEMSIZE_START (1 << 10) //1 KB
#define SHMOO_INCREMENT_1KB (1 << 10) //1 KB
#define SHMOO_INCREMENT_2KB (1 << 11) //2 KB
#define SHMOO_INCREMENT_10KB (10 * (1 << 10)) //10KB
#define SHMOO_INCREMENT_100KB (100 * (1 << 10)) //100 KB
#define SHMOO_INCREMENT_1MB (1 << 20) //1 MB
#define SHMOO_INCREMENT_2MB (1 << 21) //2 MB
#define SHMOO_INCREMENT_4MB (1 << 22) //4 MB
#define SHMOO_LIMIT_20KB (20 * (1 << 10)) //20 KB
#define SHMOO_LIMIT_50KB (50 * (1 << 10)) //50 KB
#define SHMOO_LIMIT_100KB (100 * (1 << 10)) //100 KB
#define SHMOO_LIMIT_1MB (1 << 20) //1 MB
#define SHMOO_LIMIT_16MB (1 << 24) //16 MB
#define SHMOO_LIMIT_32MB (1 << 25) //32 MB
//enums, project
enum testMode { QUICK_MODE, RANGE_MODE, SHMOO_MODE };
enum memcpyKind { DEVICE_TO_HOST, HOST_TO_DEVICE, DEVICE_TO_DEVICE };
enum printMode { USER_READABLE, CSV };
enum memoryMode { PINNED, PAGEABLE };
// if true, use CPU based timing for everything
static bool bDontUseGPUTiming;
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(const int argc, const char **argv);
void testBandwidth( unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment,
memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc);
float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc);
float testDeviceToDeviceTransfer(unsigned int memSize);
void printResultsReadable(unsigned int *memSizes, float *bandwidths, unsigned int count);
void printResultsCSV(unsigned int *memSizes, float *bandwidths, unsigned int count);
void printHelp(void);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
runTest(argc, (const char**)argv);
cutilExit(argc, argv);
}
///////////////////////////////////////////////////////////////////////////////
//Parse args, run the appropriate tests
///////////////////////////////////////////////////////////////////////////////
void runTest(const int argc, const char **argv)
{
int start = DEFAULT_SIZE;
int end = DEFAULT_SIZE;
int startDevice = 0;
int endDevice = 0;
int increment = DEFAULT_INCREMENT;
testMode mode = QUICK_MODE;
bool htod = false;
bool dtoh = false;
bool dtod = false;
bool wc = false;
char *modeStr;
char *device = NULL;
printMode printmode = USER_READABLE;
char *memModeStr = NULL;
memoryMode memMode = PAGEABLE;
//process command line args
if(cutCheckCmdLineFlag( argc, argv, "help"))
{
printHelp();
return;
}
if(cutCheckCmdLineFlag( argc, argv, "csv"))
{
printmode = CSV;
}
if( cutGetCmdLineArgumentstr(argc, argv, "memory", &memModeStr) )
{
if( strcmp(memModeStr, "pageable") == 0 )
{
memMode = PAGEABLE;
}
else if( strcmp(memModeStr, "pinned") == 0)
{
memMode = PINNED;
}
else
{
printf("Invalid memory mode - valid modes are pageable or pinned\n");
printf("See --help for more information\n");
return;
}
}
else
{
//default - pageable memory
memMode = PAGEABLE;
}
if( cutGetCmdLineArgumentstr(argc, argv, "device", &device) )
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
if( deviceCount == 0 )
{
printf("!!!!!No devices found!!!!!\n");
return;
}
if( strcmp (device, "all") == 0 )
{
printf ("\n!!!!!Cumulative Bandwidth to be computed from all the devices !!!!!!\n\n");
startDevice = 0;
endDevice = deviceCount-1;
}
else
{
startDevice = endDevice = atoi(device);
if( startDevice >= deviceCount || startDevice < 0)
{
printf("\n!!!!!Invalid GPU number %d given hence default gpu %d will be used !!!!!\n", startDevice,0);
startDevice = endDevice = 0;
}
}
}
printf("Running on......\n");
for( int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, currentDevice);
printf (" device %d:%s\n", currentDevice,deviceProp.name);
}
if( cutGetCmdLineArgumentstr(argc, argv, "mode", &modeStr) )
{
//figure out the mode
if( strcmp(modeStr, "quick") == 0 )
{
mode = QUICK_MODE;
}
else if( strcmp(modeStr, "shmoo") == 0 )
{
mode = SHMOO_MODE;
}
else if( strcmp(modeStr, "range") == 0 )
{
mode = RANGE_MODE;
}
else
{
printf("Invalid mode - valid modes are quick, range, or shmoo\n");
printf("See --help for more information\n");
return;
}
}
else
{
//default mode - quick
mode = QUICK_MODE;
}
if(cutCheckCmdLineFlag( argc, argv, "htod"))
htod = true;
if(cutCheckCmdLineFlag( argc, argv, "dtoh"))
dtoh = true;
if(cutCheckCmdLineFlag( argc, argv, "dtod"))
dtod = true;
#if CUDART_VERSION >= 2020
if(cutCheckCmdLineFlag( argc, argv, "wc"))
wc = true;
#endif
if(cutCheckCmdLineFlag( argc, argv, "cputiming"))
bDontUseGPUTiming = true;
if( !htod && !dtoh && !dtod )
{
//default: All
htod = true;
dtoh = true;
dtod = true;
}
if( RANGE_MODE == mode )
{
if( cutGetCmdLineArgumenti( argc, argv, "start", &start) )
{
if( start <= 0 )
{
printf("Illegal argument - start must be greater than zero\n");
return;
}
}
else
{
printf("Must specify a starting size in range mode\n");
printf("See --help for more information\n");
return;
}
if( cutGetCmdLineArgumenti( argc, argv, "end", &end) )
{
if( end <= 0 )
{
printf("Illegal argument - end must be greater than zero\n");
return;
}
if( start > end )
{
printf("Illegal argument - start is greater than end\n");
return;
}
}
else
{
printf("Must specify an end size in range mode.\n");
printf("See --help for more information\n");
return;
}
if( cutGetCmdLineArgumenti( argc, argv, "increment", &increment) )
{
if( increment <= 0 )
{
printf("Illegal argument - increment must be greater than zero\n");
return;
}
}
else
{
printf("Must specify an increment in user mode\n");
printf("See --help for more information\n");
return;
}
}
if( htod )
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, HOST_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc);
}
if( dtoh )
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, DEVICE_TO_HOST, printmode, memMode, startDevice, endDevice, wc);
}
if( dtod )
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, DEVICE_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc);
}
printf("&&&& Test PASSED\n");
cutFree( memModeStr);
return;
}
///////////////////////////////////////////////////////////////////////////////
// Run a bandwidth test
///////////////////////////////////////////////////////////////////////////////
void
testBandwidth(unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
switch( mode )
{
case QUICK_MODE:
printf("Quick Mode\n");
testBandwidthQuick( DEFAULT_SIZE, kind, printmode, memMode, startDevice, endDevice, wc );
break;
case RANGE_MODE:
printf("Range Mode\n");
testBandwidthRange(start, end, increment, kind, printmode, memMode, startDevice, endDevice, wc);
break;
case SHMOO_MODE:
printf("Shmoo Mode\n");
testBandwidthShmoo(kind, printmode, memMode, startDevice, endDevice, wc);
break;
default:
printf("Invalid testing mode\n");
break;
}
}
//////////////////////////////////////////////////////////////////////
// Run a quick mode bandwidth test
//////////////////////////////////////////////////////////////////////
void
testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
testBandwidthRange(size, size, DEFAULT_INCREMENT, kind, printmode, memMode, startDevice, endDevice, wc);
}
///////////////////////////////////////////////////////////////////////
// Run a range mode bandwidth test
//////////////////////////////////////////////////////////////////////
void
testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment,
memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
//count the number of copies we're going to run
unsigned int count = 1 + ((end - start) / increment);
unsigned int *memSizes = ( unsigned int * )malloc( count * sizeof( unsigned int ) );
float *bandwidths = ( float * ) malloc( count * sizeof(float) );
//print information for use
switch(kind)
{
case DEVICE_TO_HOST: printf("Device to Host Bandwidth for ");
break;
case HOST_TO_DEVICE: printf("Host to Device Bandwidth for ");
break;
case DEVICE_TO_DEVICE: printf("Device to Device Bandwidth\n");
break;
}
if( DEVICE_TO_DEVICE != kind )
{ switch(memMode)
{
case PAGEABLE: printf("Pageable memory\n");
break;
case PINNED: printf("Pinned memory\n");
if (wc) printf("Write-Combined memory enabled\n");
break;
}
}
    // Before calculating the cumulative bandwidth, initialize the bandwidths array to 0.0f
for (int i = 0; i < count; i++)
bandwidths[i] = 0.0f;
// Use the device asked by the user
for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
hipSetDevice(currentDevice);
//run each of the copies
for(unsigned int i = 0; i < count; i++)
{
memSizes[i] = start + i * increment;
switch(kind)
{
case DEVICE_TO_HOST: bandwidths[i] = testDeviceToHostTransfer( memSizes[i], memMode, wc );
break;
case HOST_TO_DEVICE: bandwidths[i] = testHostToDeviceTransfer( memSizes[i], memMode, wc );
break;
case DEVICE_TO_DEVICE: bandwidths[i] = testDeviceToDeviceTransfer( memSizes[i] );
break;
}
printf(".");
}
hipDeviceReset();
} // Complete the bandwidth computation on all the devices
printf("\n");
//print results
if(printmode == CSV)
{
printResultsCSV(memSizes, bandwidths, count);
}
else
{
printResultsReadable(memSizes, bandwidths, count);
}
//clean up
free(memSizes);
free(bandwidths);
}
//////////////////////////////////////////////////////////////////////////////
// Intense shmoo mode - covers a large range of values with varying increments
//////////////////////////////////////////////////////////////////////////////
void
testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
//print info for user
switch(kind)
{
case DEVICE_TO_HOST: printf("Device to Host Bandwidth for ");
break;
case HOST_TO_DEVICE: printf("Host to Device Bandwidth for ");
break;
case DEVICE_TO_DEVICE: printf("Device to Device Bandwidth\n");
break;
}
if( DEVICE_TO_DEVICE != kind )
{ switch(memMode)
{
case PAGEABLE: printf("Pageable memory\n");
break;
case PINNED: printf("Pinned memory\n");
if (wc) printf("Write-Combined memory enabled\n");
break;
}
}
//count the number of copies to make
unsigned int count = 1 + (SHMOO_LIMIT_20KB / SHMOO_INCREMENT_1KB)
+ ((SHMOO_LIMIT_50KB - SHMOO_LIMIT_20KB) / SHMOO_INCREMENT_2KB)
+ ((SHMOO_LIMIT_100KB - SHMOO_LIMIT_50KB) / SHMOO_INCREMENT_10KB)
+ ((SHMOO_LIMIT_1MB - SHMOO_LIMIT_100KB) / SHMOO_INCREMENT_100KB)
+ ((SHMOO_LIMIT_16MB - SHMOO_LIMIT_1MB) / SHMOO_INCREMENT_1MB)
+ ((SHMOO_LIMIT_32MB - SHMOO_LIMIT_16MB) / SHMOO_INCREMENT_2MB)
+ ((SHMOO_MEMSIZE_MAX - SHMOO_LIMIT_32MB) / SHMOO_INCREMENT_4MB);
unsigned int *memSizes = ( unsigned int * )malloc( count * sizeof( unsigned int ) );
float *bandwidths = ( float * ) malloc( count * sizeof(float) );
    // Before calculating the cumulative bandwidth, initialize the bandwidths array to 0.0f
for (int i = 0; i < count; i++)
bandwidths[i] = 0.0f;
// Use the device asked by the user
for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
hipSetDevice(currentDevice);
//Run the shmoo
int iteration = 0;
unsigned int memSize = 0;
while( memSize <= SHMOO_MEMSIZE_MAX )
{
if( memSize < SHMOO_LIMIT_20KB )
{
memSize += SHMOO_INCREMENT_1KB;
}
else if( memSize < SHMOO_LIMIT_50KB )
{
memSize += SHMOO_INCREMENT_2KB;
}else if( memSize < SHMOO_LIMIT_100KB )
{
memSize += SHMOO_INCREMENT_10KB;
}else if( memSize < SHMOO_LIMIT_1MB )
{
memSize += SHMOO_INCREMENT_100KB;
}else if( memSize < SHMOO_LIMIT_16MB )
{
memSize += SHMOO_INCREMENT_1MB;
}else if( memSize < SHMOO_LIMIT_32MB )
{
memSize += SHMOO_INCREMENT_2MB;
}else
{
memSize += SHMOO_INCREMENT_4MB;
}
memSizes[iteration] = memSize;
switch(kind)
{
case DEVICE_TO_HOST: bandwidths[iteration] += testDeviceToHostTransfer( memSizes[iteration], memMode, wc );
break;
case HOST_TO_DEVICE: bandwidths[iteration] += testHostToDeviceTransfer( memSizes[iteration], memMode, wc );
break;
case DEVICE_TO_DEVICE: bandwidths[iteration] += testDeviceToDeviceTransfer( memSizes[iteration] );
break;
}
iteration++;
printf(".");
}
} // Complete the bandwidth computation on all the devices
printf("\n");
//print results
if( CSV == printmode)
{
printResultsCSV(memSizes, bandwidths, count);
}
else
{
printResultsReadable(memSizes, bandwidths, count);
}
//clean up
free(memSizes);
free(bandwidths);
}
///////////////////////////////////////////////////////////////////////////////
// test the bandwidth of a device to host memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc)
{
unsigned int timer = 0;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
unsigned char *h_idata = NULL;
unsigned char *h_odata = NULL;
hipEvent_t start, stop;
cutilCheckError( cutCreateTimer( &timer ) );
cutilSafeCall ( hipEventCreate( &start ) );
cutilSafeCall ( hipEventCreate( &stop ) );
//allocate host memory
if( PINNED == memMode )
{
//pinned memory mode - use special function to get OS-pinned memory
#if CUDART_VERSION >= 2020
cutilSafeCall( hipHostMalloc( (void**)&h_idata, memSize, (wc) ? hipHostMallocWriteCombined : 0 ) );
cutilSafeCall( hipHostMalloc( (void**)&h_odata, memSize, (wc) ? hipHostMallocWriteCombined : 0 ) );
#else
cutilSafeCall( hipHostMalloc( (void**)&h_idata, memSize ) );
cutilSafeCall( hipHostMalloc( (void**)&h_odata, memSize ) );
#endif
}
else
{
//pageable memory mode - use malloc
h_idata = (unsigned char *)malloc( memSize );
h_odata = (unsigned char *)malloc( memSize );
}
//initialize the memory
for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_idata[i] = (unsigned char) (i & 0xff);
}
// allocate device memory
unsigned char* d_idata;
cutilSafeCall( hipMalloc( (void**) &d_idata, memSize));
//initialize the device memory
cutilSafeCall( hipMemcpy( d_idata, h_idata, memSize,
hipMemcpyHostToDevice) );
//copy data from GPU to Host
cutilCheckError( cutStartTimer( timer));
cutilSafeCall( hipEventRecord( start, 0 ) );
if( PINNED == memMode )
{
for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
{
cutilSafeCall( hipMemcpyAsync( h_odata, d_idata, memSize,
hipMemcpyDeviceToHost, 0) );
}
}
else
{
for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
{
cutilSafeCall( hipMemcpy( h_odata, d_idata, memSize,
hipMemcpyDeviceToHost) );
}
}
cutilSafeCall( hipEventRecord( stop, 0 ) );
// make sure GPU has finished copying
cutilSafeCall( hipDeviceSynchronize() );
    //get the total elapsed time in ms
cutilCheckError( cutStopTimer( timer));
cutilSafeCall( hipEventElapsedTime( &elapsedTimeInMs, start, stop ) );
if( PINNED != memMode || bDontUseGPUTiming )
{
elapsedTimeInMs = cutGetTimerValue( timer);
}
//calculate bandwidth in MB/s
bandwidthInMBs = (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
(elapsedTimeInMs * (float)(1 << 20));
//clean up memory
cutilSafeCall( hipEventDestroy(stop) );
cutilSafeCall( hipEventDestroy(start) );
cutilCheckError( cutDeleteTimer( timer));
if( PINNED == memMode )
{
cutilSafeCall( hipHostFree(h_idata) );
cutilSafeCall( hipHostFree(h_odata) );
}
else
{
free(h_idata);
free(h_odata);
}
cutilSafeCall(hipFree(d_idata));
return bandwidthInMBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a host to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc)
{
unsigned int timer = 0;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
hipEvent_t start, stop;
cutilCheckError( cutCreateTimer( &timer ) );
cutilSafeCall( hipEventCreate( &start ) );
cutilSafeCall( hipEventCreate( &stop ) );
//allocate host memory
unsigned char *h_odata = NULL;
if( PINNED == memMode )
{
#if CUDART_VERSION >= 2020
//pinned memory mode - use special function to get OS-pinned memory
cutilSafeCall( hipHostMalloc( (void**)&h_odata, memSize, (wc) ? hipHostMallocWriteCombined : 0 ) );
#else
//pinned memory mode - use special function to get OS-pinned memory
cutilSafeCall( hipHostMalloc( (void**)&h_odata, memSize ) );
#endif
}
else
{
//pageable memory mode - use malloc
h_odata = (unsigned char *)malloc( memSize );
}
unsigned char *h_cacheClear1 = (unsigned char *)malloc( CACHE_CLEAR_SIZE );
unsigned char *h_cacheClear2 = (unsigned char *)malloc( CACHE_CLEAR_SIZE );
//initialize the memory
for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_odata[i] = (unsigned char) (i & 0xff);
}
for(unsigned int i = 0; i < CACHE_CLEAR_SIZE / sizeof(unsigned char); i++)
{
h_cacheClear1[i] = (unsigned char) (i & 0xff);
h_cacheClear2[i] = (unsigned char) (0xff - (i & 0xff));
}
//allocate device memory
unsigned char* d_idata;
cutilSafeCall( hipMalloc( (void**) &d_idata, memSize));
cutilCheckError( cutStartTimer( timer));
cutilSafeCall( hipEventRecord( start, 0 ) );
//copy host memory to device memory
if( PINNED == memMode )
{
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
cutilSafeCall( hipMemcpyAsync( d_idata, h_odata, memSize,
hipMemcpyHostToDevice, 0) );
}
}
else {
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
cutilSafeCall( hipMemcpy( d_idata, h_odata, memSize,
hipMemcpyHostToDevice) );
}
}
cutilSafeCall( hipEventRecord( stop, 0 ) );
cutilSafeCall( hipDeviceSynchronize() );
//total elapsed time in ms
cutilCheckError( cutStopTimer( timer));
cutilSafeCall( hipEventElapsedTime( &elapsedTimeInMs, start, stop ) );
if ( PINNED != memMode || bDontUseGPUTiming )
{
elapsedTimeInMs = cutGetTimerValue( timer);
}
cutilCheckError( cutResetTimer( timer));
//calculate bandwidth in MB/s
bandwidthInMBs = (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
(elapsedTimeInMs * (float)(1 << 20));
//clean up memory
cutilSafeCall( hipEventDestroy(stop) );
cutilSafeCall( hipEventDestroy(start) );
cutilCheckError( cutDeleteTimer( timer));
if( PINNED == memMode )
{
cutilSafeCall( hipHostFree(h_odata) );
}
else
{
free(h_odata);
}
free(h_cacheClear1);
free(h_cacheClear2);
cutilSafeCall(hipFree(d_idata));
return bandwidthInMBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a device to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testDeviceToDeviceTransfer(unsigned int memSize)
{
unsigned int timer = 0;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
hipEvent_t start, stop;
cutilCheckError( cutCreateTimer( &timer ) );
cutilSafeCall( hipEventCreate( &start ) );
cutilSafeCall( hipEventCreate( &stop ) );
//allocate host memory
unsigned char *h_idata = (unsigned char *)malloc( memSize );
//initialize the host memory
for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_idata[i] = (unsigned char) (i & 0xff);
}
//allocate device memory
unsigned char *d_idata;
cutilSafeCall( hipMalloc( (void**) &d_idata, memSize));
unsigned char *d_odata;
cutilSafeCall( hipMalloc( (void**) &d_odata, memSize));
//initialize memory
cutilSafeCall( hipMemcpy( d_idata, h_idata, memSize,
hipMemcpyHostToDevice) );
//run the memcopy
cutilCheckError( cutStartTimer( timer));
cutilSafeCall( hipEventRecord( start, 0 ) );
for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
{
cutilSafeCall( hipMemcpy( d_odata, d_idata, memSize,
hipMemcpyDeviceToDevice) );
}
cutilSafeCall( hipEventRecord( stop, 0 ) );
//Since device to device memory copies are non-blocking,
//hipDeviceSynchronize() is required in order to get
//proper timing.
cutilSafeCall( hipDeviceSynchronize() );
    //get the total elapsed time in ms
cutilCheckError( cutStopTimer( timer));
cutilSafeCall( hipEventElapsedTime( &elapsedTimeInMs, start, stop ) );
if ( bDontUseGPUTiming )
{
elapsedTimeInMs = cutGetTimerValue( timer);
}
//calculate bandwidth in MB/s
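    //NOTE (editorial): the factor of 2.0f below counts each byte twice,
    //because a device-to-device copy both reads and writes device memory for
    //every byte transferred.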
bandwidthInMBs = 2.0f * (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
(elapsedTimeInMs * (float)(1 << 20));
//clean up memory
cutilCheckError( cutDeleteTimer( timer));
free(h_idata);
cutilSafeCall(hipEventDestroy(stop));
cutilSafeCall(hipEventDestroy(start));
cutilSafeCall(hipFree(d_idata));
cutilSafeCall(hipFree(d_odata));
return bandwidthInMBs;
}
/////////////////////////////////////////////////////////
//print results in an easily read format
////////////////////////////////////////////////////////
void printResultsReadable(unsigned int *memSizes, float *bandwidths, unsigned int count)
{
printf("Transfer Size (Bytes)\tBandwidth(MB/s)\n");
for(unsigned int i = 0; i < count; i++)
{
printf("%9u\t\t%.1f\n", memSizes[i], bandwidths[i]);
}
printf("\n");
fflush(stdout);
}
///////////////////////////////////////////////////////////////////////////
//print results in CSV format
///////////////////////////////////////////////////////////////////////////
void printResultsCSV(unsigned int *memSizes, float *bandwidths, unsigned int count)
{
printf("Transfer size (Bytes),");
for(unsigned int i = 0; i < count; i++)
{
printf("%u,", memSizes[i]);
}
printf("\n");
printf("Bandwidth (MB/s),");
for(unsigned int i = 0; i < count; i++)
{
printf("%.1f,", bandwidths[i]);
}
printf("\n\n");
fflush(stdout);
}
///////////////////////////////////////////////////////////////////////////
//Print help screen
///////////////////////////////////////////////////////////////////////////
void printHelp(void)
{
printf("Usage: bandwidthTest [OPTION]...\n");
printf("Test the bandwidth for device to host, host to device, and device to device transfers\n");
printf("\n");
printf("Example: measure the bandwidth of device to host pinned memory copies in the range 1024 Bytes to 102400 Bytes in 1024 Byte increments\n");
printf("./bandwidthTest --memory=pinned --mode=range --start=1024 --end=102400 --increment=1024 --dtoh\n");
printf("\n");
printf("Options:\n");
printf("--help\tDisplay this help menu\n");
printf("--csv\tPrint results as a CSV\n");
printf("--device=[deviceno]\tSpecify the device device to be used\n");
printf(" all - compute cumulative bandwidth on all the devices\n");
printf(" 0,1,2,...,n - Specify any particular device to be used\n");
printf("--memory=[MEMMODE]\tSpecify which memory mode to use\n");
printf(" pageable - pageable memory\n");
printf(" pinned - non-pageable system memory\n");
printf("--mode=[MODE]\tSpecify the mode to use\n");
printf(" quick - performs a quick measurement\n");
printf(" range - measures a user-specified range of values\n");
printf(" shmoo - performs an intense shmoo of a large range of values\n");
printf("--htod\tMeasure host to device transfers\n");
printf("--dtoh\tMeasure device to host transfers\n");
printf("--dtod\tMeasure device to device transfers\n");
#if CUDART_VERSION >= 2020
printf("--wc\tAllocate pinned memory as write-combined\n");
#endif
printf("--cputiming\tForce CPU-based timing always\n");
printf("Range mode options\n");
printf("--start=[SIZE]\tStarting transfer size in bytes\n");
printf("--end=[SIZE]\tEnding transfer size in bytes\n");
printf("--increment=[SIZE]\tIncrement size in bytes\n");
}
| f158520aa289da08e6d056bea439e4c4cf7dc333.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/*
* This is a simple test program to measure the memcopy bandwidth of the GPU.
* It can measure device to device copy bandwidth, host to device copy bandwidth
* for pageable and pinned memory, and device to host copy bandwidth for pageable
* and pinned memory.
*
* Usage:
* ./bandwidthTest [option]...
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil_inline.h>
#include <cuda.h>
// defines, project
#define MEMCOPY_ITERATIONS 10
#define DEFAULT_SIZE ( 32 * ( 1 << 20 ) ) //32 M
#define DEFAULT_INCREMENT (1 << 22) //4 M
#define CACHE_CLEAR_SIZE (1 << 24) //16 M
//shmoo mode defines
#define SHMOO_MEMSIZE_MAX (1 << 26) //64 M
#define SHMOO_MEMSIZE_START (1 << 10) //1 KB
#define SHMOO_INCREMENT_1KB (1 << 10) //1 KB
#define SHMOO_INCREMENT_2KB (1 << 11) //2 KB
#define SHMOO_INCREMENT_10KB (10 * (1 << 10)) //10KB
#define SHMOO_INCREMENT_100KB (100 * (1 << 10)) //100 KB
#define SHMOO_INCREMENT_1MB (1 << 20) //1 MB
#define SHMOO_INCREMENT_2MB (1 << 21) //2 MB
#define SHMOO_INCREMENT_4MB (1 << 22) //4 MB
#define SHMOO_LIMIT_20KB (20 * (1 << 10)) //20 KB
#define SHMOO_LIMIT_50KB (50 * (1 << 10)) //50 KB
#define SHMOO_LIMIT_100KB (100 * (1 << 10)) //100 KB
#define SHMOO_LIMIT_1MB (1 << 20) //1 MB
#define SHMOO_LIMIT_16MB (1 << 24) //16 MB
#define SHMOO_LIMIT_32MB (1 << 25) //32 MB
//enums, project
enum testMode { QUICK_MODE, RANGE_MODE, SHMOO_MODE };
enum memcpyKind { DEVICE_TO_HOST, HOST_TO_DEVICE, DEVICE_TO_DEVICE };
enum printMode { USER_READABLE, CSV };
enum memoryMode { PINNED, PAGEABLE };
// if true, use CPU based timing for everything
static bool bDontUseGPUTiming;
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(const int argc, const char **argv);
void testBandwidth( unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment,
memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc);
float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc);
float testDeviceToDeviceTransfer(unsigned int memSize);
void printResultsReadable(unsigned int *memSizes, float *bandwidths, unsigned int count);
void printResultsCSV(unsigned int *memSizes, float *bandwidths, unsigned int count);
void printHelp(void);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
runTest(argc, (const char**)argv);
cutilExit(argc, argv);
}
///////////////////////////////////////////////////////////////////////////////
//Parse args, run the appropriate tests
///////////////////////////////////////////////////////////////////////////////
void runTest(const int argc, const char **argv)
{
int start = DEFAULT_SIZE;
int end = DEFAULT_SIZE;
int startDevice = 0;
int endDevice = 0;
int increment = DEFAULT_INCREMENT;
testMode mode = QUICK_MODE;
bool htod = false;
bool dtoh = false;
bool dtod = false;
bool wc = false;
char *modeStr;
char *device = NULL;
printMode printmode = USER_READABLE;
char *memModeStr = NULL;
memoryMode memMode = PAGEABLE;
//process command line args
if(cutCheckCmdLineFlag( argc, argv, "help"))
{
printHelp();
return;
}
if(cutCheckCmdLineFlag( argc, argv, "csv"))
{
printmode = CSV;
}
if( cutGetCmdLineArgumentstr(argc, argv, "memory", &memModeStr) )
{
if( strcmp(memModeStr, "pageable") == 0 )
{
memMode = PAGEABLE;
}
else if( strcmp(memModeStr, "pinned") == 0)
{
memMode = PINNED;
}
else
{
printf("Invalid memory mode - valid modes are pageable or pinned\n");
printf("See --help for more information\n");
return;
}
}
else
{
//default - pageable memory
memMode = PAGEABLE;
}
if( cutGetCmdLineArgumentstr(argc, argv, "device", &device) )
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if( deviceCount == 0 )
{
printf("!!!!!No devices found!!!!!\n");
return;
}
if( strcmp (device, "all") == 0 )
{
printf ("\n!!!!!Cumulative Bandwidth to be computed from all the devices !!!!!!\n\n");
startDevice = 0;
endDevice = deviceCount-1;
}
else
{
startDevice = endDevice = atoi(device);
if( startDevice >= deviceCount || startDevice < 0)
{
printf("\n!!!!!Invalid GPU number %d given, so default GPU %d will be used!!!!!\n", startDevice, 0);
startDevice = endDevice = 0;
}
}
}
printf("Running on......\n");
for( int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, currentDevice);
printf (" device %d:%s\n", currentDevice,deviceProp.name);
}
if( cutGetCmdLineArgumentstr(argc, argv, "mode", &modeStr) )
{
//figure out the mode
if( strcmp(modeStr, "quick") == 0 )
{
mode = QUICK_MODE;
}
else if( strcmp(modeStr, "shmoo") == 0 )
{
mode = SHMOO_MODE;
}
else if( strcmp(modeStr, "range") == 0 )
{
mode = RANGE_MODE;
}
else
{
printf("Invalid mode - valid modes are quick, range, or shmoo\n");
printf("See --help for more information\n");
return;
}
}
else
{
//default mode - quick
mode = QUICK_MODE;
}
if(cutCheckCmdLineFlag( argc, argv, "htod"))
htod = true;
if(cutCheckCmdLineFlag( argc, argv, "dtoh"))
dtoh = true;
if(cutCheckCmdLineFlag( argc, argv, "dtod"))
dtod = true;
#if CUDART_VERSION >= 2020
if(cutCheckCmdLineFlag( argc, argv, "wc"))
wc = true;
#endif
if(cutCheckCmdLineFlag( argc, argv, "cputiming"))
bDontUseGPUTiming = true;
if( !htod && !dtoh && !dtod )
{
//default: All
htod = true;
dtoh = true;
dtod = true;
}
if( RANGE_MODE == mode )
{
if( cutGetCmdLineArgumenti( argc, argv, "start", &start) )
{
if( start <= 0 )
{
printf("Illegal argument - start must be greater than zero\n");
return;
}
}
else
{
printf("Must specify a starting size in range mode\n");
printf("See --help for more information\n");
return;
}
if( cutGetCmdLineArgumenti( argc, argv, "end", &end) )
{
if( end <= 0 )
{
printf("Illegal argument - end must be greater than zero\n");
return;
}
if( start > end )
{
printf("Illegal argument - start is greater than end\n");
return;
}
}
else
{
printf("Must specify an end size in range mode.\n");
printf("See --help for more information\n");
return;
}
if( cutGetCmdLineArgumenti( argc, argv, "increment", &increment) )
{
if( increment <= 0 )
{
printf("Illegal argument - increment must be greater than zero\n");
return;
}
}
else
{
printf("Must specify an increment in range mode\n");
printf("See --help for more information\n");
return;
}
}
if( htod )
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, HOST_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc);
}
if( dtoh )
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, DEVICE_TO_HOST, printmode, memMode, startDevice, endDevice, wc);
}
if( dtod )
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, DEVICE_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc);
}
printf("&&&& Test PASSED\n");
cutFree( memModeStr);
return;
}
///////////////////////////////////////////////////////////////////////////////
// Run a bandwidth test
///////////////////////////////////////////////////////////////////////////////
void
testBandwidth(unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
switch( mode )
{
case QUICK_MODE:
printf("Quick Mode\n");
testBandwidthQuick( DEFAULT_SIZE, kind, printmode, memMode, startDevice, endDevice, wc );
break;
case RANGE_MODE:
printf("Range Mode\n");
testBandwidthRange(start, end, increment, kind, printmode, memMode, startDevice, endDevice, wc);
break;
case SHMOO_MODE:
printf("Shmoo Mode\n");
testBandwidthShmoo(kind, printmode, memMode, startDevice, endDevice, wc);
break;
default:
printf("Invalid testing mode\n");
break;
}
}
//////////////////////////////////////////////////////////////////////
// Run a quick mode bandwidth test
//////////////////////////////////////////////////////////////////////
void
testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
testBandwidthRange(size, size, DEFAULT_INCREMENT, kind, printmode, memMode, startDevice, endDevice, wc);
}
///////////////////////////////////////////////////////////////////////
// Run a range mode bandwidth test
//////////////////////////////////////////////////////////////////////
void
testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment,
memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
//count the number of copies we're going to run
unsigned int count = 1 + ((end - start) / increment);
unsigned int *memSizes = ( unsigned int * )malloc( count * sizeof( unsigned int ) );
float *bandwidths = ( float * ) malloc( count * sizeof(float) );
//print information for use
switch(kind)
{
case DEVICE_TO_HOST: printf("Device to Host Bandwidth for ");
break;
case HOST_TO_DEVICE: printf("Host to Device Bandwidth for ");
break;
case DEVICE_TO_DEVICE: printf("Device to Device Bandwidth\n");
break;
}
if( DEVICE_TO_DEVICE != kind )
{ switch(memMode)
{
case PAGEABLE: printf("Pageable memory\n");
break;
case PINNED: printf("Pinned memory\n");
if (wc) printf("Write-Combined memory enabled\n");
break;
}
}
// Before calculating the cumulative bandwidth, initialize the bandwidths array to zero
for (int i = 0; i < count; i++)
bandwidths[i] = 0.0f;
// Use the device asked by the user
for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
cudaSetDevice(currentDevice);
//run each of the copies
for(unsigned int i = 0; i < count; i++)
{
memSizes[i] = start + i * increment;
switch(kind)
{
case DEVICE_TO_HOST: bandwidths[i] = testDeviceToHostTransfer( memSizes[i], memMode, wc );
break;
case HOST_TO_DEVICE: bandwidths[i] = testHostToDeviceTransfer( memSizes[i], memMode, wc );
break;
case DEVICE_TO_DEVICE: bandwidths[i] = testDeviceToDeviceTransfer( memSizes[i] );
break;
}
printf(".");
}
cudaThreadExit();
} // Complete the bandwidth computation on all the devices
printf("\n");
//print results
if(printmode == CSV)
{
printResultsCSV(memSizes, bandwidths, count);
}
else
{
printResultsReadable(memSizes, bandwidths, count);
}
//clean up
free(memSizes);
free(bandwidths);
}
//////////////////////////////////////////////////////////////////////////////
// Intense shmoo mode - covers a large range of values with varying increments
//////////////////////////////////////////////////////////////////////////////
void
testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
//print info for user
switch(kind)
{
case DEVICE_TO_HOST: printf("Device to Host Bandwidth for ");
break;
case HOST_TO_DEVICE: printf("Host to Device Bandwidth for ");
break;
case DEVICE_TO_DEVICE: printf("Device to Device Bandwidth\n");
break;
}
if( DEVICE_TO_DEVICE != kind )
{ switch(memMode)
{
case PAGEABLE: printf("Pageable memory\n");
break;
case PINNED: printf("Pinned memory\n");
if (wc) printf("Write-Combined memory enabled\n");
break;
}
}
//count the number of copies to make
unsigned int count = 1 + (SHMOO_LIMIT_20KB / SHMOO_INCREMENT_1KB)
+ ((SHMOO_LIMIT_50KB - SHMOO_LIMIT_20KB) / SHMOO_INCREMENT_2KB)
+ ((SHMOO_LIMIT_100KB - SHMOO_LIMIT_50KB) / SHMOO_INCREMENT_10KB)
+ ((SHMOO_LIMIT_1MB - SHMOO_LIMIT_100KB) / SHMOO_INCREMENT_100KB)
+ ((SHMOO_LIMIT_16MB - SHMOO_LIMIT_1MB) / SHMOO_INCREMENT_1MB)
+ ((SHMOO_LIMIT_32MB - SHMOO_LIMIT_16MB) / SHMOO_INCREMENT_2MB)
+ ((SHMOO_MEMSIZE_MAX - SHMOO_LIMIT_32MB) / SHMOO_INCREMENT_4MB);
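//(one measurement per step from 1 KB up to 64 MB, with the step size growing
// from 1 KB to 4 MB as the transfer size grows)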
unsigned int *memSizes = ( unsigned int * )malloc( count * sizeof( unsigned int ) );
float *bandwidths = ( float * ) malloc( count * sizeof(float) );
// Before calculating the cumulative bandwidth, initialize the bandwidths array to zero
for (int i = 0; i < count; i++)
bandwidths[i] = 0.0f;
// Use the device asked by the user
for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
cudaSetDevice(currentDevice);
//Run the shmoo
int iteration = 0;
unsigned int memSize = 0;
while( memSize <= SHMOO_MEMSIZE_MAX )
{
if( memSize < SHMOO_LIMIT_20KB )
{
memSize += SHMOO_INCREMENT_1KB;
}
else if( memSize < SHMOO_LIMIT_50KB )
{
memSize += SHMOO_INCREMENT_2KB;
}else if( memSize < SHMOO_LIMIT_100KB )
{
memSize += SHMOO_INCREMENT_10KB;
}else if( memSize < SHMOO_LIMIT_1MB )
{
memSize += SHMOO_INCREMENT_100KB;
}else if( memSize < SHMOO_LIMIT_16MB )
{
memSize += SHMOO_INCREMENT_1MB;
}else if( memSize < SHMOO_LIMIT_32MB )
{
memSize += SHMOO_INCREMENT_2MB;
}else
{
memSize += SHMOO_INCREMENT_4MB;
}
memSizes[iteration] = memSize;
switch(kind)
{
case DEVICE_TO_HOST: bandwidths[iteration] += testDeviceToHostTransfer( memSizes[iteration], memMode, wc );
break;
case HOST_TO_DEVICE: bandwidths[iteration] += testHostToDeviceTransfer( memSizes[iteration], memMode, wc );
break;
case DEVICE_TO_DEVICE: bandwidths[iteration] += testDeviceToDeviceTransfer( memSizes[iteration] );
break;
}
iteration++;
printf(".");
}
} // Complete the bandwidth computation on all the devices
printf("\n");
//print results
if( CSV == printmode)
{
printResultsCSV(memSizes, bandwidths, count);
}
else
{
printResultsReadable(memSizes, bandwidths, count);
}
//clean up
free(memSizes);
free(bandwidths);
}
///////////////////////////////////////////////////////////////////////////////
// test the bandwidth of a device to host memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc)
{
unsigned int timer = 0;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
unsigned char *h_idata = NULL;
unsigned char *h_odata = NULL;
cudaEvent_t start, stop;
cutilCheckError( cutCreateTimer( &timer ) );
cutilSafeCall ( cudaEventCreate( &start ) );
cutilSafeCall ( cudaEventCreate( &stop ) );
//allocate host memory
if( PINNED == memMode )
{
//pinned memory mode - use special function to get OS-pinned memory
#if CUDART_VERSION >= 2020
cutilSafeCall( cudaHostAlloc( (void**)&h_idata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) );
cutilSafeCall( cudaHostAlloc( (void**)&h_odata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) );
#else
cutilSafeCall( cudaMallocHost( (void**)&h_idata, memSize ) );
cutilSafeCall( cudaMallocHost( (void**)&h_odata, memSize ) );
#endif
}
else
{
//pageable memory mode - use malloc
h_idata = (unsigned char *)malloc( memSize );
h_odata = (unsigned char *)malloc( memSize );
}
//initialize the memory
for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_idata[i] = (unsigned char) (i & 0xff);
}
// allocate device memory
unsigned char* d_idata;
cutilSafeCall( cudaMalloc( (void**) &d_idata, memSize));
//initialize the device memory
cutilSafeCall( cudaMemcpy( d_idata, h_idata, memSize,
cudaMemcpyHostToDevice) );
//copy data from GPU to Host
cutilCheckError( cutStartTimer( timer));
cutilSafeCall( cudaEventRecord( start, 0 ) );
if( PINNED == memMode )
{
for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
{
cutilSafeCall( cudaMemcpyAsync( h_odata, d_idata, memSize,
cudaMemcpyDeviceToHost, 0) );
}
}
else
{
for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
{
cutilSafeCall( cudaMemcpy( h_odata, d_idata, memSize,
cudaMemcpyDeviceToHost) );
}
}
cutilSafeCall( cudaEventRecord( stop, 0 ) );
// make sure GPU has finished copying
cutilSafeCall( cudaThreadSynchronize() );
//get the total elapsed time in ms
cutilCheckError( cutStopTimer( timer));
cutilSafeCall( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) );
if( PINNED != memMode || bDontUseGPUTiming )
{
elapsedTimeInMs = cutGetTimerValue( timer);
}
//calculate bandwidth in MB/s
bandwidthInMBs = (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
(elapsedTimeInMs * (float)(1 << 20));
//clean up memory
cutilSafeCall( cudaEventDestroy(stop) );
cutilSafeCall( cudaEventDestroy(start) );
cutilCheckError( cutDeleteTimer( timer));
if( PINNED == memMode )
{
cutilSafeCall( cudaFreeHost(h_idata) );
cutilSafeCall( cudaFreeHost(h_odata) );
}
else
{
free(h_idata);
free(h_odata);
}
cutilSafeCall(cudaFree(d_idata));
return bandwidthInMBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a host to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc)
{
unsigned int timer = 0;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
cudaEvent_t start, stop;
cutilCheckError( cutCreateTimer( &timer ) );
cutilSafeCall( cudaEventCreate( &start ) );
cutilSafeCall( cudaEventCreate( &stop ) );
//allocate host memory
unsigned char *h_odata = NULL;
if( PINNED == memMode )
{
#if CUDART_VERSION >= 2020
//pinned memory mode - use special function to get OS-pinned memory
cutilSafeCall( cudaHostAlloc( (void**)&h_odata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) );
#else
//pinned memory mode - use special function to get OS-pinned memory
cutilSafeCall( cudaMallocHost( (void**)&h_odata, memSize ) );
#endif
}
else
{
//pageable memory mode - use malloc
h_odata = (unsigned char *)malloc( memSize );
}
unsigned char *h_cacheClear1 = (unsigned char *)malloc( CACHE_CLEAR_SIZE );
unsigned char *h_cacheClear2 = (unsigned char *)malloc( CACHE_CLEAR_SIZE );
//initialize the memory
for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_odata[i] = (unsigned char) (i & 0xff);
}
for(unsigned int i = 0; i < CACHE_CLEAR_SIZE / sizeof(unsigned char); i++)
{
h_cacheClear1[i] = (unsigned char) (i & 0xff);
h_cacheClear2[i] = (unsigned char) (0xff - (i & 0xff));
}
//allocate device memory
unsigned char* d_idata;
cutilSafeCall( cudaMalloc( (void**) &d_idata, memSize));
cutilCheckError( cutStartTimer( timer));
cutilSafeCall( cudaEventRecord( start, 0 ) );
//copy host memory to device memory
if( PINNED == memMode )
{
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
cutilSafeCall( cudaMemcpyAsync( d_idata, h_odata, memSize,
cudaMemcpyHostToDevice, 0) );
}
}
else {
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
cutilSafeCall( cudaMemcpy( d_idata, h_odata, memSize,
cudaMemcpyHostToDevice) );
}
}
cutilSafeCall( cudaEventRecord( stop, 0 ) );
cutilSafeCall( cudaThreadSynchronize() );
//total elapsed time in ms
cutilCheckError( cutStopTimer( timer));
cutilSafeCall( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) );
if ( PINNED != memMode || bDontUseGPUTiming )
{
elapsedTimeInMs = cutGetTimerValue( timer);
}
cutilCheckError( cutResetTimer( timer));
//calculate bandwidth in MB/s
bandwidthInMBs = (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
(elapsedTimeInMs * (float)(1 << 20));
//clean up memory
cutilSafeCall( cudaEventDestroy(stop) );
cutilSafeCall( cudaEventDestroy(start) );
cutilCheckError( cutDeleteTimer( timer));
if( PINNED == memMode )
{
cutilSafeCall( cudaFreeHost(h_odata) );
}
else
{
free(h_odata);
}
free(h_cacheClear1);
free(h_cacheClear2);
cutilSafeCall(cudaFree(d_idata));
return bandwidthInMBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a device to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testDeviceToDeviceTransfer(unsigned int memSize)
{
unsigned int timer = 0;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
cudaEvent_t start, stop;
cutilCheckError( cutCreateTimer( &timer ) );
cutilSafeCall( cudaEventCreate( &start ) );
cutilSafeCall( cudaEventCreate( &stop ) );
//allocate host memory
unsigned char *h_idata = (unsigned char *)malloc( memSize );
//initialize the host memory
for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_idata[i] = (unsigned char) (i & 0xff);
}
//allocate device memory
unsigned char *d_idata;
cutilSafeCall( cudaMalloc( (void**) &d_idata, memSize));
unsigned char *d_odata;
cutilSafeCall( cudaMalloc( (void**) &d_odata, memSize));
//initialize memory
cutilSafeCall( cudaMemcpy( d_idata, h_idata, memSize,
cudaMemcpyHostToDevice) );
//run the memcopy
cutilCheckError( cutStartTimer( timer));
cutilSafeCall( cudaEventRecord( start, 0 ) );
for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
{
cutilSafeCall( cudaMemcpy( d_odata, d_idata, memSize,
cudaMemcpyDeviceToDevice) );
}
cutilSafeCall( cudaEventRecord( stop, 0 ) );
//Since device to device memory copies are non-blocking,
//cudaThreadSynchronize() is required in order to get
//proper timing.
cutilSafeCall( cudaThreadSynchronize() );
//get the total elapsed time in ms
cutilCheckError( cutStopTimer( timer));
cutilSafeCall( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) );
if ( bDontUseGPUTiming )
{
elapsedTimeInMs = cutGetTimerValue( timer);
}
//calculate bandwidth in MB/s
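//(the 2.0f factor counts each byte twice: once read from and once written to device memory)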
bandwidthInMBs = 2.0f * (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
(elapsedTimeInMs * (float)(1 << 20));
//clean up memory
cutilCheckError( cutDeleteTimer( timer));
free(h_idata);
cutilSafeCall(cudaEventDestroy(stop));
cutilSafeCall(cudaEventDestroy(start));
cutilSafeCall(cudaFree(d_idata));
cutilSafeCall(cudaFree(d_odata));
return bandwidthInMBs;
}
/////////////////////////////////////////////////////////
//print results in an easily read format
////////////////////////////////////////////////////////
void printResultsReadable(unsigned int *memSizes, float *bandwidths, unsigned int count)
{
printf("Transfer Size (Bytes)\tBandwidth(MB/s)\n");
for(unsigned int i = 0; i < count; i++)
{
printf("%9u\t\t%.1f\n", memSizes[i], bandwidths[i]);
}
printf("\n");
fflush(stdout);
}
///////////////////////////////////////////////////////////////////////////
//print results in CSV format
///////////////////////////////////////////////////////////////////////////
void printResultsCSV(unsigned int *memSizes, float *bandwidths, unsigned int count)
{
printf("Transfer size (Bytes),");
for(unsigned int i = 0; i < count; i++)
{
printf("%u,", memSizes[i]);
}
printf("\n");
printf("Bandwidth (MB/s),");
for(unsigned int i = 0; i < count; i++)
{
printf("%.1f,", bandwidths[i]);
}
printf("\n\n");
fflush(stdout);
}
///////////////////////////////////////////////////////////////////////////
//Print help screen
///////////////////////////////////////////////////////////////////////////
void printHelp(void)
{
printf("Usage: bandwidthTest [OPTION]...\n");
printf("Test the bandwidth for device to host, host to device, and device to device transfers\n");
printf("\n");
printf("Example: measure the bandwidth of device to host pinned memory copies in the range 1024 Bytes to 102400 Bytes in 1024 Byte increments\n");
printf("./bandwidthTest --memory=pinned --mode=range --start=1024 --end=102400 --increment=1024 --dtoh\n");
printf("\n");
printf("Options:\n");
printf("--help\tDisplay this help menu\n");
printf("--csv\tPrint results as a CSV\n");
printf("--device=[deviceno]\tSpecify the device to be used\n");
printf(" all - compute cumulative bandwidth on all the devices\n");
printf(" 0,1,2,...,n - Specify any particular device to be used\n");
printf("--memory=[MEMMODE]\tSpecify which memory mode to use\n");
printf(" pageable - pageable memory\n");
printf(" pinned - non-pageable system memory\n");
printf("--mode=[MODE]\tSpecify the mode to use\n");
printf(" quick - performs a quick measurement\n");
printf(" range - measures a user-specified range of values\n");
printf(" shmoo - performs an intense shmoo of a large range of values\n");
printf("--htod\tMeasure host to device transfers\n");
printf("--dtoh\tMeasure device to host transfers\n");
printf("--dtod\tMeasure device to device transfers\n");
#if CUDART_VERSION >= 2020
printf("--wc\tAllocate pinned memory as write-combined\n");
#endif
printf("--cputiming\tForce CPU-based timing always\n");
printf("Range mode options\n");
printf("--start=[SIZE]\tStarting transfer size in bytes\n");
printf("--end=[SIZE]\tEnding transfer size in bytes\n");
printf("--increment=[SIZE]\tIncrement size in bytes\n");
}
|
dbbe7021d9b8b2ac656fb33171de8a1987e355dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// ******************************************************************************************************
// PURPOSE : Print thread IDs for the 256 threads of a 2D configuration (16 * 16) *
// LANGUAGE : CUDA C / CUDA C++ *
// ASSUMPTIONS : 2D Configuration 16 threads in each x & y directions with thread block of (8*8) *
// threadIdx.z value will be zero since it is 2D configuration *
// DATE : 23 March 2020 *
// AUTHOR : Vaibhav BENDRE *
// [email protected] *
// ******************************************************************************************************
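// Example launch matching the assumptions above (a 2 x 2 grid of 8 x 8 blocks = 256 threads):
// hipLaunchKernelGGL(printThreadIDs, dim3(2, 2), dim3(8, 8), 0, 0);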
__global__ void printThreadIDs() {
printf("\n threadIdx.x : %d, threadIdx.y : %d ",threadIdx.x,threadIdx.y);
} | dbbe7021d9b8b2ac656fb33171de8a1987e355dc.cu | #include "includes.h"
// ******************************************************************************************************
// PURPOSE : Print thread IDs for the 256 threads of a 2D configuration (16 * 16) *
// LANGUAGE : CUDA C / CUDA C++ *
// ASSUMPTIONS : 2D Configuration 16 threads in each x & y directions with thread block of (8*8) *
// threadIdx.z value will be zero since it is 2D configuration *
// DATE : 23 March 2020 *
// AUTHOR : Vaibhav BENDRE *
// [email protected] *
// ******************************************************************************************************
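// Example launch matching the assumptions above (a 2 x 2 grid of 8 x 8 blocks = 256 threads):
// printThreadIDs<<<dim3(2, 2), dim3(8, 8)>>>();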
__global__ void printThreadIDs() {
printf("\n threadIdx.x : %d, threadIdx.y : %d ",threadIdx.x,threadIdx.y);
} |
316629e13fa930b0203c29548e5cf6f072f2f53e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zmgeellmv.cu normal z -> c, Tue Sep 2 12:38:33 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
__global__ void
cmgeellmv_kernel( int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
magmaFloatComplex alpha,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magmaFloatComplex *d_x,
magmaFloatComplex beta,
magmaFloatComplex *d_y)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
extern __shared__ magmaFloatComplex dot[];
if(row < num_rows ){
for( int i=0; i<num_vecs; i++)
dot[ threadIdx.x + i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = d_colind [ num_cols_per_row * row + n ];
magmaFloatComplex val = d_val [ num_cols_per_row * row + n ];
if( val != 0){
for( int i=0; i<num_vecs; i++)
dot[ threadIdx.x + i*blockDim.x ] +=
val * d_x[col + i * num_cols ];
}
}
for( int i=0; i<num_vecs; i++)
d_y[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * d_y [ row + i * num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELLPACK.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
num_vecs magma_int_t
number of vectors
@param
nnz_per_row magma_int_t
number of elements in the longest row
@param
alpha magmaFloatComplex
scalar multiplier
@param
d_val magmaFloatComplex*
array containing values of A in ELLPACK
@param
d_colind magma_int_t*
column indices of A in ELLPACK
@param
d_x magmaFloatComplex*
input vector x
@param
beta magmaFloatComplex
scalar multiplier
@param
d_y magmaFloatComplex*
input/output vector y
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cmgeellmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
magmaFloatComplex alpha,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magmaFloatComplex *d_x,
magmaFloatComplex beta,
magmaFloatComplex *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( magmaFloatComplex ); // num_vecs vectors
hipLaunchKernelGGL(( cmgeellmv_kernel), dim3(grid), dim3(BLOCK_SIZE), MEM_SIZE , 0,
m, n, num_vecs, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y );
return MAGMA_SUCCESS;
}
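/* Usage sketch (hypothetical; assumes d_val, d_colind, d_x and d_y are device
arrays already populated in ELLPACK layout as documented above):
magma_cmgeellmv( MagmaNoTrans, m, n, num_vecs, nnz_per_row,
alpha, d_val, d_colind, d_x, beta, d_y ); */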
| 316629e13fa930b0203c29548e5cf6f072f2f53e.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zmgeellmv.cu normal z -> c, Tue Sep 2 12:38:33 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
__global__ void
cmgeellmv_kernel( int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
magmaFloatComplex alpha,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magmaFloatComplex *d_x,
magmaFloatComplex beta,
magmaFloatComplex *d_y)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
extern __shared__ magmaFloatComplex dot[];
if(row < num_rows ){
for( int i=0; i<num_vecs; i++)
dot[ threadIdx.x + i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = d_colind [ num_cols_per_row * row + n ];
magmaFloatComplex val = d_val [ num_cols_per_row * row + n ];
if( val != 0){
for( int i=0; i<num_vecs; i++)
dot[ threadIdx.x + i*blockDim.x ] +=
val * d_x[col + i * num_cols ];
}
}
for( int i=0; i<num_vecs; i++)
d_y[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * d_y [ row + i * num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELLPACK.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
num_vecs magma_int_t
number of vectors
@param
nnz_per_row magma_int_t
number of elements in the longest row
@param
alpha magmaFloatComplex
scalar multiplier
@param
d_val magmaFloatComplex*
array containing values of A in ELLPACK
@param
d_colind magma_int_t*
column indices of A in ELLPACK
@param
d_x magmaFloatComplex*
input vector x
@param
beta magmaFloatComplex
scalar multiplier
@param
d_y magmaFloatComplex*
input/output vector y
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cmgeellmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
magmaFloatComplex alpha,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magmaFloatComplex *d_x,
magmaFloatComplex beta,
magmaFloatComplex *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( magmaFloatComplex ); // num_vecs vectors
cmgeellmv_kernel<<< grid, BLOCK_SIZE, MEM_SIZE >>>
( m, n, num_vecs, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y );
return MAGMA_SUCCESS;
}
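/* Usage sketch (hypothetical; assumes d_val, d_colind, d_x and d_y are device
arrays already populated in ELLPACK layout as documented above):
magma_cmgeellmv( MagmaNoTrans, m, n, num_vecs, nnz_per_row,
alpha, d_val, d_colind, d_x, beta, d_y ); */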
|
d2f523ecc3cce63e112b5a063790f0380f1bcda9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2013, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: Enrico Siragusa <[email protected]>
// ==========================================================================
#include <seqan/basic.h>
#include <seqan/sequence.h>
#include <thrust/count.h>
#include "test_cuda_common.h"
using namespace seqan;
// ============================================================================
// Types
// ============================================================================
typedef TagList<String<char, Alloc<> >,
TagList<String<Dna, Alloc<> >,
TagList<String<Dna5, Alloc<> >
// TagList<String<Dna, Packed<> >
> > > //>
StringTypes;
// TODO(esiragusa): test StringSets.
//typedef TagList<StringSet<CharString, Owner<ConcatDirect<> > >,
// TagList<StringSet<DnaString, Owner<ConcatDirect<> > >
// > >
// TStringSetTypes;
// TODO(esiragusa): use metaprogramming algebra.
//typedef Product<StringTypes, Owner<ConcatDirect<> > >::Type TStringSetTypes;
// ============================================================================
// Classes
// ============================================================================
// ----------------------------------------------------------------------------
// Class CudaSequenceTest
// ----------------------------------------------------------------------------
template <typename TType>
class CudaSequenceTest : public Test
{
public:
typedef TType TString;
typedef typename Device<TString>::Type TCudaString;
typedef typename Value<TString>::Type TAlphabet;
TString str;
CudaSequenceTest() :
str("ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT")
{}
};
SEQAN_TYPED_TEST_CASE(CudaSequenceTest, StringTypes);
// ============================================================================
// Tests
// ============================================================================
// ----------------------------------------------------------------------------
// Test assign()
// ----------------------------------------------------------------------------
SEQAN_TYPED_TEST(CudaSequenceTest, Assign)
{
typedef typename TestFixture::TString TString;
typedef typename TestFixture::TCudaString TCudaString;
typedef typename TestFixture::TAlphabet TAlphabet;
hipDeviceReset();
TCudaString cudaStr;
assign(cudaStr, this->str);
SEQAN_ASSERT_EQ(length(cudaStr), length(this->str));
SEQAN_ASSERT_EQ(thrust::count(begin(cudaStr, Standard()), end(cudaStr, Standard()), TAlphabet('A')), 10u);
SEQAN_ASSERT_EQ(thrust::count(begin(cudaStr, Standard()), end(cudaStr, Standard()), TAlphabet('C')), 10u);
SEQAN_ASSERT_EQ(thrust::count(begin(cudaStr, Standard()), end(cudaStr, Standard()), TAlphabet('G')), 10u);
SEQAN_ASSERT_EQ(thrust::count(begin(cudaStr, Standard()), end(cudaStr, Standard()), TAlphabet('T')), 10u);
// TString str;
// assign(cudaStr, str);
// SEQAN_ASSERT_EQ(str, this->str);
}
// ----------------------------------------------------------------------------
// Test getValue()
// ----------------------------------------------------------------------------
SEQAN_TYPED_TEST(CudaSequenceTest, GetValue)
{
typedef typename TestFixture::TString TString;
typedef typename TestFixture::TCudaString TCudaString;
typedef typename View<TCudaString>::Type TCudaStringView;
typedef typename Size<TString>::Type TSize;
hipDeviceReset();
TCudaString cudaStr;
assign(cudaStr, this->str);
TCudaStringView cudaStrView = view(cudaStr);
for (TSize pos = 0; pos < length(this->str); pos++)
{
hipLaunchKernelGGL(( testGetValue), dim3(1),dim3(1), 0, 0, cudaStrView, pos, getValue(this->str, pos));
hipDeviceSynchronize();
SEQAN_ASSERT_EQ(hipGetLastError(), hipSuccess);
}
}
// ============================================================================
// Register Tests
// ============================================================================
int main(int argc, char const ** argv)
{
TestSystem::init(argc, argv);
return TestSystem::runAll();
}
| d2f523ecc3cce63e112b5a063790f0380f1bcda9.cu | // ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2013, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: Enrico Siragusa <[email protected]>
// ==========================================================================
#include <seqan/basic.h>
#include <seqan/sequence.h>
#include <thrust/count.h>
#include "test_cuda_common.h"
using namespace seqan;
// ============================================================================
// Types
// ============================================================================
typedef TagList<String<char, Alloc<> >,
TagList<String<Dna, Alloc<> >,
TagList<String<Dna5, Alloc<> >
// TagList<String<Dna, Packed<> >
> > > //>
StringTypes;
// TODO(esiragusa): test StringSets.
//typedef TagList<StringSet<CharString, Owner<ConcatDirect<> > >,
// TagList<StringSet<DnaString, Owner<ConcatDirect<> > >
// > >
// TStringSetTypes;
// TODO(esiragusa): use metaprogramming algebra.
//typedef Product<StringTypes, Owner<ConcatDirect<> > >::Type TStringSetTypes;
// ============================================================================
// Classes
// ============================================================================
// ----------------------------------------------------------------------------
// Class CudaSequenceTest
// ----------------------------------------------------------------------------
template <typename TType>
class CudaSequenceTest : public Test
{
public:
typedef TType TString;
typedef typename Device<TString>::Type TCudaString;
typedef typename Value<TString>::Type TAlphabet;
TString str;
CudaSequenceTest() :
str("ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT")
{}
};
SEQAN_TYPED_TEST_CASE(CudaSequenceTest, StringTypes);
// ============================================================================
// Tests
// ============================================================================
// ----------------------------------------------------------------------------
// Test assign()
// ----------------------------------------------------------------------------
SEQAN_TYPED_TEST(CudaSequenceTest, Assign)
{
typedef typename TestFixture::TString TString;
typedef typename TestFixture::TCudaString TCudaString;
typedef typename TestFixture::TAlphabet TAlphabet;
cudaDeviceReset();
TCudaString cudaStr;
assign(cudaStr, this->str);
SEQAN_ASSERT_EQ(length(cudaStr), length(this->str));
SEQAN_ASSERT_EQ(thrust::count(begin(cudaStr, Standard()), end(cudaStr, Standard()), TAlphabet('A')), 10u);
SEQAN_ASSERT_EQ(thrust::count(begin(cudaStr, Standard()), end(cudaStr, Standard()), TAlphabet('C')), 10u);
SEQAN_ASSERT_EQ(thrust::count(begin(cudaStr, Standard()), end(cudaStr, Standard()), TAlphabet('G')), 10u);
SEQAN_ASSERT_EQ(thrust::count(begin(cudaStr, Standard()), end(cudaStr, Standard()), TAlphabet('T')), 10u);
// TString str;
// assign(cudaStr, str);
// SEQAN_ASSERT_EQ(str, this->str);
}
// ----------------------------------------------------------------------------
// Test getValue()
// ----------------------------------------------------------------------------
SEQAN_TYPED_TEST(CudaSequenceTest, GetValue)
{
typedef typename TestFixture::TString TString;
typedef typename TestFixture::TCudaString TCudaString;
typedef typename View<TCudaString>::Type TCudaStringView;
typedef typename Size<TString>::Type TSize;
cudaDeviceReset();
TCudaString cudaStr;
assign(cudaStr, this->str);
TCudaStringView cudaStrView = view(cudaStr);
for (TSize pos = 0; pos < length(this->str); pos++)
{
testGetValue<<<1,1>>>(cudaStrView, pos, getValue(this->str, pos));
cudaDeviceSynchronize();
SEQAN_ASSERT_EQ(cudaGetLastError(), cudaSuccess);
}
}
// ============================================================================
// Register Tests
// ============================================================================
int main(int argc, char const ** argv)
{
TestSystem::init(argc, argv);
return TestSystem::runAll();
}
|
1c4c6af372236185d2e64f2cb4bf0751977b5762.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
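// Grid-stride loop: each element of mat is divided by the vector entry for its row
// (vec[i % height], assuming column-major storage with 'height' rows) and the
// quotient is written to tgtMat.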
__global__ void kDivByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i % height];
}
} | 1c4c6af372236185d2e64f2cb4bf0751977b5762.cu | #include "includes.h"
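// Grid-stride loop: each element of mat is divided by the vector entry for its row
// (vec[i % height], assuming column-major storage with 'height' rows) and the
// quotient is written to tgtMat.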
__global__ void kDivByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i % height];
}
} |
e8dc5928ebdc04443274589e62a54710afbdf2e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/logger.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/neighbors/brute_force.cuh>
#include <rmm/device_buffer.hpp>
#include <gtest/gtest.h>
#include <cstddef>
#include <iostream>
#include <vector>
namespace raft::neighbors::brute_force {
struct KNNInputs {
std::vector<std::vector<float>> input;
int k;
std::vector<int> labels;
};
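// build_actual_output maps every returned neighbor index to the label of the indexed
// point; build_expected_output fills each query row with the query's own label, so a
// correct search makes the two arrays compare equal.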
template <typename IdxT>
__global__ void build_actual_output(
int* output, int n_rows, int k, const int* idx_labels, const IdxT* indices)
{
int element = threadIdx.x + blockDim.x * blockIdx.x;
if (element >= n_rows * k) return;
output[element] = idx_labels[indices[element]];
}
__global__ void build_expected_output(int* output, int n_rows, int k, const int* labels)
{
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= n_rows) return;
int cur_label = labels[row];
for (int i = 0; i < k; i++) {
output[row * k + i] = cur_label;
}
}
template <typename T, typename IdxT>
class KNNTest : public ::testing::TestWithParam<KNNInputs> {
public:
KNNTest()
: params_(::testing::TestWithParam<KNNInputs>::GetParam()),
stream(resource::get_cuda_stream(handle)),
actual_labels_(0, stream),
expected_labels_(0, stream),
input_(0, stream),
search_data_(0, stream),
indices_(0, stream),
distances_(0, stream),
search_labels_(0, stream)
{
}
protected:
void testBruteForce()
{
// #if (RAFT_ACTIVE_LEVEL >= RAFT_LEVEL_DEBUG)
raft::print_device_vector("Input array: ", input_.data(), rows_ * cols_, std::cout);
std::cout << "K: " << k_ << std::endl;
raft::print_device_vector("Labels array: ", search_labels_.data(), rows_, std::cout);
// #endif
std::vector<device_matrix_view<const T, IdxT, row_major>> index = {
make_device_matrix_view((const T*)(input_.data()), rows_, cols_)};
auto search = raft::make_device_matrix_view<const T, IdxT, row_major>(
(const T*)(search_data_.data()), rows_, cols_);
auto indices = raft::make_device_matrix_view<IdxT, IdxT, row_major>(indices_.data(), rows_, k_);
auto distances =
raft::make_device_matrix_view<T, IdxT, row_major>(distances_.data(), rows_, k_);
auto metric = raft::distance::DistanceType::L2Unexpanded;
knn(handle, index, search, indices, distances, metric, std::make_optional<IdxT>(0));
hipLaunchKernelGGL(( build_actual_output), dim3(raft::ceildiv(rows_ * k_, 32)), dim3(32), 0, stream,
actual_labels_.data(), rows_, k_, search_labels_.data(), indices_.data());
hipLaunchKernelGGL(( build_expected_output), dim3(raft::ceildiv(rows_ * k_, 32)), dim3(32), 0, stream,
expected_labels_.data(), rows_, k_, search_labels_.data());
ASSERT_TRUE(devArrMatch(
expected_labels_.data(), actual_labels_.data(), rows_ * k_, raft::Compare<int>(), stream));
}
void SetUp() override
{
rows_ = params_.input.size();
cols_ = params_.input[0].size();
k_ = params_.k;
actual_labels_.resize(rows_ * k_, stream);
expected_labels_.resize(rows_ * k_, stream);
input_.resize(rows_ * cols_, stream);
search_data_.resize(rows_ * cols_, stream);
indices_.resize(rows_ * k_, stream);
distances_.resize(rows_ * k_, stream);
search_labels_.resize(rows_, stream);
RAFT_CUDA_TRY(
hipMemsetAsync(actual_labels_.data(), 0, actual_labels_.size() * sizeof(int), stream));
RAFT_CUDA_TRY(
hipMemsetAsync(expected_labels_.data(), 0, expected_labels_.size() * sizeof(int), stream));
RAFT_CUDA_TRY(hipMemsetAsync(input_.data(), 0, input_.size() * sizeof(float), stream));
RAFT_CUDA_TRY(
hipMemsetAsync(search_data_.data(), 0, search_data_.size() * sizeof(float), stream));
RAFT_CUDA_TRY(hipMemsetAsync(indices_.data(), 0, indices_.size() * sizeof(IdxT), stream));
RAFT_CUDA_TRY(hipMemsetAsync(distances_.data(), 0, distances_.size() * sizeof(float), stream));
RAFT_CUDA_TRY(
hipMemsetAsync(search_labels_.data(), 0, search_labels_.size() * sizeof(int), stream));
std::vector<float> row_major_input;
for (std::size_t i = 0; i < params_.input.size(); ++i) {
for (std::size_t j = 0; j < params_.input[i].size(); ++j) {
row_major_input.push_back(params_.input[i][j]);
}
}
rmm::device_buffer input_d =
rmm::device_buffer(row_major_input.data(), row_major_input.size() * sizeof(float), stream);
float* input_ptr = static_cast<float*>(input_d.data());
rmm::device_buffer labels_d =
rmm::device_buffer(params_.labels.data(), params_.labels.size() * sizeof(int), stream);
int* labels_ptr = static_cast<int*>(labels_d.data());
raft::copy(input_.data(), input_ptr, rows_ * cols_, stream);
raft::copy(search_data_.data(), input_ptr, rows_ * cols_, stream);
raft::copy(search_labels_.data(), labels_ptr, rows_, stream);
resource::sync_stream(handle, stream);
}
private:
raft::resources handle;
hipStream_t stream;
KNNInputs params_;
int rows_;
int cols_;
rmm::device_uvector<float> input_;
rmm::device_uvector<float> search_data_;
rmm::device_uvector<IdxT> indices_;
rmm::device_uvector<float> distances_;
int k_;
rmm::device_uvector<int> search_labels_;
rmm::device_uvector<int> actual_labels_;
rmm::device_uvector<int> expected_labels_;
};
const std::vector<KNNInputs> inputs = {
// 2D
{{
{2.7810836, 2.550537003},
{1.465489372, 2.362125076},
{3.396561688, 4.400293529},
{1.38807019, 1.850220317},
{3.06407232, 3.005305973},
{7.627531214, 2.759262235},
{5.332441248, 2.088626775},
{6.922596716, 1.77106367},
{8.675418651, -0.242068655},
{7.673756466, 3.508563011},
},
2,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1}}};
typedef KNNTest<float, int> KNNTestFint32_t;
TEST_P(KNNTestFint32_t, BruteForce) { this->testBruteForce(); }
typedef KNNTest<float, uint32_t> KNNTestFuint32_t;
TEST_P(KNNTestFuint32_t, BruteForce) { this->testBruteForce(); }
INSTANTIATE_TEST_CASE_P(KNNTest, KNNTestFint32_t, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(KNNTest, KNNTestFuint32_t, ::testing::ValuesIn(inputs));
} // namespace raft::neighbors::brute_force
| e8dc5928ebdc04443274589e62a54710afbdf2e7.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/logger.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/neighbors/brute_force.cuh>
#include <rmm/device_buffer.hpp>
#include <gtest/gtest.h>
#include <cstddef>
#include <iostream>
#include <vector>
namespace raft::neighbors::brute_force {
struct KNNInputs {
std::vector<std::vector<float>> input;
int k;
std::vector<int> labels;
};
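// build_actual_output maps every returned neighbor index to the label of the indexed
// point; build_expected_output fills each query row with the query's own label, so a
// correct search makes the two arrays compare equal.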
template <typename IdxT>
__global__ void build_actual_output(
int* output, int n_rows, int k, const int* idx_labels, const IdxT* indices)
{
int element = threadIdx.x + blockDim.x * blockIdx.x;
if (element >= n_rows * k) return;
output[element] = idx_labels[indices[element]];
}
__global__ void build_expected_output(int* output, int n_rows, int k, const int* labels)
{
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= n_rows) return;
int cur_label = labels[row];
for (int i = 0; i < k; i++) {
output[row * k + i] = cur_label;
}
}
template <typename T, typename IdxT>
class KNNTest : public ::testing::TestWithParam<KNNInputs> {
public:
KNNTest()
: params_(::testing::TestWithParam<KNNInputs>::GetParam()),
stream(resource::get_cuda_stream(handle)),
actual_labels_(0, stream),
expected_labels_(0, stream),
input_(0, stream),
search_data_(0, stream),
indices_(0, stream),
distances_(0, stream),
search_labels_(0, stream)
{
}
protected:
void testBruteForce()
{
// #if (RAFT_ACTIVE_LEVEL >= RAFT_LEVEL_DEBUG)
raft::print_device_vector("Input array: ", input_.data(), rows_ * cols_, std::cout);
std::cout << "K: " << k_ << std::endl;
raft::print_device_vector("Labels array: ", search_labels_.data(), rows_, std::cout);
// #endif
std::vector<device_matrix_view<const T, IdxT, row_major>> index = {
make_device_matrix_view((const T*)(input_.data()), rows_, cols_)};
auto search = raft::make_device_matrix_view<const T, IdxT, row_major>(
(const T*)(search_data_.data()), rows_, cols_);
auto indices = raft::make_device_matrix_view<IdxT, IdxT, row_major>(indices_.data(), rows_, k_);
auto distances =
raft::make_device_matrix_view<T, IdxT, row_major>(distances_.data(), rows_, k_);
auto metric = raft::distance::DistanceType::L2Unexpanded;
knn(handle, index, search, indices, distances, metric, std::make_optional<IdxT>(0));
build_actual_output<<<raft::ceildiv(rows_ * k_, 32), 32, 0, stream>>>(
actual_labels_.data(), rows_, k_, search_labels_.data(), indices_.data());
build_expected_output<<<raft::ceildiv(rows_ * k_, 32), 32, 0, stream>>>(
expected_labels_.data(), rows_, k_, search_labels_.data());
ASSERT_TRUE(devArrMatch(
expected_labels_.data(), actual_labels_.data(), rows_ * k_, raft::Compare<int>(), stream));
}
void SetUp() override
{
rows_ = params_.input.size();
cols_ = params_.input[0].size();
k_ = params_.k;
actual_labels_.resize(rows_ * k_, stream);
expected_labels_.resize(rows_ * k_, stream);
input_.resize(rows_ * cols_, stream);
search_data_.resize(rows_ * cols_, stream);
indices_.resize(rows_ * k_, stream);
distances_.resize(rows_ * k_, stream);
search_labels_.resize(rows_, stream);
RAFT_CUDA_TRY(
cudaMemsetAsync(actual_labels_.data(), 0, actual_labels_.size() * sizeof(int), stream));
RAFT_CUDA_TRY(
cudaMemsetAsync(expected_labels_.data(), 0, expected_labels_.size() * sizeof(int), stream));
RAFT_CUDA_TRY(cudaMemsetAsync(input_.data(), 0, input_.size() * sizeof(float), stream));
RAFT_CUDA_TRY(
cudaMemsetAsync(search_data_.data(), 0, search_data_.size() * sizeof(float), stream));
RAFT_CUDA_TRY(cudaMemsetAsync(indices_.data(), 0, indices_.size() * sizeof(IdxT), stream));
RAFT_CUDA_TRY(cudaMemsetAsync(distances_.data(), 0, distances_.size() * sizeof(float), stream));
RAFT_CUDA_TRY(
cudaMemsetAsync(search_labels_.data(), 0, search_labels_.size() * sizeof(int), stream));
std::vector<float> row_major_input;
for (std::size_t i = 0; i < params_.input.size(); ++i) {
for (std::size_t j = 0; j < params_.input[i].size(); ++j) {
row_major_input.push_back(params_.input[i][j]);
}
}
rmm::device_buffer input_d =
rmm::device_buffer(row_major_input.data(), row_major_input.size() * sizeof(float), stream);
float* input_ptr = static_cast<float*>(input_d.data());
rmm::device_buffer labels_d =
rmm::device_buffer(params_.labels.data(), params_.labels.size() * sizeof(int), stream);
int* labels_ptr = static_cast<int*>(labels_d.data());
raft::copy(input_.data(), input_ptr, rows_ * cols_, stream);
raft::copy(search_data_.data(), input_ptr, rows_ * cols_, stream);
raft::copy(search_labels_.data(), labels_ptr, rows_, stream);
resource::sync_stream(handle, stream);
}
private:
raft::resources handle;
cudaStream_t stream;
KNNInputs params_;
int rows_;
int cols_;
rmm::device_uvector<float> input_;
rmm::device_uvector<float> search_data_;
rmm::device_uvector<IdxT> indices_;
rmm::device_uvector<float> distances_;
int k_;
rmm::device_uvector<int> search_labels_;
rmm::device_uvector<int> actual_labels_;
rmm::device_uvector<int> expected_labels_;
};
const std::vector<KNNInputs> inputs = {
// 2D
{{
{2.7810836, 2.550537003},
{1.465489372, 2.362125076},
{3.396561688, 4.400293529},
{1.38807019, 1.850220317},
{3.06407232, 3.005305973},
{7.627531214, 2.759262235},
{5.332441248, 2.088626775},
{6.922596716, 1.77106367},
{8.675418651, -0.242068655},
{7.673756466, 3.508563011},
},
2,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1}}};
typedef KNNTest<float, int> KNNTestFint32_t;
TEST_P(KNNTestFint32_t, BruteForce) { this->testBruteForce(); }
typedef KNNTest<float, uint32_t> KNNTestFuint32_t;
TEST_P(KNNTestFuint32_t, BruteForce) { this->testBruteForce(); }
INSTANTIATE_TEST_CASE_P(KNNTest, KNNTestFint32_t, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(KNNTest, KNNTestFuint32_t, ::testing::ValuesIn(inputs));
} // namespace raft::neighbors::brute_force
|
e877b43d4f81ac9fb8b53797e599b7559fb39ccc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matMulKernel20.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *P = NULL;
hipMalloc(&P, XSIZE*YSIZE);
float *M = NULL;
hipMalloc(&M, XSIZE*YSIZE);
float *N = NULL;
hipMalloc(&N, XSIZE*YSIZE);
int width = XSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( matMulKernel20), dim3(gridBlock),dim3(threadBlock), 0, 0, P,M,N,width);
hipDeviceSynchronize();
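// Warm-up launches so the timed loop below is not skewed by one-time initialization costs.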
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(matMulKernel20, dim3(gridBlock),dim3(threadBlock), 0, 0, P,M,N,width);
}
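// Time 1000 launches with a host-side clock; the launches are asynchronous, so without a device
// synchronize after the loop the timer can stop before all queued work has finished.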
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(matMulKernel20, dim3(gridBlock),dim3(threadBlock), 0, 0, P,M,N,width);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e877b43d4f81ac9fb8b53797e599b7559fb39ccc.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matMulKernel20.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
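// Benchmark sweep: for each of the first argv[1] matrix sizes and each of the 20 block shapes above,
// time repeated launches of matMulKernel20 and print the elapsed time per configuration.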
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *P = NULL;
cudaMalloc(&P, XSIZE*YSIZE*sizeof(float));
float *M = NULL;
cudaMalloc(&M, XSIZE*YSIZE*sizeof(float));
float *N = NULL;
cudaMalloc(&N, XSIZE*YSIZE*sizeof(float));
int width = XSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
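// Round each dimension up to the next multiple of the block size so the launch grid covers the whole matrix.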
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matMulKernel20<<<gridBlock,threadBlock>>>(P,M,N,width);
cudaDeviceSynchronize();
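// Warm-up launches so the timed loop below is not skewed by one-time initialization costs.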
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matMulKernel20<<<gridBlock,threadBlock>>>(P,M,N,width);
}
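// Time 1000 launches with a host-side clock; the launches are asynchronous, so without a device
// synchronize after the loop the timer can stop before all queued work has finished.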
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matMulKernel20<<<gridBlock,threadBlock>>>(P,M,N,width);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
7812a7b77fad0b63771eb1d52aa6dc26d88065ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ __forceinline__ float sigmoid(float a) {
return 1.0 / (1.0 + exp (-a));
}
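// Backward pass of a sigmoid layer: scale the incoming gradients in place by sigma'(x) = y * (1 - y),
// where upper_values holds the forward activations y.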
__global__ void sigmoid_derivative(float *upper_grads, float *upper_values, unsigned int upper_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < upper_size)
upper_grads[index] *= upper_values[index]*(1.0f - upper_values[index]);
} | 7812a7b77fad0b63771eb1d52aa6dc26d88065ae.cu | #include "includes.h"
__device__ __forceinline__ float sigmoid(float a) {
return 1.0 / (1.0 + exp (-a));
}
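// Backward pass of a sigmoid layer: scale the incoming gradients in place by sigma'(x) = y * (1 - y),
// where upper_values holds the forward activations y.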
__global__ void sigmoid_derivative(float *upper_grads, float *upper_values, unsigned int upper_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < upper_size)
upper_grads[index] *= upper_values[index]*(1.0f - upper_values[index]);
} |
d0087b4c883af16b9f0f15729b3548f73b38be51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
__global__
void elementwise_kernel(int volume,
OpType type,
const Tensor xTensor,
const Tensor yTensor,
const Tensor zTensor,
const DATATYPE* x,
const DATATYPE* y,
DATATYPE* z)
{
int pos[6];
assert(zTensor.numDim <= 6);
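// For each output element, recover its per-dimension coordinates from the flat index, then rebuild the
// offsets into the two inputs; an input with fewer dimensions is aligned against the output's trailing dimensions.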
CUDA_KERNEL_LOOP(id_z, volume)
{
int id_x = 0, id_y = 0;
for (int j = 0; j < zTensor.numDim; j++) {
pos[j] = (id_z / zTensor.stride[j]) % zTensor.dim[j];
}
int diff = zTensor.numDim - xTensor.numDim;
for (int j = 0; j < xTensor.numDim; j++) {
id_x += xTensor.stride[j] * pos[j + diff];
}
diff = zTensor.numDim - yTensor.numDim;
for (int j = 0; j < yTensor.numDim; j++) {
id_y += yTensor.stride[j] * pos[j + diff];
}
switch (type) {
case OP_EW_ADD:
{
z[id_z] = x[id_x] + y[id_y];
break;
}
case OP_EW_MUL:
{
z[id_z] = x[id_x] * y[id_y];
break;
}
case OP_EW_MAX:
{
z[id_z] = max(x[id_x], y[id_y]);
break;
}
case OP_EW_MIN:
{
z[id_z] = min(x[id_x], y[id_y]);
break;
}
case OP_EW_SUB:
{
z[id_z] = x[id_x] - y[id_y];
break;
}
case OP_EW_DIV:
{
z[id_z] = x[id_x] / y[id_y];
break;
}
case OP_EW_EQUAL:
{
z[id_z] = (x[id_x] == y[id_y]);
break;
}
case OP_EW_GREATER:
{
z[id_z] = (x[id_x] > y[id_y]);
break;
}
case OP_EW_LESS:
{
z[id_z] = (x[id_x] < y[id_y]);
break;
}
case OP_PRELU:
{
z[id_z] = x[id_x] >= 0 ? x[id_x] : y[id_y] * x[id_x];
break;
}
default:
assert(false);
}
}
}
bool Element::use_kernel(void) const
{
switch (type) {
case OP_EW_ADD:
case OP_EW_MUL:
case OP_EW_MAX:
case OP_EW_MIN:
break;
default:
return false;
}
// use cudnn kernel only if inputs and output have default layouts
if (inputs[0].default_layout() && inputs[1].default_layout()
&& outputs[0].default_layout()) {
// Do nothing
} else {
return false;
}
// CUDNNv7.6.5 requirement: Each dimension of the input tensor A must
// match the corresponding dimension of the destination tensor C, and
// each dimension of the input tensor B must match the corresponding
// dimension of the destination tensor C or must be equal to 1.
// In the latter case, the same value from the input tensor B for
// those dimensions will be used to blend into the C tensor.
if (inputs[0].volume() != outputs[0].volume())
return false;
return true;
}
void Element::map(void)
{
if (use_kernel()) {
// create descriptors
checkCUDNN(cudnnCreateTensorDescriptor(&in1Tensor));
checkCUDNN(cudnnCreateTensorDescriptor(&in2Tensor));
checkCUDNN(cudnnCreateTensorDescriptor(&outTensor));
checkCUDNN(cudnnCreateOpTensorDescriptor(&opDesc));
// set descriptors
helperSetBroadcastableTensorDescriptor(inputs[0], outputs[0], in1Tensor);
helperSetBroadcastableTensorDescriptor(inputs[1], outputs[0], in2Tensor);
helperSetTensorDescriptor(outputs[0], outTensor);
cudnnOpTensorOp_t opType;
switch (type) {
case OP_EW_ADD:
opType = CUDNN_OP_TENSOR_ADD;
break;
case OP_EW_MUL:
opType = CUDNN_OP_TENSOR_MUL;
break;
case OP_EW_MAX:
opType = CUDNN_OP_TENSOR_MAX;
break;
case OP_EW_MIN:
opType = CUDNN_OP_TENSOR_MIN;
break;
default:
fprintf(stderr, "Unsupported Elementwise Operator by cuDNN: %d\n", type);
assert(false);
}
checkCUDNN(cudnnSetOpTensorDescriptor(opDesc, opType, CUDNN_DATA_FLOAT,
CUDNN_NOT_PROPAGATE_NAN));
} else {
// No preprocessing for our customized kernel
}
// allocate tensors
size_t outputSize = sizeof(DATATYPE);
for (int i = 0; i < outputs[0].numDim; i++)
outputSize *= outputs[0].dim[i];
checkCUDA(hipMalloc(&outputs[0].data_ptr, outputSize));
}
void Element::unmap(void)
{
if (use_kernel()) {
checkCUDNN(cudnnDestroyTensorDescriptor(in1Tensor));
checkCUDNN(cudnnDestroyTensorDescriptor(in2Tensor));
checkCUDNN(cudnnDestroyTensorDescriptor(outTensor));
checkCUDNN(cudnnDestroyOpTensorDescriptor(opDesc));
}
checkCUDA(hipFree(outputs[0].data_ptr));
}
void Element::forward(bool block)
{
if (use_kernel()) {
const float alpha = 1.0f;
const float beta = 0.0f;
checkCUDNN(cudnnOpTensor(model->dnn, opDesc, &alpha, in1Tensor, inputs[0].data_ptr,
&alpha, in2Tensor, inputs[1].data_ptr, &beta, outTensor, outputs[0].data_ptr));
} else {
hipLaunchKernelGGL(( elementwise_kernel), dim3(GET_BLOCKS(outputs[0].volume())), dim3(CUDA_NUM_THREADS), 0, 0,
outputs[0].volume(), type, inputs[0], inputs[1], outputs[0],
(DATATYPE*)inputs[0].data_ptr, (DATATYPE*)inputs[1].data_ptr,
(DATATYPE*)outputs[0].data_ptr);
}
if (block)
checkCUDA(hipDeviceSynchronize());
}
void Model::measure_element_cost(Element* ele)
{
// cudnnOpTensor only supports OP_EW_ADD, OP_EW_MUL, OP_EW_MAX, OP_EW_MIN
if (ele->use_kernel()) {
const float alpha = 1.0f;
const float beta = 0.0f;
helperSetBroadcastableTensorDescriptor(ele->inputs[0],
ele->outputs[0], inputTensor);
helperSetBroadcastableTensorDescriptor(ele->inputs[1],
ele->outputs[0], biasTensor);
helperSetTensorDescriptor(ele->outputs[0], outputTensor);
cudnnOpTensorOp_t opType;
switch (ele->type) {
case OP_EW_ADD:
opType = CUDNN_OP_TENSOR_ADD;
break;
case OP_EW_MUL:
opType = CUDNN_OP_TENSOR_MUL;
break;
case OP_EW_MAX:
opType = CUDNN_OP_TENSOR_MAX;
break;
case OP_EW_MIN:
opType = CUDNN_OP_TENSOR_MIN;
break;
default:
{
fprintf(stderr, "Unsupported Elementwise Operator by cuDNN: %d\n", ele->type);
assert(false);
}
}
checkCUDNN(cudnnSetOpTensorDescriptor(opDesc, opType, CUDNN_DATA_FLOAT,
CUDNN_NOT_PROPAGATE_NAN));
checkCUDA(hipDeviceSynchronize());
checkCUDA(hipEventRecord(startEvent));
for (int i = 0; i < REPEAT_TIMES; i++) {
checkCUDNN(cudnnOpTensor(dnn, opDesc, &alpha, inputTensor, inputPtr,
&alpha, biasTensor, filterPtr, &beta, outputTensor, outputPtr));
}
checkCUDA(hipEventRecord(endEvent));
checkCUDA(hipEventSynchronize(endEvent));
float milliseconds;
hipEventElapsedTime(&milliseconds, startEvent, endEvent);
ele->runtime = milliseconds / REPEAT_TIMES;
if (print_cost)
printf(" measure[Element]: i(%d %d %d %d) type(%d) cost(%.4lf)\n",
ele->inputs[0].dim[0], ele->inputs[0].dim[1], ele->inputs[0].dim[2],
ele->inputs[0].dim[3], ele->type, ele->runtime);
} else {
// Use our implementation to measure other elementwise operators
checkCUDA(hipDeviceSynchronize());
checkCUDA(hipEventRecord(startEvent));
for (int i = 0; i < REPEAT_TIMES; i++) {
hipLaunchKernelGGL(( elementwise_kernel), dim3(GET_BLOCKS(ele->outputs[0].volume())), dim3(CUDA_NUM_THREADS), 0, 0,
ele->outputs[0].volume(), ele->type, ele->inputs[0], ele->inputs[1],
ele->outputs[0], inputPtr, filterPtr, outputPtr);
}
checkCUDA(hipEventRecord(endEvent));
checkCUDA(hipEventSynchronize(endEvent));
float milliseconds;
hipEventElapsedTime(&milliseconds, startEvent, endEvent);
ele->runtime = milliseconds / REPEAT_TIMES;
if (print_cost)
printf(" measure[Element]: i(%d %d %d %d) type(%d) cost(%.4lf)\n",
ele->inputs[0].dim[0], ele->inputs[0].dim[1], ele->inputs[0].dim[2],
ele->inputs[0].dim[3], ele->type, ele->runtime);
}
}
| d0087b4c883af16b9f0f15729b3548f73b38be51.cu | /* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
__global__
void elementwise_kernel(int volume,
OpType type,
const Tensor xTensor,
const Tensor yTensor,
const Tensor zTensor,
const DATATYPE* x,
const DATATYPE* y,
DATATYPE* z)
{
int pos[6];
assert(zTensor.numDim <= 6);
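// For each output element, recover its per-dimension coordinates from the flat index, then rebuild the
// offsets into the two inputs; an input with fewer dimensions is aligned against the output's trailing dimensions.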
CUDA_KERNEL_LOOP(id_z, volume)
{
int id_x = 0, id_y = 0;
for (int j = 0; j < zTensor.numDim; j++) {
pos[j] = (id_z / zTensor.stride[j]) % zTensor.dim[j];
}
int diff = zTensor.numDim - xTensor.numDim;
for (int j = 0; j < xTensor.numDim; j++) {
id_x += xTensor.stride[j] * pos[j + diff];
}
diff = zTensor.numDim - yTensor.numDim;
for (int j = 0; j < yTensor.numDim; j++) {
id_y += yTensor.stride[j] * pos[j + diff];
}
switch (type) {
case OP_EW_ADD:
{
z[id_z] = x[id_x] + y[id_y];
break;
}
case OP_EW_MUL:
{
z[id_z] = x[id_x] * y[id_y];
break;
}
case OP_EW_MAX:
{
z[id_z] = max(x[id_x], y[id_y]);
break;
}
case OP_EW_MIN:
{
z[id_z] = min(x[id_x], y[id_y]);
break;
}
case OP_EW_SUB:
{
z[id_z] = x[id_x] - y[id_y];
break;
}
case OP_EW_DIV:
{
z[id_z] = x[id_x] / y[id_y];
break;
}
case OP_EW_EQUAL:
{
z[id_z] = (x[id_x] == y[id_y]);
break;
}
case OP_EW_GREATER:
{
z[id_z] = (x[id_x] > y[id_y]);
break;
}
case OP_EW_LESS:
{
z[id_z] = (x[id_x] < y[id_y]);
break;
}
case OP_PRELU:
{
z[id_z] = x[id_x] >= 0 ? x[id_x] : y[id_y] * x[id_x];
break;
}
default:
assert(false);
}
}
}
bool Element::use_kernel(void) const
{
switch (type) {
case OP_EW_ADD:
case OP_EW_MUL:
case OP_EW_MAX:
case OP_EW_MIN:
break;
default:
return false;
}
// use cudnn kernel only if inputs and output have default layouts
if (inputs[0].default_layout() && inputs[1].default_layout()
&& outputs[0].default_layout()) {
// Do nothing
} else {
return false;
}
// CUDNNv7.6.5 requirement: Each dimension of the input tensor A must
// match the corresponding dimension of the destination tensor C, and
// each dimension of the input tensor B must match the corresponding
// dimension of the destination tensor C or must be equal to 1.
// In the latter case, the same value from the input tensor B for
// those dimensions will be used to blend into the C tensor.
if (inputs[0].volume() != outputs[0].volume())
return false;
return true;
}
void Element::map(void)
{
if (use_kernel()) {
// create descriptors
checkCUDNN(cudnnCreateTensorDescriptor(&in1Tensor));
checkCUDNN(cudnnCreateTensorDescriptor(&in2Tensor));
checkCUDNN(cudnnCreateTensorDescriptor(&outTensor));
checkCUDNN(cudnnCreateOpTensorDescriptor(&opDesc));
// set descriptors
helperSetBroadcastableTensorDescriptor(inputs[0], outputs[0], in1Tensor);
helperSetBroadcastableTensorDescriptor(inputs[1], outputs[0], in2Tensor);
helperSetTensorDescriptor(outputs[0], outTensor);
cudnnOpTensorOp_t opType;
switch (type) {
case OP_EW_ADD:
opType = CUDNN_OP_TENSOR_ADD;
break;
case OP_EW_MUL:
opType = CUDNN_OP_TENSOR_MUL;
break;
case OP_EW_MAX:
opType = CUDNN_OP_TENSOR_MAX;
break;
case OP_EW_MIN:
opType = CUDNN_OP_TENSOR_MIN;
break;
default:
fprintf(stderr, "Unsupported Elementwise Operator by cuDNN: %d\n", type);
assert(false);
}
checkCUDNN(cudnnSetOpTensorDescriptor(opDesc, opType, CUDNN_DATA_FLOAT,
CUDNN_NOT_PROPAGATE_NAN));
} else {
// No preprocessing for our customized kernel
}
// allocate tensors
size_t outputSize = sizeof(DATATYPE);
for (int i = 0; i < outputs[0].numDim; i++)
outputSize *= outputs[0].dim[i];
checkCUDA(cudaMalloc(&outputs[0].data_ptr, outputSize));
}
void Element::unmap(void)
{
if (use_kernel()) {
checkCUDNN(cudnnDestroyTensorDescriptor(in1Tensor));
checkCUDNN(cudnnDestroyTensorDescriptor(in2Tensor));
checkCUDNN(cudnnDestroyTensorDescriptor(outTensor));
checkCUDNN(cudnnDestroyOpTensorDescriptor(opDesc));
}
checkCUDA(cudaFree(outputs[0].data_ptr));
}
void Element::forward(bool block)
{
if (use_kernel()) {
const float alpha = 1.0f;
const float beta = 0.0f;
checkCUDNN(cudnnOpTensor(model->dnn, opDesc, &alpha, in1Tensor, inputs[0].data_ptr,
&alpha, in2Tensor, inputs[1].data_ptr, &beta, outTensor, outputs[0].data_ptr));
} else {
elementwise_kernel<<<GET_BLOCKS(outputs[0].volume()), CUDA_NUM_THREADS>>>(
outputs[0].volume(), type, inputs[0], inputs[1], outputs[0],
(DATATYPE*)inputs[0].data_ptr, (DATATYPE*)inputs[1].data_ptr,
(DATATYPE*)outputs[0].data_ptr);
}
if (block)
checkCUDA(cudaDeviceSynchronize());
}
void Model::measure_element_cost(Element* ele)
{
// cudnnOpTensor only supports OP_EW_ADD, OP_EW_MUL, OP_EW_MAX, OP_EW_MIN
if (ele->use_kernel()) {
const float alpha = 1.0f;
const float beta = 0.0f;
helperSetBroadcastableTensorDescriptor(ele->inputs[0],
ele->outputs[0], inputTensor);
helperSetBroadcastableTensorDescriptor(ele->inputs[1],
ele->outputs[0], biasTensor);
helperSetTensorDescriptor(ele->outputs[0], outputTensor);
cudnnOpTensorOp_t opType;
switch (ele->type) {
case OP_EW_ADD:
opType = CUDNN_OP_TENSOR_ADD;
break;
case OP_EW_MUL:
opType = CUDNN_OP_TENSOR_MUL;
break;
case OP_EW_MAX:
opType = CUDNN_OP_TENSOR_MAX;
break;
case OP_EW_MIN:
opType = CUDNN_OP_TENSOR_MIN;
break;
default:
{
fprintf(stderr, "Unsupported Elementwise Operator by cuDNN: %d\n", ele->type);
assert(false);
}
}
checkCUDNN(cudnnSetOpTensorDescriptor(opDesc, opType, CUDNN_DATA_FLOAT,
CUDNN_NOT_PROPAGATE_NAN));
checkCUDA(cudaDeviceSynchronize());
checkCUDA(cudaEventRecord(startEvent));
for (int i = 0; i < REPEAT_TIMES; i++) {
checkCUDNN(cudnnOpTensor(dnn, opDesc, &alpha, inputTensor, inputPtr,
&alpha, biasTensor, filterPtr, &beta, outputTensor, outputPtr));
}
checkCUDA(cudaEventRecord(endEvent));
checkCUDA(cudaEventSynchronize(endEvent));
float milliseconds;
cudaEventElapsedTime(&milliseconds, startEvent, endEvent);
ele->runtime = milliseconds / REPEAT_TIMES;
if (print_cost)
printf(" measure[Element]: i(%d %d %d %d) type(%d) cost(%.4lf)\n",
ele->inputs[0].dim[0], ele->inputs[0].dim[1], ele->inputs[0].dim[2],
ele->inputs[0].dim[3], ele->type, ele->runtime);
} else {
// Use our implementation to measure other elementwise operators
checkCUDA(cudaDeviceSynchronize());
checkCUDA(cudaEventRecord(startEvent));
for (int i = 0; i < REPEAT_TIMES; i++) {
elementwise_kernel<<<GET_BLOCKS(ele->outputs[0].volume()), CUDA_NUM_THREADS>>>(
ele->outputs[0].volume(), ele->type, ele->inputs[0], ele->inputs[1],
ele->outputs[0], inputPtr, filterPtr, outputPtr);
}
checkCUDA(cudaEventRecord(endEvent));
checkCUDA(cudaEventSynchronize(endEvent));
float milliseconds;
cudaEventElapsedTime(&milliseconds, startEvent, endEvent);
ele->runtime = milliseconds / REPEAT_TIMES;
if (print_cost)
printf(" measure[Element]: i(%d %d %d %d) type(%d) cost(%.4lf)\n",
ele->inputs[0].dim[0], ele->inputs[0].dim[1], ele->inputs[0].dim[2],
ele->inputs[0].dim[3], ele->type, ele->runtime);
}
}
|
9e78ab5277c488678d4cd678d39abd543b01fbc4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_preproc_kernel;
int xdim0_preproc_kernel_h = -1;
__constant__ int ydim0_preproc_kernel;
int ydim0_preproc_kernel_h = -1;
__constant__ int xdim1_preproc_kernel;
int xdim1_preproc_kernel_h = -1;
__constant__ int ydim1_preproc_kernel;
int ydim1_preproc_kernel_h = -1;
__constant__ int xdim2_preproc_kernel;
int xdim2_preproc_kernel_h = -1;
__constant__ int ydim2_preproc_kernel;
int ydim2_preproc_kernel_h = -1;
__constant__ int xdim3_preproc_kernel;
int xdim3_preproc_kernel_h = -1;
__constant__ int ydim3_preproc_kernel;
int ydim3_preproc_kernel_h = -1;
__constant__ int xdim4_preproc_kernel;
int xdim4_preproc_kernel_h = -1;
__constant__ int ydim4_preproc_kernel;
int ydim4_preproc_kernel_h = -1;
__constant__ int xdim5_preproc_kernel;
int xdim5_preproc_kernel_h = -1;
__constant__ int ydim5_preproc_kernel;
int ydim5_preproc_kernel_h = -1;
__constant__ int xdim6_preproc_kernel;
int xdim6_preproc_kernel_h = -1;
__constant__ int ydim6_preproc_kernel;
int ydim6_preproc_kernel_h = -1;
__constant__ int xdim7_preproc_kernel;
int xdim7_preproc_kernel_h = -1;
__constant__ int ydim7_preproc_kernel;
int ydim7_preproc_kernel_h = -1;
__constant__ int xdim8_preproc_kernel;
int xdim8_preproc_kernel_h = -1;
__constant__ int ydim8_preproc_kernel;
int ydim8_preproc_kernel_h = -1;
__constant__ int xdim9_preproc_kernel;
int xdim9_preproc_kernel_h = -1;
__constant__ int ydim9_preproc_kernel;
int ydim9_preproc_kernel_h = -1;
__constant__ int xdim10_preproc_kernel;
int xdim10_preproc_kernel_h = -1;
__constant__ int ydim10_preproc_kernel;
int ydim10_preproc_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
#define OPS_ACC0(x,y,z) (x+xdim0_preproc_kernel*(y)+xdim0_preproc_kernel*ydim0_preproc_kernel*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_preproc_kernel*(y)+xdim1_preproc_kernel*ydim1_preproc_kernel*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_preproc_kernel*(y)+xdim2_preproc_kernel*ydim2_preproc_kernel*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_preproc_kernel*(y)+xdim3_preproc_kernel*ydim3_preproc_kernel*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_preproc_kernel*(y)+xdim4_preproc_kernel*ydim4_preproc_kernel*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_preproc_kernel*(y)+xdim5_preproc_kernel*ydim5_preproc_kernel*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_preproc_kernel*(y)+xdim6_preproc_kernel*ydim6_preproc_kernel*(z))
#define OPS_ACC7(x,y,z) (x+xdim7_preproc_kernel*(y)+xdim7_preproc_kernel*ydim7_preproc_kernel*(z))
#define OPS_ACC8(x,y,z) (x+xdim8_preproc_kernel*(y)+xdim8_preproc_kernel*ydim8_preproc_kernel*(z))
#define OPS_ACC9(x,y,z) (x+xdim9_preproc_kernel*(y)+xdim9_preproc_kernel*ydim9_preproc_kernel*(z))
#define OPS_ACC10(x,y,z) (x+xdim10_preproc_kernel*(y)+xdim10_preproc_kernel*ydim10_preproc_kernel*(z))
//user function
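// For every grid point, assemble the right-hand side d (an explicit 3-D Laplacian update scaled by lambda)
// and the constant tridiagonal coefficients a, b, c for the x, y and z sweeps; boundary points get an
// identity row (a = 0, b = 1, c = 0). Presumably these feed an ADI-style tridiagonal solve downstream.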
__device__
void preproc_kernel_gpu(const double *u, double *du,
double *ax, double *bx, double *cx, double *ay, double *by, double *cy,
double *az, double *bz, double *cz, int *idx){
double a, b, c, d;
if(idx[0]==0 || idx[0]==nx-1 || idx[1]==0 || idx[1]==ny-1 || idx[2]==0 || idx[2]==nz-1) {
d = 0.0f;
a = 0.0f;
b = 1.0f;
c = 0.0f;
} else {
d = lambda*( u[OPS_ACC0(-1,0,0)] + u[OPS_ACC0(1,0,0)]
+ u[OPS_ACC0(0,-1,0)] + u[OPS_ACC0(0,1,0)]
+ u[OPS_ACC0(0,0,-1)] + u[OPS_ACC0(0,0,1)]
- 6.0f*u[OPS_ACC0(0,0,0)]);
a = -0.5f * lambda;
b = 1.0f + lambda;
c = -0.5f * lambda;
}
du[OPS_ACC1(0,0,0)] = d;
ax[OPS_ACC2(0,0,0)] = a;
bx[OPS_ACC3(0,0,0)] = b;
cx[OPS_ACC4(0,0,0)] = c;
ay[OPS_ACC5(0,0,0)] = a;
by[OPS_ACC6(0,0,0)] = b;
cy[OPS_ACC7(0,0,0)] = c;
az[OPS_ACC8(0,0,0)] = a;
bz[OPS_ACC9(0,0,0)] = b;
cz[OPS_ACC10(0,0,0)] = c;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
__global__ void ops_preproc_kernel(
const double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
double* __restrict arg8,
double* __restrict arg9,
double* __restrict arg10,
int arg_idx0, int arg_idx1, int arg_idx2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
int arg_idx[3];
arg_idx[0] = arg_idx0+idx_x;
arg_idx[1] = arg_idx1+idx_y;
arg_idx[2] = arg_idx2+idx_z;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_preproc_kernel + idx_z * 1*1 * xdim0_preproc_kernel * ydim0_preproc_kernel;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_preproc_kernel + idx_z * 1*1 * xdim1_preproc_kernel * ydim1_preproc_kernel;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_preproc_kernel + idx_z * 1*1 * xdim2_preproc_kernel * ydim2_preproc_kernel;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_preproc_kernel + idx_z * 1*1 * xdim3_preproc_kernel * ydim3_preproc_kernel;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_preproc_kernel + idx_z * 1*1 * xdim4_preproc_kernel * ydim4_preproc_kernel;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_preproc_kernel + idx_z * 1*1 * xdim5_preproc_kernel * ydim5_preproc_kernel;
arg6 += idx_x * 1*1 + idx_y * 1*1 * xdim6_preproc_kernel + idx_z * 1*1 * xdim6_preproc_kernel * ydim6_preproc_kernel;
arg7 += idx_x * 1*1 + idx_y * 1*1 * xdim7_preproc_kernel + idx_z * 1*1 * xdim7_preproc_kernel * ydim7_preproc_kernel;
arg8 += idx_x * 1*1 + idx_y * 1*1 * xdim8_preproc_kernel + idx_z * 1*1 * xdim8_preproc_kernel * ydim8_preproc_kernel;
arg9 += idx_x * 1*1 + idx_y * 1*1 * xdim9_preproc_kernel + idx_z * 1*1 * xdim9_preproc_kernel * ydim9_preproc_kernel;
arg10 += idx_x * 1*1 + idx_y * 1*1 * xdim10_preproc_kernel + idx_z * 1*1 * xdim10_preproc_kernel * ydim10_preproc_kernel;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
preproc_kernel_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7, arg8,
arg9, arg10, arg_idx);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_preproc_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8,
ops_arg arg9, ops_arg arg10, ops_arg arg11) {
#else
void ops_par_loop_preproc_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
ops_arg arg8 = desc->args[8];
ops_arg arg9 = desc->args[9];
ops_arg arg10 = desc->args[10];
ops_arg arg11 = desc->args[11];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[12] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,12,range,1)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(1,"preproc_kernel");
OPS_kernels[1].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int arg_idx[3];
#ifdef OPS_MPI
#ifdef OPS_LAZY
ops_block block = desc->block;
sub_block_list sb = OPS_sub_block_list[block->index];
#endif
arg_idx[0] = sb->decomp_disp[0]+start[0];
arg_idx[1] = sb->decomp_disp[1]+start[1];
arg_idx[2] = sb->decomp_disp[2]+start[2];
#else
arg_idx[0] = start[0];
arg_idx[1] = start[1];
arg_idx[2] = start[2];
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
int xdim7 = args[7].dat->size[0];
int ydim7 = args[7].dat->size[1];
int xdim8 = args[8].dat->size[0];
int ydim8 = args[8].dat->size[1];
int xdim9 = args[9].dat->size[0];
int ydim9 = args[9].dat->size[1];
int xdim10 = args[10].dat->size[0];
int ydim10 = args[10].dat->size[1];
if (xdim0 != xdim0_preproc_kernel_h || ydim0 != ydim0_preproc_kernel_h || xdim1 != xdim1_preproc_kernel_h || ydim1 != ydim1_preproc_kernel_h || xdim2 != xdim2_preproc_kernel_h || ydim2 != ydim2_preproc_kernel_h || xdim3 != xdim3_preproc_kernel_h || ydim3 != ydim3_preproc_kernel_h || xdim4 != xdim4_preproc_kernel_h || ydim4 != ydim4_preproc_kernel_h || xdim5 != xdim5_preproc_kernel_h || ydim5 != ydim5_preproc_kernel_h || xdim6 != xdim6_preproc_kernel_h || ydim6 != ydim6_preproc_kernel_h || xdim7 != xdim7_preproc_kernel_h || ydim7 != ydim7_preproc_kernel_h || xdim8 != xdim8_preproc_kernel_h || ydim8 != ydim8_preproc_kernel_h || xdim9 != xdim9_preproc_kernel_h || ydim9 != ydim9_preproc_kernel_h || xdim10 != xdim10_preproc_kernel_h || ydim10 != ydim10_preproc_kernel_h) {
hipMemcpyToSymbol( xdim0_preproc_kernel, &xdim0, sizeof(int) );
xdim0_preproc_kernel_h = xdim0;
hipMemcpyToSymbol( ydim0_preproc_kernel, &ydim0, sizeof(int) );
ydim0_preproc_kernel_h = ydim0;
hipMemcpyToSymbol( xdim1_preproc_kernel, &xdim1, sizeof(int) );
xdim1_preproc_kernel_h = xdim1;
hipMemcpyToSymbol( ydim1_preproc_kernel, &ydim1, sizeof(int) );
ydim1_preproc_kernel_h = ydim1;
hipMemcpyToSymbol( xdim2_preproc_kernel, &xdim2, sizeof(int) );
xdim2_preproc_kernel_h = xdim2;
hipMemcpyToSymbol( ydim2_preproc_kernel, &ydim2, sizeof(int) );
ydim2_preproc_kernel_h = ydim2;
hipMemcpyToSymbol( xdim3_preproc_kernel, &xdim3, sizeof(int) );
xdim3_preproc_kernel_h = xdim3;
hipMemcpyToSymbol( ydim3_preproc_kernel, &ydim3, sizeof(int) );
ydim3_preproc_kernel_h = ydim3;
hipMemcpyToSymbol( xdim4_preproc_kernel, &xdim4, sizeof(int) );
xdim4_preproc_kernel_h = xdim4;
hipMemcpyToSymbol( ydim4_preproc_kernel, &ydim4, sizeof(int) );
ydim4_preproc_kernel_h = ydim4;
hipMemcpyToSymbol( xdim5_preproc_kernel, &xdim5, sizeof(int) );
xdim5_preproc_kernel_h = xdim5;
hipMemcpyToSymbol( ydim5_preproc_kernel, &ydim5, sizeof(int) );
ydim5_preproc_kernel_h = ydim5;
hipMemcpyToSymbol( xdim6_preproc_kernel, &xdim6, sizeof(int) );
xdim6_preproc_kernel_h = xdim6;
hipMemcpyToSymbol( ydim6_preproc_kernel, &ydim6, sizeof(int) );
ydim6_preproc_kernel_h = ydim6;
hipMemcpyToSymbol( xdim7_preproc_kernel, &xdim7, sizeof(int) );
xdim7_preproc_kernel_h = xdim7;
hipMemcpyToSymbol( ydim7_preproc_kernel, &ydim7, sizeof(int) );
ydim7_preproc_kernel_h = ydim7;
hipMemcpyToSymbol( xdim8_preproc_kernel, &xdim8, sizeof(int) );
xdim8_preproc_kernel_h = xdim8;
hipMemcpyToSymbol( ydim8_preproc_kernel, &ydim8, sizeof(int) );
ydim8_preproc_kernel_h = ydim8;
hipMemcpyToSymbol( xdim9_preproc_kernel, &xdim9, sizeof(int) );
xdim9_preproc_kernel_h = xdim9;
hipMemcpyToSymbol( ydim9_preproc_kernel, &ydim9, sizeof(int) );
ydim9_preproc_kernel_h = ydim9;
hipMemcpyToSymbol( xdim10_preproc_kernel, &xdim10, sizeof(int) );
xdim10_preproc_kernel_h = xdim10;
hipMemcpyToSymbol( ydim10_preproc_kernel, &ydim10, sizeof(int) );
ydim10_preproc_kernel_h = ydim10;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size);
int dat8 = (OPS_soa ? args[8].dat->type_size : args[8].dat->elem_size);
int dat9 = (OPS_soa ? args[9].dat->type_size : args[9].dat->elem_size);
int dat10 = (OPS_soa ? args[10].dat->type_size : args[10].dat->elem_size);
char *p_a[12];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
int base7 = args[7].dat->base_offset +
dat7 * 1 * (start[0] * args[7].stencil->stride[0]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
(start[1] * args[7].stencil->stride[1]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
args[7].dat->size[1] *
(start[2] * args[7].stencil->stride[2]);
p_a[7] = (char *)args[7].data_d + base7;
int base8 = args[8].dat->base_offset +
dat8 * 1 * (start[0] * args[8].stencil->stride[0]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
(start[1] * args[8].stencil->stride[1]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
args[8].dat->size[1] *
(start[2] * args[8].stencil->stride[2]);
p_a[8] = (char *)args[8].data_d + base8;
int base9 = args[9].dat->base_offset +
dat9 * 1 * (start[0] * args[9].stencil->stride[0]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
(start[1] * args[9].stencil->stride[1]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
args[9].dat->size[1] *
(start[2] * args[9].stencil->stride[2]);
p_a[9] = (char *)args[9].data_d + base9;
int base10 = args[10].dat->base_offset +
dat10 * 1 * (start[0] * args[10].stencil->stride[0]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
(start[1] * args[10].stencil->stride[1]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
args[10].dat->size[1] *
(start[2] * args[10].stencil->stride[2]);
p_a[10] = (char *)args[10].data_d + base10;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 12);
ops_halo_exchanges(args,12,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_preproc_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (double *)p_a[7],
(double *)p_a[8], (double *)p_a[9],
(double *)p_a[10], arg_idx[0], arg_idx[1], arg_idx[2],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[1].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 12);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
ops_set_halo_dirtybit3(&args[7],range);
ops_set_halo_dirtybit3(&args[8],range);
ops_set_halo_dirtybit3(&args[9],range);
ops_set_halo_dirtybit3(&args[10],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg7);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg8);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg9);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg10);
}
}
#ifdef OPS_LAZY
void ops_par_loop_preproc_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 1;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 1;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 12;
desc->args = (ops_arg*)malloc(12*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index;
desc->args[8] = arg8;
desc->hash = ((desc->hash << 5) + desc->hash) + arg8.dat->index;
desc->args[9] = arg9;
desc->hash = ((desc->hash << 5) + desc->hash) + arg9.dat->index;
desc->args[10] = arg10;
desc->hash = ((desc->hash << 5) + desc->hash) + arg10.dat->index;
desc->args[11] = arg11;
desc->function = ops_par_loop_preproc_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(1,"preproc_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
| 9e78ab5277c488678d4cd678d39abd543b01fbc4.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_preproc_kernel;
int xdim0_preproc_kernel_h = -1;
__constant__ int ydim0_preproc_kernel;
int ydim0_preproc_kernel_h = -1;
__constant__ int xdim1_preproc_kernel;
int xdim1_preproc_kernel_h = -1;
__constant__ int ydim1_preproc_kernel;
int ydim1_preproc_kernel_h = -1;
__constant__ int xdim2_preproc_kernel;
int xdim2_preproc_kernel_h = -1;
__constant__ int ydim2_preproc_kernel;
int ydim2_preproc_kernel_h = -1;
__constant__ int xdim3_preproc_kernel;
int xdim3_preproc_kernel_h = -1;
__constant__ int ydim3_preproc_kernel;
int ydim3_preproc_kernel_h = -1;
__constant__ int xdim4_preproc_kernel;
int xdim4_preproc_kernel_h = -1;
__constant__ int ydim4_preproc_kernel;
int ydim4_preproc_kernel_h = -1;
__constant__ int xdim5_preproc_kernel;
int xdim5_preproc_kernel_h = -1;
__constant__ int ydim5_preproc_kernel;
int ydim5_preproc_kernel_h = -1;
__constant__ int xdim6_preproc_kernel;
int xdim6_preproc_kernel_h = -1;
__constant__ int ydim6_preproc_kernel;
int ydim6_preproc_kernel_h = -1;
__constant__ int xdim7_preproc_kernel;
int xdim7_preproc_kernel_h = -1;
__constant__ int ydim7_preproc_kernel;
int ydim7_preproc_kernel_h = -1;
__constant__ int xdim8_preproc_kernel;
int xdim8_preproc_kernel_h = -1;
__constant__ int ydim8_preproc_kernel;
int ydim8_preproc_kernel_h = -1;
__constant__ int xdim9_preproc_kernel;
int xdim9_preproc_kernel_h = -1;
__constant__ int ydim9_preproc_kernel;
int ydim9_preproc_kernel_h = -1;
__constant__ int xdim10_preproc_kernel;
int xdim10_preproc_kernel_h = -1;
__constant__ int ydim10_preproc_kernel;
int ydim10_preproc_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
#define OPS_ACC0(x,y,z) (x+xdim0_preproc_kernel*(y)+xdim0_preproc_kernel*ydim0_preproc_kernel*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_preproc_kernel*(y)+xdim1_preproc_kernel*ydim1_preproc_kernel*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_preproc_kernel*(y)+xdim2_preproc_kernel*ydim2_preproc_kernel*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_preproc_kernel*(y)+xdim3_preproc_kernel*ydim3_preproc_kernel*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_preproc_kernel*(y)+xdim4_preproc_kernel*ydim4_preproc_kernel*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_preproc_kernel*(y)+xdim5_preproc_kernel*ydim5_preproc_kernel*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_preproc_kernel*(y)+xdim6_preproc_kernel*ydim6_preproc_kernel*(z))
#define OPS_ACC7(x,y,z) (x+xdim7_preproc_kernel*(y)+xdim7_preproc_kernel*ydim7_preproc_kernel*(z))
#define OPS_ACC8(x,y,z) (x+xdim8_preproc_kernel*(y)+xdim8_preproc_kernel*ydim8_preproc_kernel*(z))
#define OPS_ACC9(x,y,z) (x+xdim9_preproc_kernel*(y)+xdim9_preproc_kernel*ydim9_preproc_kernel*(z))
#define OPS_ACC10(x,y,z) (x+xdim10_preproc_kernel*(y)+xdim10_preproc_kernel*ydim10_preproc_kernel*(z))
//user function
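// For every grid point, assemble the right-hand side d (an explicit 3-D Laplacian update scaled by lambda)
// and the constant tridiagonal coefficients a, b, c for the x, y and z sweeps; boundary points get an
// identity row (a = 0, b = 1, c = 0). Presumably these feed an ADI-style tridiagonal solve downstream.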
__device__
void preproc_kernel_gpu(const double *u, double *du,
double *ax, double *bx, double *cx, double *ay, double *by, double *cy,
double *az, double *bz, double *cz, int *idx){
double a, b, c, d;
if(idx[0]==0 || idx[0]==nx-1 || idx[1]==0 || idx[1]==ny-1 || idx[2]==0 || idx[2]==nz-1) {
d = 0.0f;
a = 0.0f;
b = 1.0f;
c = 0.0f;
} else {
d = lambda*( u[OPS_ACC0(-1,0,0)] + u[OPS_ACC0(1,0,0)]
+ u[OPS_ACC0(0,-1,0)] + u[OPS_ACC0(0,1,0)]
+ u[OPS_ACC0(0,0,-1)] + u[OPS_ACC0(0,0,1)]
- 6.0f*u[OPS_ACC0(0,0,0)]);
a = -0.5f * lambda;
b = 1.0f + lambda;
c = -0.5f * lambda;
}
du[OPS_ACC1(0,0,0)] = d;
ax[OPS_ACC2(0,0,0)] = a;
bx[OPS_ACC3(0,0,0)] = b;
cx[OPS_ACC4(0,0,0)] = c;
ay[OPS_ACC5(0,0,0)] = a;
by[OPS_ACC6(0,0,0)] = b;
cy[OPS_ACC7(0,0,0)] = c;
az[OPS_ACC8(0,0,0)] = a;
bz[OPS_ACC9(0,0,0)] = b;
cz[OPS_ACC10(0,0,0)] = c;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
__global__ void ops_preproc_kernel(
const double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
double* __restrict arg8,
double* __restrict arg9,
double* __restrict arg10,
int arg_idx0, int arg_idx1, int arg_idx2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
int arg_idx[3];
arg_idx[0] = arg_idx0+idx_x;
arg_idx[1] = arg_idx1+idx_y;
arg_idx[2] = arg_idx2+idx_z;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_preproc_kernel + idx_z * 1*1 * xdim0_preproc_kernel * ydim0_preproc_kernel;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_preproc_kernel + idx_z * 1*1 * xdim1_preproc_kernel * ydim1_preproc_kernel;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_preproc_kernel + idx_z * 1*1 * xdim2_preproc_kernel * ydim2_preproc_kernel;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_preproc_kernel + idx_z * 1*1 * xdim3_preproc_kernel * ydim3_preproc_kernel;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_preproc_kernel + idx_z * 1*1 * xdim4_preproc_kernel * ydim4_preproc_kernel;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_preproc_kernel + idx_z * 1*1 * xdim5_preproc_kernel * ydim5_preproc_kernel;
arg6 += idx_x * 1*1 + idx_y * 1*1 * xdim6_preproc_kernel + idx_z * 1*1 * xdim6_preproc_kernel * ydim6_preproc_kernel;
arg7 += idx_x * 1*1 + idx_y * 1*1 * xdim7_preproc_kernel + idx_z * 1*1 * xdim7_preproc_kernel * ydim7_preproc_kernel;
arg8 += idx_x * 1*1 + idx_y * 1*1 * xdim8_preproc_kernel + idx_z * 1*1 * xdim8_preproc_kernel * ydim8_preproc_kernel;
arg9 += idx_x * 1*1 + idx_y * 1*1 * xdim9_preproc_kernel + idx_z * 1*1 * xdim9_preproc_kernel * ydim9_preproc_kernel;
arg10 += idx_x * 1*1 + idx_y * 1*1 * xdim10_preproc_kernel + idx_z * 1*1 * xdim10_preproc_kernel * ydim10_preproc_kernel;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
preproc_kernel_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7, arg8,
arg9, arg10, arg_idx);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_preproc_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8,
ops_arg arg9, ops_arg arg10, ops_arg arg11) {
#else
void ops_par_loop_preproc_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
ops_arg arg8 = desc->args[8];
ops_arg arg9 = desc->args[9];
ops_arg arg10 = desc->args[10];
ops_arg arg11 = desc->args[11];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[12] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,12,range,1)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(1,"preproc_kernel");
OPS_kernels[1].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int arg_idx[3];
#ifdef OPS_MPI
#ifdef OPS_LAZY
ops_block block = desc->block;
sub_block_list sb = OPS_sub_block_list[block->index];
#endif
arg_idx[0] = sb->decomp_disp[0]+start[0];
arg_idx[1] = sb->decomp_disp[1]+start[1];
arg_idx[2] = sb->decomp_disp[2]+start[2];
#else
arg_idx[0] = start[0];
arg_idx[1] = start[1];
arg_idx[2] = start[2];
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
int xdim7 = args[7].dat->size[0];
int ydim7 = args[7].dat->size[1];
int xdim8 = args[8].dat->size[0];
int ydim8 = args[8].dat->size[1];
int xdim9 = args[9].dat->size[0];
int ydim9 = args[9].dat->size[1];
int xdim10 = args[10].dat->size[0];
int ydim10 = args[10].dat->size[1];
if (xdim0 != xdim0_preproc_kernel_h || ydim0 != ydim0_preproc_kernel_h || xdim1 != xdim1_preproc_kernel_h || ydim1 != ydim1_preproc_kernel_h || xdim2 != xdim2_preproc_kernel_h || ydim2 != ydim2_preproc_kernel_h || xdim3 != xdim3_preproc_kernel_h || ydim3 != ydim3_preproc_kernel_h || xdim4 != xdim4_preproc_kernel_h || ydim4 != ydim4_preproc_kernel_h || xdim5 != xdim5_preproc_kernel_h || ydim5 != ydim5_preproc_kernel_h || xdim6 != xdim6_preproc_kernel_h || ydim6 != ydim6_preproc_kernel_h || xdim7 != xdim7_preproc_kernel_h || ydim7 != ydim7_preproc_kernel_h || xdim8 != xdim8_preproc_kernel_h || ydim8 != ydim8_preproc_kernel_h || xdim9 != xdim9_preproc_kernel_h || ydim9 != ydim9_preproc_kernel_h || xdim10 != xdim10_preproc_kernel_h || ydim10 != ydim10_preproc_kernel_h) {
cudaMemcpyToSymbol( xdim0_preproc_kernel, &xdim0, sizeof(int) );
xdim0_preproc_kernel_h = xdim0;
cudaMemcpyToSymbol( ydim0_preproc_kernel, &ydim0, sizeof(int) );
ydim0_preproc_kernel_h = ydim0;
cudaMemcpyToSymbol( xdim1_preproc_kernel, &xdim1, sizeof(int) );
xdim1_preproc_kernel_h = xdim1;
cudaMemcpyToSymbol( ydim1_preproc_kernel, &ydim1, sizeof(int) );
ydim1_preproc_kernel_h = ydim1;
cudaMemcpyToSymbol( xdim2_preproc_kernel, &xdim2, sizeof(int) );
xdim2_preproc_kernel_h = xdim2;
cudaMemcpyToSymbol( ydim2_preproc_kernel, &ydim2, sizeof(int) );
ydim2_preproc_kernel_h = ydim2;
cudaMemcpyToSymbol( xdim3_preproc_kernel, &xdim3, sizeof(int) );
xdim3_preproc_kernel_h = xdim3;
cudaMemcpyToSymbol( ydim3_preproc_kernel, &ydim3, sizeof(int) );
ydim3_preproc_kernel_h = ydim3;
cudaMemcpyToSymbol( xdim4_preproc_kernel, &xdim4, sizeof(int) );
xdim4_preproc_kernel_h = xdim4;
cudaMemcpyToSymbol( ydim4_preproc_kernel, &ydim4, sizeof(int) );
ydim4_preproc_kernel_h = ydim4;
cudaMemcpyToSymbol( xdim5_preproc_kernel, &xdim5, sizeof(int) );
xdim5_preproc_kernel_h = xdim5;
cudaMemcpyToSymbol( ydim5_preproc_kernel, &ydim5, sizeof(int) );
ydim5_preproc_kernel_h = ydim5;
cudaMemcpyToSymbol( xdim6_preproc_kernel, &xdim6, sizeof(int) );
xdim6_preproc_kernel_h = xdim6;
cudaMemcpyToSymbol( ydim6_preproc_kernel, &ydim6, sizeof(int) );
ydim6_preproc_kernel_h = ydim6;
cudaMemcpyToSymbol( xdim7_preproc_kernel, &xdim7, sizeof(int) );
xdim7_preproc_kernel_h = xdim7;
cudaMemcpyToSymbol( ydim7_preproc_kernel, &ydim7, sizeof(int) );
ydim7_preproc_kernel_h = ydim7;
cudaMemcpyToSymbol( xdim8_preproc_kernel, &xdim8, sizeof(int) );
xdim8_preproc_kernel_h = xdim8;
cudaMemcpyToSymbol( ydim8_preproc_kernel, &ydim8, sizeof(int) );
ydim8_preproc_kernel_h = ydim8;
cudaMemcpyToSymbol( xdim9_preproc_kernel, &xdim9, sizeof(int) );
xdim9_preproc_kernel_h = xdim9;
cudaMemcpyToSymbol( ydim9_preproc_kernel, &ydim9, sizeof(int) );
ydim9_preproc_kernel_h = ydim9;
cudaMemcpyToSymbol( xdim10_preproc_kernel, &xdim10, sizeof(int) );
xdim10_preproc_kernel_h = xdim10;
cudaMemcpyToSymbol( ydim10_preproc_kernel, &ydim10, sizeof(int) );
ydim10_preproc_kernel_h = ydim10;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size);
int dat8 = (OPS_soa ? args[8].dat->type_size : args[8].dat->elem_size);
int dat9 = (OPS_soa ? args[9].dat->type_size : args[9].dat->elem_size);
int dat10 = (OPS_soa ? args[10].dat->type_size : args[10].dat->elem_size);
char *p_a[12];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
int base7 = args[7].dat->base_offset +
dat7 * 1 * (start[0] * args[7].stencil->stride[0]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
(start[1] * args[7].stencil->stride[1]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
args[7].dat->size[1] *
(start[2] * args[7].stencil->stride[2]);
p_a[7] = (char *)args[7].data_d + base7;
int base8 = args[8].dat->base_offset +
dat8 * 1 * (start[0] * args[8].stencil->stride[0]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
(start[1] * args[8].stencil->stride[1]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
args[8].dat->size[1] *
(start[2] * args[8].stencil->stride[2]);
p_a[8] = (char *)args[8].data_d + base8;
int base9 = args[9].dat->base_offset +
dat9 * 1 * (start[0] * args[9].stencil->stride[0]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
(start[1] * args[9].stencil->stride[1]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
args[9].dat->size[1] *
(start[2] * args[9].stencil->stride[2]);
p_a[9] = (char *)args[9].data_d + base9;
int base10 = args[10].dat->base_offset +
dat10 * 1 * (start[0] * args[10].stencil->stride[0]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
(start[1] * args[10].stencil->stride[1]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
args[10].dat->size[1] *
(start[2] * args[10].stencil->stride[2]);
p_a[10] = (char *)args[10].data_d + base10;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 12);
ops_halo_exchanges(args,12,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_preproc_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (double *)p_a[7],
(double *)p_a[8], (double *)p_a[9],
(double *)p_a[10], arg_idx[0], arg_idx[1], arg_idx[2],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[1].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 12);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
ops_set_halo_dirtybit3(&args[7],range);
ops_set_halo_dirtybit3(&args[8],range);
ops_set_halo_dirtybit3(&args[9],range);
ops_set_halo_dirtybit3(&args[10],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg7);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg8);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg9);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg10);
}
}
#ifdef OPS_LAZY
void ops_par_loop_preproc_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 1;
desc->hash = 5381;
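  // Note: ((hash << 5) + hash) + x equals hash * 33 + x, i.e. the djb2-style
  // hash seeded with 5381; it folds the loop index, the range and the dat
  // indices into a key that presumably identifies this loop instance to the
  // lazy-execution machinery.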
desc->hash = ((desc->hash << 5) + desc->hash) + 1;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 12;
desc->args = (ops_arg*)malloc(12*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index;
desc->args[8] = arg8;
desc->hash = ((desc->hash << 5) + desc->hash) + arg8.dat->index;
desc->args[9] = arg9;
desc->hash = ((desc->hash << 5) + desc->hash) + arg9.dat->index;
desc->args[10] = arg10;
desc->hash = ((desc->hash << 5) + desc->hash) + arg10.dat->index;
desc->args[11] = arg11;
desc->function = ops_par_loop_preproc_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(1,"preproc_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
cf485290b5d453e91b76913c34828cb42aaeb3b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N 40
__global__ void kernel(int* A, int* X, int* B, int num)
{
__shared__ int sum[32];
int proizvod;
int k = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
int indA = i * num + k;
sum[threadIdx.y] = 0;
__syncthreads();
if (i < num && k < num)
proizvod = A[indA] * X[k];
else
proizvod = 0;
atomicAdd(&sum[threadIdx.y], proizvod);
__syncthreads();
if (threadIdx.x == 0)
atomicAdd(&B[i],sum[threadIdx.y]);
}
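// Sketch of the reduction performed above (numbers assume N = 40 and the
// 32x32 blocks configured in main): each thread forms one product
// A[i][k] * X[k]; threads sharing threadIdx.y accumulate their products into
// sum[threadIdx.y] via atomicAdd; then one thread per row adds that partial
// sum into B[i]. With N = 40, each row receives (40 + 31) / 32 = 2 partial sums.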
void Hostkenrel(int* A, int* X, int* B)
{
for (int i = 0; i < N; i++)
{
B[i] = 0;
for (int k = 0; k < N; k++)
{
B[i] += A[i * N + k] * X[k];
}
}
}
void PrintVec(int* vec) {
for (int i = 0; i < N; i++)
printf("%d ", vec[i]);
printf("\n");
}
int main()
{
int A[N * N], X[N], B[N], Bh[N];
int* Ad, * Xd, * Bd;
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
A[i * N + j] = j + j;
printf("%d ", A[i * N + j]);
}
X[i] = i;
B[i] = 0;
printf("\n");
}
printf("\n");
PrintVec(X);
hipMalloc((void**)&Ad, sizeof(int) * N * N);
hipMalloc((void**)&Xd, sizeof(int) * N);
hipMalloc((void**)&Bd, sizeof(int) * N);
hipMemcpy(Ad, A, sizeof(int) * N * N, hipMemcpyHostToDevice);
hipMemcpy(Xd, X, sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(Bd, B, sizeof(int) * N, hipMemcpyHostToDevice);
dim3 gridSize((N + 31) / 32, (N + 31) / 32);
dim3 blockSize(32, 32);
kernel << <gridSize, blockSize >> > (Ad, Xd, Bd, N);
hipMemcpy(B, Bd, sizeof(int) * N, hipMemcpyDeviceToHost);
Hostkenrel(A, X, Bh);
PrintVec(B);
printf("\n");
PrintVec(Bh);
hipFree(Ad);
hipFree(Bd);
hipFree(Xd);
return 0;
} | cf485290b5d453e91b76913c34828cb42aaeb3b5.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N 40
__global__ void kernel(int* A, int* X, int* B, int num)
{
__shared__ int sum[32];
int proizvod;
int k = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
int indA = i * num + k;
sum[threadIdx.y] = 0;
__syncthreads();
if (i < num && k < num)
proizvod = A[indA] * X[k];
else
proizvod = 0;
atomicAdd(&sum[threadIdx.y], proizvod);
__syncthreads();
if (threadIdx.x == 0)
atomicAdd(&B[i],sum[threadIdx.y]);
}
void Hostkenrel(int* A, int* X, int* B)
{
for (int i = 0; i < N; i++)
{
B[i] = 0;
for (int k = 0; k < N; k++)
{
B[i] += A[i * N + k] * X[k];
}
}
}
void PrintVec(int* vec) {
for (int i = 0; i < N; i++)
printf("%d ", vec[i]);
printf("\n");
}
int main()
{
int A[N * N], X[N], B[N], Bh[N];
int* Ad, * Xd, * Bd;
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
A[i * N + j] = j + j;
printf("%d ", A[i * N + j]);
}
X[i] = i;
B[i] = 0;
printf("\n");
}
printf("\n");
PrintVec(X);
cudaMalloc((void**)&Ad, sizeof(int) * N * N);
cudaMalloc((void**)&Xd, sizeof(int) * N);
cudaMalloc((void**)&Bd, sizeof(int) * N);
cudaMemcpy(Ad, A, sizeof(int) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(Xd, X, sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(Bd, B, sizeof(int) * N, cudaMemcpyHostToDevice);
dim3 gridSize((N + 31) / 32, (N + 31) / 32);
dim3 blockSize(32, 32);
kernel << <gridSize, blockSize >> > (Ad, Xd, Bd, N);
cudaMemcpy(B, Bd, sizeof(int) * N, cudaMemcpyDeviceToHost);
Hostkenrel(A, X, Bh);
PrintVec(B);
printf("\n");
PrintVec(Bh);
cudaFree(Ad);
cudaFree(Bd);
cudaFree(Xd);
return 0;
} |
2d0c9944ed6bde3475bf5c021e688f00fd783716.hip | // !!! This is a file automatically generated by hipify!!!
//=========================================================================================
// DETAILS ABOUT THE SUBMISSION
//=========================================================================================
// Name: Jokubas Liutkus
// ID: 1601768
//
// Goals Achieved (all goals were achieved):
// 1. block scan +
// 2. full scan for large vectors +
// 3. Bank conflict avoidance optimization (BCAO) +
//
// Your time, in milliseconds to execute the different scans on a vector of 10,000,000 entries:
// Block scan without BCAO: 2.25990ms
// Block scan with BCAO: 1.37642ms
// Full scan without BCAO: 3.23677ms
// Full scan with BCAO: 2.35392ms
//
// CPU model: Intel(R) Core(TM) i5-6500 CPU @ 3.20GHz x 4 (lab machine)
// GPU model: GeForce GTX 960 (lab machine)
//
// Improvements/Additional features:
// 1. All multiplications were replaced with bit shifting, which improved the performance
// 2. The extract-sum phase was merged into the main block scan function, removing an additional kernel call
// 3. Padded the shared memory array of the last block with zeros while loading the data from device memory,
// to get faster operations on the last block's elements, as described in the paper.
// 4. The last element of each block is saved into a local variable from shared memory
// before running the reduction and distribution phases, rather than loading it later from global memory
// 5. A double block scan was implemented, which doubles the number of elements scanned by each block.
// 6. The offset variable was changed from doubling each time to being incremented, so it can be used as a
// shift amount when indexing elements in the reduction and distribution phases.
//=========================================================================================
//=========================================================================================
//=========================================================================================
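// Sketch of improvements 1 and 6 above, using the values from this file:
// thid * 2 is written as thid << 1 and BLOCK_SIZE * 2 as BLOCK_SIZE << 1, and
// instead of a stride that doubles every pass the kernels keep an exponent
// `offset` that is incremented, so the classic index (2*thid + 1) * stride - 1
// is computed as (((thid << 1) + 1) << offset) - 1.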
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <math.h>
// A helper macro to simplify handling cuda error checking
#define CUDA_ERROR( err, msg ) { \
if (err != hipSuccess) {\
printf( "%s: %s in %s at line %d\n", msg, hipGetErrorString( err ), __FILE__, __LINE__);\
exit( EXIT_FAILURE );\
}\
}
// Block size and its double version
// version with 128 block size showed the best performance
#define BLOCK_SIZE 128
#define BLOCK_SIZE_TWICE BLOCK_SIZE*2
// for avoiding bank conflicts
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#define CONFLICT_FREE_OFFSET(n) (((n) >> LOG_NUM_BANKS) + ((n) >> (2 * LOG_NUM_BANKS)))
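// Worked example of the padding macro (assuming 32 four-byte-wide banks):
// CONFLICT_FREE_OFFSET(31) = 0, CONFLICT_FREE_OFFSET(32) = 1,
// CONFLICT_FREE_OFFSET(64) = 2, so shared-memory index n is stored at
// n + n/32 (+ n/1024), which spreads accesses that would otherwise land in
// the same bank across different banks.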
// for comparing the results with the host version
static void compare_results(const int *vector1, const int *vector2,
int numElements) {
for (int i = 0; i < numElements; ++i) {
		if (vector1[i] != vector2[i]) {
printf("%d ----------- %d\n", vector1[i], vector2[i]);
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
}
// sequential scan version for speed and correctness comparison
__host__
void sequential_scan(int *g_idata, int *g_odata, int n) {
g_odata[0] = 0;
for (int i = 1; i < n; i++) {
g_odata[i] = g_odata[i - 1] + g_idata[i - 1];
}
}
// -----------------------------------------------------------
// ADDITION OPERATION FOR
// MULTILEVEL BLOCK SCANS
// -----------------------------------------------------------
__global__
void add_to_block(int *block, int len_block, int *SUM) {
// get the value of the element that has to be
// added to the main vector. We do not need to worry
// about accessing out of bounds because this function
// is called with the same number of blocks as the size of SUM array.
int s = SUM[blockIdx.x];
	// get the index of the first vector element this thread has to update
int addr = blockIdx.x * BLOCK_SIZE_TWICE + threadIdx.x;
__syncthreads();
// update two elements in the vector
if (addr < len_block)
block[addr] += s;
if (addr + blockDim.x < len_block)
block[addr + blockDim.x] += s;
}
__global__
void block_scan_full_BCAO(int *g_idata, int *g_odata, int n, int *SUM,
int add_last) {
// shared memory initialised to contain more than
// twice memory due to the offset because it might
// go out of bounds of the double array and the segfault
// would be received
__shared__ int temp[BLOCK_SIZE_TWICE + (BLOCK_SIZE >> 4)];
// local variables for the later usage to improve the performance
int thid = threadIdx.x;
int thid_shift = thid << 1;
int blockId = blockIdx.x * (BLOCK_SIZE << 1);
int offset = 0;
int last = 0;
// offset to avoid bank conflicts
int ai = thid;
int bi = thid + BLOCK_SIZE;
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// load the elements from global memory into the shared memory
if (blockId + ai < n)
temp[ai + bankOffsetA] = g_idata[blockId + ai];
else
temp[ai + bankOffsetA] = 0;
if (blockId + bi < n)
temp[bi + bankOffsetB] = g_idata[blockId + bi];
else
temp[bi + bankOffsetB] = 0;
// save the last element for later to improve the performance
if (add_last && thid == BLOCK_SIZE - 1)
last = temp[BLOCK_SIZE_TWICE - 1
+ CONFLICT_FREE_OFFSET((BLOCK_SIZE << 1) - 1)];
// build sum in place up the tree (reduction phase)
for (int d = BLOCK_SIZE; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int ai = ((thid_shift + 1) << offset) - 1;
int bi = ((thid_shift + 2) << offset) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset++;
}
// update the last element to 0
if (thid == 0) {
temp[BLOCK_SIZE_TWICE - 1 + CONFLICT_FREE_OFFSET(BLOCK_SIZE_TWICE - 1)] =
0;
}
// traverse down tree & build scan (distribution phase)
for (int d = 1; d < BLOCK_SIZE_TWICE; d <<= 1) {
offset--;
__syncthreads();
if (thid < d) {
int ai = ((thid_shift + 1) << offset) - 1;
int bi = ((thid_shift + 2) << offset) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
// extract the sum (merged to improve the performance)
if (add_last && thid == BLOCK_SIZE - 1)
SUM[blockIdx.x] = temp[BLOCK_SIZE_TWICE - 1
+ CONFLICT_FREE_OFFSET(BLOCK_SIZE_TWICE - 1)] + last;
// update the output vector by loading shared memory into the global memory
if (blockId + ai < n)
g_odata[blockId + ai] = temp[ai + bankOffsetA];
if (blockId + bi < n)
g_odata[blockId + bi] = temp[bi + bankOffsetB];
}
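// Illustrative trace of block_scan_full_BCAO above on a small 8-element block
// (independent of BLOCK_SIZE):
// input:                          [3,1,7,0,4,1,6,3]
// after the up-sweep (reduction): [3,4,7,11,4,5,6,25]
// clear the last element, then the down-sweep (distribution) gives the
// exclusive prefix sum:           [0,3,4,11,11,15,16,22]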
__host__
void full_block_scan_BCAO(int *h_IN, int *h_OUT, int len) {
// -----------------------------------------------------------
// INITIALIZATION
// -----------------------------------------------------------
	// error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// size to allocate for the vectors
size_t size = len * sizeof(int);
// create device timer
hipEvent_t d_start, d_stop;
float d_msecs;
hipEventCreate(&d_start);
hipEventCreate(&d_stop);
// allocate memory for all the possible vectors needed for the execution
int *d_IN = NULL;
err = hipMalloc((void **) &d_IN, size);
CUDA_ERROR(err, "Failed to allocate device vector IN");
int *d_OUT = NULL;
err = hipMalloc((void**) &d_OUT, size);
CUDA_ERROR(err, "Failed to allocate device vector OUT");
int *d_SUM_1 = NULL;
err = hipMalloc((void**) &d_SUM_1,
(1 + ((len - 1) / (BLOCK_SIZE * 2))) * sizeof(int));
	CUDA_ERROR(err, "Failed to allocate device vector SUM_1");
int *d_SUM_1_Scanned = NULL;
err = hipMalloc((void**) &d_SUM_1_Scanned,
(1 + ((len - 1) / (BLOCK_SIZE * 2))) * sizeof(int));
	CUDA_ERROR(err, "Failed to allocate device vector SUM_1_Scanned");
int *d_SUM_2 = NULL;
err = hipMalloc((void**) &d_SUM_2, (BLOCK_SIZE << 1) * sizeof(int));
	CUDA_ERROR(err, "Failed to allocate device vector SUM_2");
int *d_SUM_2_Scanned = NULL;
err = hipMalloc((void**) &d_SUM_2_Scanned,
(BLOCK_SIZE << 1) * sizeof(int));
	CUDA_ERROR(err, "Failed to allocate device vector SUM_2_Scanned");
// copy the memory from the host to the device
err = hipMemcpy(d_IN, h_IN, size, hipMemcpyHostToDevice);
CUDA_ERROR(err, "Failed to copy array IN from host to device");
// size of the grid for each level
int blocksPerGridLevel1 = 1 + ((len - 1) / (BLOCK_SIZE * 2));
int blocksPerGridLevel2 = 1 + ceil(blocksPerGridLevel1 / (BLOCK_SIZE << 1));
int blocksPerGridLevel3 = 1 + ceil(blocksPerGridLevel2 / (BLOCK_SIZE << 1));
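	// Worked example for the 10,000,000-element run driven by main(), with
	// BLOCK_SIZE = 128 so each block scans 256 elements:
	//   blocksPerGridLevel1 = 1 + 9,999,999/256 = 39,063
	//   blocksPerGridLevel2 = 1 + 39,063/256    = 153   (integer division inside ceil)
	//   blocksPerGridLevel3 = 1 + 153/256       = 1     -> the three-level branch runs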
// -----------------------------------------------------------
// EXECUTION
// -----------------------------------------------------------
// choosing the level on which to run the kernels
// based on the size of the grids
// if level one grid size is equal to 1 then single
// LEVEL 1 is enough to scan the whole array
if (blocksPerGridLevel1 == 1) {
// record the start time
hipEventRecord(d_start, 0);
// execute the actual kernel
hipLaunchKernelGGL(( block_scan_full_BCAO), dim3(blocksPerGridLevel1), dim3(BLOCK_SIZE), 0, 0, d_IN, d_OUT,
len,
NULL, 0);
// record the stop time
hipEventRecord(d_stop, 0);
hipEventSynchronize(d_stop);
hipDeviceSynchronize();
}
// if level two grid size is equal to 1 then two (LEVEL 2)
// scans are required to scan the whole array
else if (blocksPerGridLevel2 == 1) {
// record the start time
hipEventRecord(d_start, 0);
// execute the actual kernels
hipLaunchKernelGGL(( block_scan_full_BCAO), dim3(blocksPerGridLevel1), dim3(BLOCK_SIZE), 0, 0, d_IN, d_OUT,
len, d_SUM_1, 1);
hipLaunchKernelGGL(( block_scan_full_BCAO), dim3(blocksPerGridLevel2), dim3(BLOCK_SIZE), 0, 0, d_SUM_1,
d_SUM_1_Scanned, blocksPerGridLevel1, NULL, 0);
hipLaunchKernelGGL(( add_to_block), dim3(blocksPerGridLevel1), dim3(BLOCK_SIZE), 0, 0, d_OUT, len,
d_SUM_1_Scanned);
// record the stop time
hipEventRecord(d_stop, 0);
hipEventSynchronize(d_stop);
hipDeviceSynchronize();
}
// if level 3 grid size is equal to 1 then three (LEVEL 3)
// scans are required to scan the whole array
else if (blocksPerGridLevel3 == 1) {
// record the start time
hipEventRecord(d_start, 0);
// execute the actual kernels
hipLaunchKernelGGL(( block_scan_full_BCAO), dim3(blocksPerGridLevel1), dim3(BLOCK_SIZE), 0, 0, d_IN, d_OUT,
len, d_SUM_1, 1);
hipLaunchKernelGGL(( block_scan_full_BCAO), dim3(blocksPerGridLevel2), dim3(BLOCK_SIZE), 0, 0, d_SUM_1,
d_SUM_1_Scanned, blocksPerGridLevel1, d_SUM_2, 1);
hipLaunchKernelGGL(( block_scan_full_BCAO), dim3(1), dim3(BLOCK_SIZE), 0, 0, d_SUM_2, d_SUM_2_Scanned,
blocksPerGridLevel2, NULL, 0);
hipLaunchKernelGGL(( add_to_block), dim3(blocksPerGridLevel2), dim3(BLOCK_SIZE), 0, 0, d_SUM_1_Scanned,
blocksPerGridLevel1, d_SUM_2_Scanned);
hipLaunchKernelGGL(( add_to_block), dim3(blocksPerGridLevel1), dim3(BLOCK_SIZE), 0, 0, d_OUT, len,
d_SUM_1_Scanned);
// record the stop time
hipEventRecord(d_stop, 0);
hipEventSynchronize(d_stop);
hipDeviceSynchronize();
}
// if none of the conditions above is met, it means that the array is too
// large to be scanned in 3 level scan, therefore we print the error message
// and return
else {
fprintf(stderr,
"The array size=%d is too large to be scanned with level 3 scan!\n",
len);
// using goto is discouraged, however, in such situations
// where in the error conditions exit or cleanup is required
// it is considered idiomatic
goto cleanup;
}
// check whether the run was successful
err = hipGetLastError();
CUDA_ERROR(err, "Failed to launch block scan kernel");
// get the duration it took for the kernels to execute
err = hipEventElapsedTime(&d_msecs, d_start, d_stop);
CUDA_ERROR(err, "Failed to get elapsed time");
// print the time elapsed
printf(
"Full block with bank avoidance scan with %d elements took = %.5fmSecs\n",
len, d_msecs);
// copy the result from the device back to the host
err = hipMemcpy(h_OUT, d_OUT, size, hipMemcpyDeviceToHost);
CUDA_ERROR(err, "Failed to copy array OUT from device to host");
// -----------------------------------------------------------
// CLEANUP
// -----------------------------------------------------------
cleanup:
// Free device global memory
CUDA_ERROR(hipFree(d_IN), "Failed to free device vector IN");
CUDA_ERROR(hipFree(d_OUT), "Failed to free device vector OUT");
CUDA_ERROR(hipFree(d_SUM_1), "Failed to free device vector SUM_1");
CUDA_ERROR(hipFree(d_SUM_1_Scanned),
"Failed to free device vector SUM_1_Scanned");
CUDA_ERROR(hipFree(d_SUM_2), "Failed to free device vector SUM_2");
CUDA_ERROR(hipFree(d_SUM_2_Scanned),
"Failed to free device vector SUM_2_Scanned");
// Clean up the Device timer event objects
hipEventDestroy(d_start);
hipEventDestroy(d_stop);
// Reset the device and exit
err = hipDeviceReset();
CUDA_ERROR(err, "Failed to reset the device");
}
__global__
void block_scan_full(int *g_idata, int *g_odata, int n, int *SUM,
int add_last) {
// shared memory init
__shared__ int temp[BLOCK_SIZE << 1];
// local variables for the later usage to improve the performance
int thid = threadIdx.x;
int blockId = blockDim.x * blockIdx.x << 1;
int offset = 0;
int last = 0;
// load the elements from global memory into the shared memory
if (blockId + (thid << 1) < n)
temp[thid << 1] = g_idata[blockId + (thid << 1)];
if (blockId + (thid << 1) + 1 < n)
temp[(thid << 1) + 1] = g_idata[blockId + (thid << 1) + 1];
// save the last element for later to improve the performance
if (add_last && thid == BLOCK_SIZE - 1)
last = temp[(thid << 1) + 1];
// build sum in place up the tree (reduction phase)
for (int d = BLOCK_SIZE; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int ai = (((thid << 1) + 1) << offset) - 1;
int bi = (((thid << 1) + 2) << offset) - 1;
temp[bi] += temp[ai];
}
offset++;
}
// clear the last element
if (thid == 0)
temp[(BLOCK_SIZE << 1) - 1] = 0;
// traverse down tree & build scan (distribution phase)
for (int d = 1; d < (BLOCK_SIZE << 1); d <<= 1) {
offset--;
__syncthreads();
if (thid < d) {
int ai = (((thid << 1) + 1) << offset) - 1;
int bi = (((thid << 1) + 2) << offset) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
// extract the sum (merged to improve the performance)
if (add_last && thid == BLOCK_SIZE - 1)
SUM[blockIdx.x] = temp[(thid << 1) + 1] + last;
// update the output vector by loading shared memory into the global memory
if (blockId + (thid << 1) < n)
g_odata[blockId + (thid << 1)] = temp[thid << 1];
if (blockId + (thid << 1) + 1 < n)
g_odata[blockId + (thid << 1) + 1] = temp[(thid << 1) + 1];
}
__host__
void full_block_scan(int *h_IN, int *h_OUT, int len) {
// -----------------------------------------------------------
// INITIALIZATION
// -----------------------------------------------------------
	// error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// size to allocate for the vectors
size_t size = len * sizeof(int);
// create device timer
hipEvent_t d_start, d_stop;
float d_msecs;
hipEventCreate(&d_start);
hipEventCreate(&d_stop);
// allocate memory for all the possible vectors needed for the execution
int *d_IN = NULL;
err = hipMalloc((void **) &d_IN, size);
CUDA_ERROR(err, "Failed to allocate device vector IN");
int *d_OUT = NULL;
err = hipMalloc((void**) &d_OUT, size);
CUDA_ERROR(err, "Failed to allocate device vector OUT");
int *d_SUM_1 = NULL;
err = hipMalloc((void**) &d_SUM_1,
(1 + ((len - 1) / (BLOCK_SIZE * 2))) * sizeof(int));
	CUDA_ERROR(err, "Failed to allocate device vector SUM_1");
int *d_SUM_1_Scanned = NULL;
err = hipMalloc((void**) &d_SUM_1_Scanned,
(1 + ((len - 1) / (BLOCK_SIZE * 2))) * sizeof(int));
	CUDA_ERROR(err, "Failed to allocate device vector SUM_1_Scanned");
int *d_SUM_2 = NULL;
err = hipMalloc((void**) &d_SUM_2, (BLOCK_SIZE << 1) * sizeof(int));
	CUDA_ERROR(err, "Failed to allocate device vector SUM_2");
int *d_SUM_2_Scanned = NULL;
err = hipMalloc((void**) &d_SUM_2_Scanned,
(BLOCK_SIZE << 1) * sizeof(int));
	CUDA_ERROR(err, "Failed to allocate device vector SUM_2_Scanned");
// copy the memory from the host to the device
err = hipMemcpy(d_IN, h_IN, size, hipMemcpyHostToDevice);
CUDA_ERROR(err, "Failed to copy array IN from host to device");
// size of the grid for each level
int blocksPerGridLevel1 = 1 + ((len - 1) / (BLOCK_SIZE * 2));
int blocksPerGridLevel2 = 1 + ceil(blocksPerGridLevel1 / (BLOCK_SIZE << 1));
int blocksPerGridLevel3 = 1 + ceil(blocksPerGridLevel2 / (BLOCK_SIZE << 1));
// -----------------------------------------------------------
// EXECUTION
// -----------------------------------------------------------
// choosing the level on which to run the kernels
// based on the size of the grids
// if level one grid size is equal to 1 then single
// LEVEL 1 is enough to scan the whole array
if (blocksPerGridLevel1 == 1) {
// record the start time
hipEventRecord(d_start, 0);
// execute the actual kernel
hipLaunchKernelGGL(( block_scan_full), dim3(blocksPerGridLevel1), dim3(BLOCK_SIZE), 0, 0, d_IN, d_OUT, len,
NULL, 0);
// record the stop time
hipEventRecord(d_stop, 0);
hipEventSynchronize(d_stop);
hipDeviceSynchronize();
}
// if level two grid size is equal to 1 then two (LEVEL 2)
// scans are required to scan the whole array
else if (blocksPerGridLevel2 == 1) {
// record the start time
hipEventRecord(d_start, 0);
// execute the actual kernels
hipLaunchKernelGGL(( block_scan_full), dim3(blocksPerGridLevel1), dim3(BLOCK_SIZE), 0, 0, d_IN, d_OUT, len,
d_SUM_1, 1);
hipLaunchKernelGGL(( block_scan_full), dim3(blocksPerGridLevel2), dim3(BLOCK_SIZE), 0, 0, d_SUM_1,
d_SUM_1_Scanned, blocksPerGridLevel1, NULL, 0);
hipLaunchKernelGGL(( add_to_block), dim3(blocksPerGridLevel1), dim3(BLOCK_SIZE), 0, 0, d_OUT, len,
d_SUM_1_Scanned);
// record the stop time
hipEventRecord(d_stop, 0);
hipEventSynchronize(d_stop);
hipDeviceSynchronize();
}
// if level 3 grid size is equal to 1 then three (LEVEL 3)
// scans are required to scan the whole array
else if (blocksPerGridLevel3 == 1) {
// record the start time
hipEventRecord(d_start, 0);
// execute the actual kernels
hipLaunchKernelGGL(( block_scan_full), dim3(blocksPerGridLevel1), dim3(BLOCK_SIZE), 0, 0, d_IN, d_OUT, len,
d_SUM_1, 1);
hipLaunchKernelGGL(( block_scan_full), dim3(blocksPerGridLevel2), dim3(BLOCK_SIZE), 0, 0, d_SUM_1,
d_SUM_1_Scanned, blocksPerGridLevel1, d_SUM_2, 1);
hipLaunchKernelGGL(( block_scan_full), dim3(1), dim3(BLOCK_SIZE), 0, 0, d_SUM_2, d_SUM_2_Scanned,
blocksPerGridLevel2, NULL, 0);
hipLaunchKernelGGL(( add_to_block), dim3(blocksPerGridLevel2), dim3(BLOCK_SIZE), 0, 0, d_SUM_1_Scanned,
blocksPerGridLevel1, d_SUM_2_Scanned);
hipLaunchKernelGGL(( add_to_block), dim3(blocksPerGridLevel1), dim3(BLOCK_SIZE), 0, 0, d_OUT, len,
d_SUM_1_Scanned);
// record the stop time
hipEventRecord(d_stop, 0);
hipEventSynchronize(d_stop);
hipDeviceSynchronize();
}
// if none of the conditions above is met, it means that the array is too
// large to be scanned in 3 level scan, therefore we print the error message and return
else {
fprintf(stderr,
"The array size=%d is too large to be scanned with level 3 scan!\n",
len);
// using goto is discouraged, however, in such situations
// where in the error conditions exit or cleanup is required
// it is considered idiomatic
goto cleanup;
}
// check whether the run was successful
err = hipGetLastError();
CUDA_ERROR(err, "Failed to launch block scan kernel");
// get the duration it took for the kernels to execute
err = hipEventElapsedTime(&d_msecs, d_start, d_stop);
CUDA_ERROR(err, "Failed to get elapsed time");
// print the time elapsed
printf("Full block scan with %d elements took = %.5fmSecs\n", len, d_msecs);
// copy the result from the device back to the host
err = hipMemcpy(h_OUT, d_OUT, size, hipMemcpyDeviceToHost);
CUDA_ERROR(err, "Failed to copy array OUT from device to host");
// -----------------------------------------------------------
// CLEANUP
// -----------------------------------------------------------
cleanup:
// Free device global memory
CUDA_ERROR(hipFree(d_IN), "Failed to free device vector IN");
CUDA_ERROR(hipFree(d_OUT), "Failed to free device vector OUT");
CUDA_ERROR(hipFree(d_SUM_1), "Failed to free device vector SUM_1");
CUDA_ERROR(hipFree(d_SUM_1_Scanned),
"Failed to free device vector SUM_1_Scanned");
CUDA_ERROR(hipFree(d_SUM_2), "Failed to free device vector SUM_2");
CUDA_ERROR(hipFree(d_SUM_2_Scanned),
"Failed to free device vector SUM_2_Scanned");
// Clean up the Device timer event objects
hipEventDestroy(d_start);
hipEventDestroy(d_stop);
// Reset the device and exit
err = hipDeviceReset();
CUDA_ERROR(err, "Failed to reset the device");
}
/**
* Host main routine
*/
int main(void) {
	// error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// create host stopwatch times
StopWatchInterface * timer = NULL;
sdkCreateTimer(&timer);
double h_msecs;
	// size of the array to scan
int numElements = 10000000;
size_t size = numElements * sizeof(int);
// allocate the memory on the host for the arrays
int *h_IN = (int *) malloc(size);
int *h_OUT = (int *) malloc(size);
int *h_OUT_CUDA = (int *) malloc(size);
// verify the host allocations
if (h_IN == NULL || h_OUT == NULL || h_OUT_CUDA == NULL) {
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
	// initialise the host input with random values in [0, 9]
for (int i = 0; i < numElements; i++) {
h_IN[i] = rand() % 10;
}
// sequential scan
sdkStartTimer(&timer);
sequential_scan(h_IN, h_OUT, numElements);
sdkStopTimer(&timer);
h_msecs = sdkGetTimerValue(&timer);
printf("Sequential scan on host of %d elements took = %.5fmSecs\n",
numElements, h_msecs);
// -----------------------------------------------------------
	// PERFORM FULL BLOCK SCAN WITH BCAO
// -----------------------------------------------------------
full_block_scan_BCAO(h_IN, h_OUT_CUDA, numElements);
compare_results(h_OUT, h_OUT_CUDA, numElements);
// -----------------------------------------------------------
	// PERFORM FULL BLOCK SCAN WITHOUT BCAO
// -----------------------------------------------------------
full_block_scan(h_IN, h_OUT_CUDA, numElements);
compare_results(h_OUT, h_OUT_CUDA, numElements);
// -----------------------------------------------------------
// SIMPLE BLOCK SCANS
// -----------------------------------------------------------
// create device timer
hipEvent_t d_start, d_stop;
float d_msecs;
hipEventCreate(&d_start);
hipEventCreate(&d_stop);
// allocate memory for all the possible vectors needed for the execution
int *d_IN = NULL;
err = hipMalloc((void **) &d_IN, size);
CUDA_ERROR(err, "Failed to allocate device vector IN");
int *d_OUT = NULL;
err = hipMalloc((void**) &d_OUT, size);
CUDA_ERROR(err, "Failed to allocate device vector OUT");
// copy the memory from the host to the device
err = hipMemcpy(d_IN, h_IN, size, hipMemcpyHostToDevice);
CUDA_ERROR(err, "Failed to copy array IN from host to device");
// size of the grid for each level
int blocksPerGridLevel1 = 1 + ((numElements - 1) / (BLOCK_SIZE * 2));
// -----------------------------------------------------------
// BLOCK SCAN WITH BCAO
// -----------------------------------------------------------
// record the start time
hipEventRecord(d_start, 0);
// execute the actual kernel
hipLaunchKernelGGL(( block_scan_full_BCAO), dim3(blocksPerGridLevel1), dim3(BLOCK_SIZE), 0, 0, d_IN, d_OUT,
numElements,
NULL, 0);
// record the stop time
hipEventRecord(d_stop, 0);
hipEventSynchronize(d_stop);
hipDeviceSynchronize();
// check whether the run was successful
err = hipGetLastError();
CUDA_ERROR(err, "Failed to launch block scan kernel");
// get the duration it took for the kernels to execute
err = hipEventElapsedTime(&d_msecs, d_start, d_stop);
CUDA_ERROR(err, "Failed to get elapsed time");
// print the time elapsed
printf("Block with bank avoidance scan %d elements took = %.5fmSecs\n",
numElements, d_msecs);
// -----------------------------------------------------------
// BLOCK SCAN WITHOUT BCAO
// -----------------------------------------------------------
// record the start time
hipEventRecord(d_start, 0);
// execute the actual kernel
hipLaunchKernelGGL(( block_scan_full), dim3(blocksPerGridLevel1), dim3(BLOCK_SIZE), 0, 0, d_IN, d_OUT,
numElements,
NULL, 0);
// record the stop time
hipEventRecord(d_stop, 0);
hipEventSynchronize(d_stop);
hipDeviceSynchronize();
// check whether the run was successful
err = hipGetLastError();
CUDA_ERROR(err, "Failed to launch block scan kernel");
	// get the duration it took for the kernels to execute
err = hipEventElapsedTime(&d_msecs, d_start, d_stop);
CUDA_ERROR(err, "Failed to get elapsed time");
// print the time elapsed
printf("Block scan %d elements took = %.5fmSecs\n", numElements, d_msecs);
// Free device global memory
CUDA_ERROR(hipFree(d_IN), "Failed to free device vector IN");
CUDA_ERROR(hipFree(d_OUT), "Failed to free device vector OUT");
// Clean up the Device timer event objects
hipEventDestroy(d_start);
hipEventDestroy(d_stop);
// Reset the device and exit
err = hipDeviceReset();
CUDA_ERROR(err, "Failed to reset the device");
// Free host memory
free(h_IN);
free(h_OUT);
free(h_OUT_CUDA);
// Clean up the Host timer
sdkDeleteTimer(&timer);
return 0;
}
| 2d0c9944ed6bde3475bf5c021e688f00fd783716.cu | //=========================================================================================
// DETAILS ABOUT THE SUBMISSION
//=========================================================================================
// Name: Jokubas Liutkus
// ID: 1601768
//
// Goals Achieved (all goals were achieved):
// 1. block scan +
// 2. full scan for large vectors +
// 3. Bank conflict avoidance optimization (BCAO) +
//
// Your time, in milliseconds to execute the different scans on a vector of 10,000,000 entries:
// ∗ Block scan without BCAO: 2.25990ms
// ∗ Block scan with BCAO: 1.37642ms
// ∗ Full scan without BCAO: 3.23677ms
// ∗ Full scan with BCAO: 2.35392ms
//
// CPU model: Intel(R) Core(TM) i5-6500 CPU @ 3.20GHz x 4 (lab machine)
// GPU model: GeForce GTX 960 (lab machine)
//
// Improvements/Additional features:
// 1. All multiplications were replaced with bit shifting, which improved the performance
// 2. The extract-sum phase was merged into the main block scan function, removing an additional kernel call
// 3. Padded the shared memory array of the last block with zeros while loading the data from device memory,
// to get faster operations on the last block's elements, as described in the paper.
// 4. The last element of each block is saved into a local variable from shared memory
// before running the reduction and distribution phases, rather than loading it later from global memory
// 5. A double block scan was implemented, which doubles the number of elements scanned by each block.
// 6. The offset variable was changed from doubling each time to being incremented, so it can be used as a
// shift amount when indexing elements in the reduction and distribution phases.
//=========================================================================================
//=========================================================================================
//=========================================================================================
#include <stdio.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <math.h>
// A helper macro to simplify handling cuda error checking
#define CUDA_ERROR( err, msg ) { \
if (err != cudaSuccess) {\
printf( "%s: %s in %s at line %d\n", msg, cudaGetErrorString( err ), __FILE__, __LINE__);\
exit( EXIT_FAILURE );\
}\
}
// Block size and its double version
// version with 128 block size showed the best performance
#define BLOCK_SIZE 128
#define BLOCK_SIZE_TWICE BLOCK_SIZE*2
// for avoiding bank conflicts
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#define CONFLICT_FREE_OFFSET(n) (((n) >> LOG_NUM_BANKS) + ((n) >> (2 * LOG_NUM_BANKS)))
// for comparing the results with the host version
static void compare_results(const int *vector1, const int *vector2,
int numElements) {
for (int i = 0; i < numElements; ++i) {
		if (vector1[i] != vector2[i]) {
printf("%d ----------- %d\n", vector1[i], vector2[i]);
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
}
// sequential scan version for speed and correctness comparison
__host__
void sequential_scan(int *g_idata, int *g_odata, int n) {
g_odata[0] = 0;
for (int i = 1; i < n; i++) {
g_odata[i] = g_odata[i - 1] + g_idata[i - 1];
}
}
// -----------------------------------------------------------
// ADDITION OPERATION FOR
// MULTILEVEL BLOCK SCANS
// -----------------------------------------------------------
__global__
void add_to_block(int *block, int len_block, int *SUM) {
// get the value of the element that has to be
// added to the main vector. We do not need to worry
// about accessing out of bounds because this function
// is called with the same number of blocks as the size of SUM array.
int s = SUM[blockIdx.x];
	// get the index of the first vector element this thread has to update
int addr = blockIdx.x * BLOCK_SIZE_TWICE + threadIdx.x;
__syncthreads();
// update two elements in the vector
if (addr < len_block)
block[addr] += s;
if (addr + blockDim.x < len_block)
block[addr + blockDim.x] += s;
}
__global__
void block_scan_full_BCAO(int *g_idata, int *g_odata, int n, int *SUM,
int add_last) {
// shared memory initialised to contain more than
// twice memory due to the offset because it might
// go out of bounds of the double array and the segfault
// would be received
__shared__ int temp[BLOCK_SIZE_TWICE + (BLOCK_SIZE >> 4)];
// local variables for the later usage to improve the performance
int thid = threadIdx.x;
int thid_shift = thid << 1;
int blockId = blockIdx.x * (BLOCK_SIZE << 1);
int offset = 0;
int last = 0;
// offset to avoid bank conflicts
int ai = thid;
int bi = thid + BLOCK_SIZE;
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// load the elements from global memory into the shared memory
if (blockId + ai < n)
temp[ai + bankOffsetA] = g_idata[blockId + ai];
else
temp[ai + bankOffsetA] = 0;
if (blockId + bi < n)
temp[bi + bankOffsetB] = g_idata[blockId + bi];
else
temp[bi + bankOffsetB] = 0;
// save the last element for later to improve the performance
if (add_last && thid == BLOCK_SIZE - 1)
last = temp[BLOCK_SIZE_TWICE - 1
+ CONFLICT_FREE_OFFSET((BLOCK_SIZE << 1) - 1)];
// build sum in place up the tree (reduction phase)
for (int d = BLOCK_SIZE; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int ai = ((thid_shift + 1) << offset) - 1;
int bi = ((thid_shift + 2) << offset) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset++;
}
// update the last element to 0
if (thid == 0) {
temp[BLOCK_SIZE_TWICE - 1 + CONFLICT_FREE_OFFSET(BLOCK_SIZE_TWICE - 1)] =
0;
}
// traverse down tree & build scan (distribution phase)
for (int d = 1; d < BLOCK_SIZE_TWICE; d <<= 1) {
offset--;
__syncthreads();
if (thid < d) {
int ai = ((thid_shift + 1) << offset) - 1;
int bi = ((thid_shift + 2) << offset) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
// extract the sum (merged to improve the performance)
if (add_last && thid == BLOCK_SIZE - 1)
SUM[blockIdx.x] = temp[BLOCK_SIZE_TWICE - 1
+ CONFLICT_FREE_OFFSET(BLOCK_SIZE_TWICE - 1)] + last;
// update the output vector by loading shared memory into the global memory
if (blockId + ai < n)
g_odata[blockId + ai] = temp[ai + bankOffsetA];
if (blockId + bi < n)
g_odata[blockId + bi] = temp[bi + bankOffsetB];
}
__host__
void full_block_scan_BCAO(int *h_IN, int *h_OUT, int len) {
// -----------------------------------------------------------
// INITIALIZATION
// -----------------------------------------------------------
	// error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// size to allocate for the vectors
size_t size = len * sizeof(int);
// create device timer
cudaEvent_t d_start, d_stop;
float d_msecs;
cudaEventCreate(&d_start);
cudaEventCreate(&d_stop);
// allocate memory for all the possible vectors needed for the execution
int *d_IN = NULL;
err = cudaMalloc((void **) &d_IN, size);
CUDA_ERROR(err, "Failed to allocate device vector IN");
int *d_OUT = NULL;
err = cudaMalloc((void**) &d_OUT, size);
CUDA_ERROR(err, "Failed to allocate device vector OUT");
int *d_SUM_1 = NULL;
err = cudaMalloc((void**) &d_SUM_1,
(1 + ((len - 1) / (BLOCK_SIZE * 2))) * sizeof(int));
	CUDA_ERROR(err, "Failed to allocate device vector SUM_1");
int *d_SUM_1_Scanned = NULL;
err = cudaMalloc((void**) &d_SUM_1_Scanned,
(1 + ((len - 1) / (BLOCK_SIZE * 2))) * sizeof(int));
	CUDA_ERROR(err, "Failed to allocate device vector SUM_1_Scanned");
int *d_SUM_2 = NULL;
err = cudaMalloc((void**) &d_SUM_2, (BLOCK_SIZE << 1) * sizeof(int));
	CUDA_ERROR(err, "Failed to allocate device vector SUM_2");
int *d_SUM_2_Scanned = NULL;
err = cudaMalloc((void**) &d_SUM_2_Scanned,
(BLOCK_SIZE << 1) * sizeof(int));
	CUDA_ERROR(err, "Failed to allocate device vector SUM_2_Scanned");
// copy the memory from the host to the device
err = cudaMemcpy(d_IN, h_IN, size, cudaMemcpyHostToDevice);
CUDA_ERROR(err, "Failed to copy array IN from host to device");
// size of the grid for each level
int blocksPerGridLevel1 = 1 + ((len - 1) / (BLOCK_SIZE * 2));
int blocksPerGridLevel2 = 1 + ceil(blocksPerGridLevel1 / (BLOCK_SIZE << 1));
int blocksPerGridLevel3 = 1 + ceil(blocksPerGridLevel2 / (BLOCK_SIZE << 1));
// -----------------------------------------------------------
// EXECUTION
// -----------------------------------------------------------
// choosing the level on which to run the kernels
// based on the size of the grids
// if level one grid size is equal to 1 then single
// LEVEL 1 is enough to scan the whole array
if (blocksPerGridLevel1 == 1) {
// record the start time
cudaEventRecord(d_start, 0);
// execute the actual kernel
block_scan_full_BCAO<<<blocksPerGridLevel1, BLOCK_SIZE>>>(d_IN, d_OUT,
len,
NULL, 0);
// record the stop time
cudaEventRecord(d_stop, 0);
cudaEventSynchronize(d_stop);
cudaDeviceSynchronize();
}
// if level two grid size is equal to 1 then two (LEVEL 2)
// scans are required to scan the whole array
else if (blocksPerGridLevel2 == 1) {
// record the start time
cudaEventRecord(d_start, 0);
// execute the actual kernels
block_scan_full_BCAO<<<blocksPerGridLevel1, BLOCK_SIZE>>>(d_IN, d_OUT,
len, d_SUM_1, 1);
block_scan_full_BCAO<<<blocksPerGridLevel2, BLOCK_SIZE>>>(d_SUM_1,
d_SUM_1_Scanned, blocksPerGridLevel1, NULL, 0);
add_to_block<<<blocksPerGridLevel1, BLOCK_SIZE>>>(d_OUT, len,
d_SUM_1_Scanned);
// record the stop time
cudaEventRecord(d_stop, 0);
cudaEventSynchronize(d_stop);
cudaDeviceSynchronize();
}
// if level 3 grid size is equal to 1 then three (LEVEL 3)
// scans are required to scan the whole array
else if (blocksPerGridLevel3 == 1) {
// record the start time
cudaEventRecord(d_start, 0);
// execute the actual kernels
block_scan_full_BCAO<<<blocksPerGridLevel1, BLOCK_SIZE>>>(d_IN, d_OUT,
len, d_SUM_1, 1);
block_scan_full_BCAO<<<blocksPerGridLevel2, BLOCK_SIZE>>>(d_SUM_1,
d_SUM_1_Scanned, blocksPerGridLevel1, d_SUM_2, 1);
block_scan_full_BCAO<<<1, BLOCK_SIZE>>>(d_SUM_2, d_SUM_2_Scanned,
blocksPerGridLevel2, NULL, 0);
add_to_block<<<blocksPerGridLevel2, BLOCK_SIZE>>>(d_SUM_1_Scanned,
blocksPerGridLevel1, d_SUM_2_Scanned);
add_to_block<<<blocksPerGridLevel1, BLOCK_SIZE>>>(d_OUT, len,
d_SUM_1_Scanned);
// record the stop time
cudaEventRecord(d_stop, 0);
cudaEventSynchronize(d_stop);
cudaDeviceSynchronize();
}
// if none of the conditions above is met, it means that the array is too
// large to be scanned in 3 level scan, therefore we print the error message
// and return
else {
fprintf(stderr,
"The array size=%d is too large to be scanned with level 3 scan!\n",
len);
// using goto is discouraged, however, in such situations
// where in the error conditions exit or cleanup is required
// it is considered idiomatic
goto cleanup;
}
// check whether the run was successful
err = cudaGetLastError();
CUDA_ERROR(err, "Failed to launch block scan kernel");
// get the duration it took for the kernels to execute
err = cudaEventElapsedTime(&d_msecs, d_start, d_stop);
CUDA_ERROR(err, "Failed to get elapsed time");
// print the time elapsed
printf(
"Full block with bank avoidance scan with %d elements took = %.5fmSecs\n",
len, d_msecs);
// copy the result from the device back to the host
err = cudaMemcpy(h_OUT, d_OUT, size, cudaMemcpyDeviceToHost);
CUDA_ERROR(err, "Failed to copy array OUT from device to host");
// -----------------------------------------------------------
// CLEANUP
// -----------------------------------------------------------
cleanup:
// Free device global memory
CUDA_ERROR(cudaFree(d_IN), "Failed to free device vector IN");
CUDA_ERROR(cudaFree(d_OUT), "Failed to free device vector OUT");
CUDA_ERROR(cudaFree(d_SUM_1), "Failed to free device vector SUM_1");
CUDA_ERROR(cudaFree(d_SUM_1_Scanned),
"Failed to free device vector SUM_1_Scanned");
CUDA_ERROR(cudaFree(d_SUM_2), "Failed to free device vector SUM_2");
CUDA_ERROR(cudaFree(d_SUM_2_Scanned),
"Failed to free device vector SUM_2_Scanned");
// Clean up the Device timer event objects
cudaEventDestroy(d_start);
cudaEventDestroy(d_stop);
// Reset the device and exit
err = cudaDeviceReset();
CUDA_ERROR(err, "Failed to reset the device");
}
__global__
void block_scan_full(int *g_idata, int *g_odata, int n, int *SUM,
int add_last) {
// shared memory init
__shared__ int temp[BLOCK_SIZE << 1];
// local variables for the later usage to improve the performance
int thid = threadIdx.x;
int blockId = blockDim.x * blockIdx.x << 1;
int offset = 0;
int last = 0;
// load the elements from global memory into the shared memory
if (blockId + (thid << 1) < n)
temp[thid << 1] = g_idata[blockId + (thid << 1)];
if (blockId + (thid << 1) + 1 < n)
temp[(thid << 1) + 1] = g_idata[blockId + (thid << 1) + 1];
// save the last element for later to improve the performance
if (add_last && thid == BLOCK_SIZE - 1)
last = temp[(thid << 1) + 1];
// build sum in place up the tree (reduction phase)
for (int d = BLOCK_SIZE; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int ai = (((thid << 1) + 1) << offset) - 1;
int bi = (((thid << 1) + 2) << offset) - 1;
temp[bi] += temp[ai];
}
offset++;
}
// clear the last element
if (thid == 0)
temp[(BLOCK_SIZE << 1) - 1] = 0;
// traverse down tree & build scan (distribution phase)
for (int d = 1; d < (BLOCK_SIZE << 1); d <<= 1) {
offset--;
__syncthreads();
if (thid < d) {
int ai = (((thid << 1) + 1) << offset) - 1;
int bi = (((thid << 1) + 2) << offset) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
// extract the sum (merged to improve the performance)
if (add_last && thid == BLOCK_SIZE - 1)
SUM[blockIdx.x] = temp[(thid << 1) + 1] + last;
// update the output vector by loading shared memory into the global memory
if (blockId + (thid << 1) < n)
g_odata[blockId + (thid << 1)] = temp[thid << 1];
if (blockId + (thid << 1) + 1 < n)
g_odata[blockId + (thid << 1) + 1] = temp[(thid << 1) + 1];
}
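// Note on the indexing above: without the CONFLICT_FREE_OFFSET padding used in
// the BCAO variant, the stride-2 shared-memory accesses (temp[thid << 1]) map
// two threads of a warp onto each even-numbered bank (assuming 32 banks of
// 4-byte words), so every access is at least a 2-way bank conflict; this is one
// source of the gap between the BCAO and non-BCAO timings listed in the header.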
__host__
void full_block_scan(int *h_IN, int *h_OUT, int len) {
// -----------------------------------------------------------
// INITIALIZATION
// -----------------------------------------------------------
	// error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// size to allocate for the vectors
size_t size = len * sizeof(int);
// create device timer
cudaEvent_t d_start, d_stop;
float d_msecs;
cudaEventCreate(&d_start);
cudaEventCreate(&d_stop);
// allocate memory for all the possible vectors needed for the execution
int *d_IN = NULL;
err = cudaMalloc((void **) &d_IN, size);
CUDA_ERROR(err, "Failed to allocate device vector IN");
int *d_OUT = NULL;
err = cudaMalloc((void**) &d_OUT, size);
CUDA_ERROR(err, "Failed to allocate device vector OUT");
int *d_SUM_1 = NULL;
err = cudaMalloc((void**) &d_SUM_1,
(1 + ((len - 1) / (BLOCK_SIZE * 2))) * sizeof(int));
	CUDA_ERROR(err, "Failed to allocate device vector SUM_1");
int *d_SUM_1_Scanned = NULL;
err = cudaMalloc((void**) &d_SUM_1_Scanned,
(1 + ((len - 1) / (BLOCK_SIZE * 2))) * sizeof(int));
	CUDA_ERROR(err, "Failed to allocate device vector SUM_1_Scanned");
int *d_SUM_2 = NULL;
err = cudaMalloc((void**) &d_SUM_2, (BLOCK_SIZE << 1) * sizeof(int));
	CUDA_ERROR(err, "Failed to allocate device vector SUM_2");
int *d_SUM_2_Scanned = NULL;
err = cudaMalloc((void**) &d_SUM_2_Scanned,
(BLOCK_SIZE << 1) * sizeof(int));
	CUDA_ERROR(err, "Failed to allocate device vector SUM_2_Scanned");
// copy the memory from the host to the device
err = cudaMemcpy(d_IN, h_IN, size, cudaMemcpyHostToDevice);
CUDA_ERROR(err, "Failed to copy array IN from host to device");
// size of the grid for each level
int blocksPerGridLevel1 = 1 + ((len - 1) / (BLOCK_SIZE * 2));
int blocksPerGridLevel2 = 1 + ceil(blocksPerGridLevel1 / (BLOCK_SIZE << 1));
int blocksPerGridLevel3 = 1 + ceil(blocksPerGridLevel2 / (BLOCK_SIZE << 1));
// -----------------------------------------------------------
// EXECUTION
// -----------------------------------------------------------
// choosing the level on which to run the kernels
// based on the size of the grids
// if level one grid size is equal to 1 then single
// LEVEL 1 is enough to scan the whole array
if (blocksPerGridLevel1 == 1) {
// record the start time
cudaEventRecord(d_start, 0);
// execute the actual kernel
block_scan_full<<<blocksPerGridLevel1, BLOCK_SIZE>>>(d_IN, d_OUT, len,
NULL, 0);
// record the stop time
cudaEventRecord(d_stop, 0);
cudaEventSynchronize(d_stop);
cudaDeviceSynchronize();
}
// if level two grid size is equal to 1 then two (LEVEL 2)
// scans are required to scan the whole array
else if (blocksPerGridLevel2 == 1) {
// record the start time
cudaEventRecord(d_start, 0);
// execute the actual kernels
block_scan_full<<<blocksPerGridLevel1, BLOCK_SIZE>>>(d_IN, d_OUT, len,
d_SUM_1, 1);
block_scan_full<<<blocksPerGridLevel2, BLOCK_SIZE>>>(d_SUM_1,
d_SUM_1_Scanned, blocksPerGridLevel1, NULL, 0);
add_to_block<<<blocksPerGridLevel1, BLOCK_SIZE>>>(d_OUT, len,
d_SUM_1_Scanned);
// record the stop time
cudaEventRecord(d_stop, 0);
cudaEventSynchronize(d_stop);
cudaDeviceSynchronize();
}
// if level 3 grid size is equal to 1 then three (LEVEL 3)
// scans are required to scan the whole array
else if (blocksPerGridLevel3 == 1) {
// record the start time
cudaEventRecord(d_start, 0);
// execute the actual kernels
block_scan_full<<<blocksPerGridLevel1, BLOCK_SIZE>>>(d_IN, d_OUT, len,
d_SUM_1, 1);
block_scan_full<<<blocksPerGridLevel2, BLOCK_SIZE>>>(d_SUM_1,
d_SUM_1_Scanned, blocksPerGridLevel1, d_SUM_2, 1);
block_scan_full<<<1, BLOCK_SIZE>>>(d_SUM_2, d_SUM_2_Scanned,
blocksPerGridLevel2, NULL, 0);
add_to_block<<<blocksPerGridLevel2, BLOCK_SIZE>>>(d_SUM_1_Scanned,
blocksPerGridLevel1, d_SUM_2_Scanned);
add_to_block<<<blocksPerGridLevel1, BLOCK_SIZE>>>(d_OUT, len,
d_SUM_1_Scanned);
// record the stop time
cudaEventRecord(d_stop, 0);
cudaEventSynchronize(d_stop);
cudaDeviceSynchronize();
}
// if none of the conditions above is met, it means that the array is too
// large to be scanned in 3 level scan, therefore we print the error message and return
else {
fprintf(stderr,
"The array size=%d is too large to be scanned with level 3 scan!\n",
len);
// using goto is discouraged, however, in such situations
// where in the error conditions exit or cleanup is required
// it is considered idiomatic
goto cleanup;
}
// check whether the run was successful
err = cudaGetLastError();
CUDA_ERROR(err, "Failed to launch block scan kernel");
// get the duration it took for the kernels to execute
err = cudaEventElapsedTime(&d_msecs, d_start, d_stop);
CUDA_ERROR(err, "Failed to get elapsed time");
// print the time elapsed
printf("Full block scan with %d elements took = %.5fmSecs\n", len, d_msecs);
// copy the result from the device back to the host
err = cudaMemcpy(h_OUT, d_OUT, size, cudaMemcpyDeviceToHost);
CUDA_ERROR(err, "Failed to copy array OUT from device to host");
// -----------------------------------------------------------
// CLEANUP
// -----------------------------------------------------------
cleanup:
// Free device global memory
CUDA_ERROR(cudaFree(d_IN), "Failed to free device vector IN");
CUDA_ERROR(cudaFree(d_OUT), "Failed to free device vector OUT");
CUDA_ERROR(cudaFree(d_SUM_1), "Failed to free device vector SUM_1");
CUDA_ERROR(cudaFree(d_SUM_1_Scanned),
"Failed to free device vector SUM_1_Scanned");
CUDA_ERROR(cudaFree(d_SUM_2), "Failed to free device vector SUM_2");
CUDA_ERROR(cudaFree(d_SUM_2_Scanned),
"Failed to free device vector SUM_2_Scanned");
// Clean up the Device timer event objects
cudaEventDestroy(d_start);
cudaEventDestroy(d_stop);
// Reset the device and exit
err = cudaDeviceReset();
CUDA_ERROR(err, "Failed to reset the device");
}
/**
* Host main routine
*/
int main(void) {
	// error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// create host stopwatch times
StopWatchInterface * timer = NULL;
sdkCreateTimer(&timer);
double h_msecs;
	// size of the array to scan
int numElements = 10000000;
size_t size = numElements * sizeof(int);
// allocate the memory on the host for the arrays
int *h_IN = (int *) malloc(size);
int *h_OUT = (int *) malloc(size);
int *h_OUT_CUDA = (int *) malloc(size);
// verify the host allocations
if (h_IN == NULL || h_OUT == NULL || h_OUT_CUDA == NULL) {
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
	// initialise the host input with random values in [0, 9]
for (int i = 0; i < numElements; i++) {
h_IN[i] = rand() % 10;
}
// sequential scan
sdkStartTimer(&timer);
sequential_scan(h_IN, h_OUT, numElements);
sdkStopTimer(&timer);
h_msecs = sdkGetTimerValue(&timer);
printf("Sequential scan on host of %d elements took = %.5fmSecs\n",
numElements, h_msecs);
// -----------------------------------------------------------
	// PERFORM FULL BLOCK SCAN WITH BCAO
// -----------------------------------------------------------
full_block_scan_BCAO(h_IN, h_OUT_CUDA, numElements);
compare_results(h_OUT, h_OUT_CUDA, numElements);
// -----------------------------------------------------------
	// PERFORM FULL BLOCK SCAN WITHOUT BCAO
// -----------------------------------------------------------
full_block_scan(h_IN, h_OUT_CUDA, numElements);
compare_results(h_OUT, h_OUT_CUDA, numElements);
// -----------------------------------------------------------
// SIMPLE BLOCK SCANS
// -----------------------------------------------------------
// create device timer
cudaEvent_t d_start, d_stop;
float d_msecs;
cudaEventCreate(&d_start);
cudaEventCreate(&d_stop);
// allocate memory for all the possible vectors needed for the execution
int *d_IN = NULL;
err = cudaMalloc((void **) &d_IN, size);
CUDA_ERROR(err, "Failed to allocate device vector IN");
int *d_OUT = NULL;
err = cudaMalloc((void**) &d_OUT, size);
CUDA_ERROR(err, "Failed to allocate device vector OUT");
// copy the memory from the host to the device
err = cudaMemcpy(d_IN, h_IN, size, cudaMemcpyHostToDevice);
CUDA_ERROR(err, "Failed to copy array IN from host to device");
// size of the grid for each level
int blocksPerGridLevel1 = 1 + ((numElements - 1) / (BLOCK_SIZE * 2));
// -----------------------------------------------------------
// BLOCK SCAN WITH BCAO
// -----------------------------------------------------------
// record the start time
cudaEventRecord(d_start, 0);
// execute the actual kernel
block_scan_full_BCAO<<<blocksPerGridLevel1, BLOCK_SIZE>>>(d_IN, d_OUT,
numElements,
NULL, 0);
// record the stop time
cudaEventRecord(d_stop, 0);
cudaEventSynchronize(d_stop);
cudaDeviceSynchronize();
// check whether the run was successful
err = cudaGetLastError();
CUDA_ERROR(err, "Failed to launch block scan kernel");
// get the duration it took for the kernels to execute
err = cudaEventElapsedTime(&d_msecs, d_start, d_stop);
CUDA_ERROR(err, "Failed to get elapsed time");
// print the time elapsed
printf("Block with bank avoidance scan %d elements took = %.5fmSecs\n",
numElements, d_msecs);
// -----------------------------------------------------------
// BLOCK SCAN WITHOUT BCAO
// -----------------------------------------------------------
// record the start time
cudaEventRecord(d_start, 0);
// execute the actual kernel
block_scan_full<<<blocksPerGridLevel1, BLOCK_SIZE>>>(d_IN, d_OUT,
numElements,
NULL, 0);
// record the stop time
cudaEventRecord(d_stop, 0);
cudaEventSynchronize(d_stop);
cudaDeviceSynchronize();
// check whether the run was successful
err = cudaGetLastError();
CUDA_ERROR(err, "Failed to launch block scan kernel");
	// get the duration it took for the kernels to execute
err = cudaEventElapsedTime(&d_msecs, d_start, d_stop);
CUDA_ERROR(err, "Failed to get elapsed time");
// print the time elapsed
printf("Block scan %d elements took = %.5fmSecs\n", numElements, d_msecs);
// Free device global memory
CUDA_ERROR(cudaFree(d_IN), "Failed to free device vector IN");
CUDA_ERROR(cudaFree(d_OUT), "Failed to free device vector OUT");
// Clean up the Device timer event objects
cudaEventDestroy(d_start);
cudaEventDestroy(d_stop);
// Reset the device and exit
err = cudaDeviceReset();
CUDA_ERROR(err, "Failed to reset the device");
// Free host memory
free(h_IN);
free(h_OUT);
free(h_OUT_CUDA);
// Clean up the Host timer
sdkDeleteTimer(&timer);
return 0;
}
|
620275b89f493ef8cb28fcb25f0bf1114eb4d445.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <algorithm>
#include "sssp.cuh"
#include "sssp_config.cuh"
#define VERT(x) ((x) & 0xffffffff)
#define DISTANCE(x) ((x) >> 32)
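// Heap items are 64-bit keys unpacked by the macros above; a key is presumably
// built as ((unsigned long long)distance << 32) | vertex inside the kernels in
// sssp.cuh, so ordering by key orders entries primarily by tentative distance.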
struct input_line {
int src, dst;
};
//From StackOverflow
int64_t timespecDiff(struct timespec *timeA_p, struct timespec *timeB_p)
{
return ((timeA_p->tv_sec * 1000000000) + timeA_p->tv_nsec) -
((timeB_p->tv_sec * 1000000000) + timeB_p->tv_nsec);
}
bool sort_input(const input_line &a, const input_line &b) {return a.src < b.src || (a.src == b.src && a.dst < b.dst);}
int main(int argc,char ** argv){
if(argc != 3){
printf("Usage: sssp [graph filename] [number of lines]\n");
return 0;
}
int * edge_list_index;
int * edge_dst;
int * edge_weight;
int * distance;
int vert_count = 0;
FILE * fin = fopen(argv[1],"r");
FILE * fout = fopen("output.txt", "w");
//FILE * ftrace = fopen("trace.txt", "w");
int input_line_count;
sscanf(argv[2], " %d", &input_line_count);
input_line * lines = new input_line[input_line_count * 2];
for(int i = 0;i < input_line_count;++i){
fscanf(fin, " %d %d", &(lines[i * 2].src), &(lines[i * 2].dst));
if(lines[i * 2].src >= vert_count) {vert_count = lines[i * 2].src + 1;}
if(lines[i * 2].dst >= vert_count) {vert_count = lines[i * 2].dst + 1;}
lines[i * 2 + 1].src = lines[i * 2].dst;
lines[i * 2 + 1].dst = lines[i * 2].src;
}
std::sort(lines, lines + input_line_count * 2, sort_input);
int edge_count = input_line_count * 2;
edge_list_index = new int[vert_count + 1];
edge_dst = new int[edge_count];
edge_weight = new int[edge_count];
distance = new int[vert_count];
int curr_vert = 0;
edge_list_index[0] = 0;
for(int i = 0;i < edge_count;++i){
while(curr_vert < lines[i].src){++curr_vert; edge_list_index[curr_vert] = i;}
edge_dst[i] = lines[i].dst;
edge_weight[i] = 1;
}
edge_list_index[vert_count] = edge_count;
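/* Note: edge_list_index/edge_dst built above form a CSR-style adjacency list: the
   neighbours of vertex v occupy edge_dst[edge_list_index[v] .. edge_list_index[v+1]).
   A host-side sketch of how they would be walked (illustrative only):
     for (int e = edge_list_index[v]; e < edge_list_index[v + 1]; ++e) {
         int u = edge_dst[e];      // neighbour of v
         int w = edge_weight[e];   // always 1 in this program
     }
*/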
for(int i = 0;i < vert_count;++i){distance[i] = 2147483647;}
distance[0] = 0;
int * gpu_edge_list_index, * gpu_edge_dst, * gpu_edge_weight, * gpu_distance;
hipMalloc((void **)&gpu_edge_list_index, sizeof(int) * (vert_count + 1));
hipMemcpy(gpu_edge_list_index, edge_list_index, sizeof(int) * (vert_count + 1), hipMemcpyHostToDevice);
hipMalloc((void **)&gpu_edge_dst, sizeof(int) * edge_count);
hipMemcpy(gpu_edge_dst, edge_dst, sizeof(int) * edge_count, hipMemcpyHostToDevice);
hipMalloc((void **)&gpu_edge_weight, sizeof(int) * edge_count);
hipMemcpy(gpu_edge_weight, edge_weight, sizeof(int) * edge_count, hipMemcpyHostToDevice);
hipMalloc((void **)&gpu_distance, sizeof(int) * vert_count);
hipMemcpy(gpu_distance, distance, sizeof(int) * vert_count, hipMemcpyHostToDevice);
int batch_count = vert_count * 5 / CONFIG_BATCH_SIZE;
if(batch_count < 3) {batch_count = 3;}
Heap_With_Aux < unsigned long long, int > cpu_heap(batch_count, CONFIG_BATCH_SIZE, 1ull << 63, 0, CONFIG_THREAD_GROUP_NUM), * gpu_heap;
hipMalloc((void **)&gpu_heap, sizeof(Heap_With_Aux < unsigned long long, int >));
hipMemcpy(gpu_heap, &cpu_heap, sizeof(Heap_With_Aux < unsigned long long, int >), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( insertInitNode), dim3(1), dim3(1), 4906, 0, gpu_heap, 0);
unsigned long long * gpu_inserted_nodes;
hipMalloc((void **)&gpu_inserted_nodes, sizeof(unsigned long long) * CONFIG_THREAD_GROUP_NUM * CONFIG_BATCH_SIZE * CONFIG_CHUNK_SIZE);
int * gpu_term_sig;
hipMalloc((void **)&gpu_term_sig, sizeof(int) * CONFIG_THREAD_GROUP_NUM);
printf("Preparation complete\n");
struct timespec start_time, end_time;
clock_gettime(CLOCK_MONOTONIC, &start_time);
int iteration = 0;
do{
hipMemset(gpu_term_sig, 0, sizeof(int) * CONFIG_THREAD_GROUP_NUM);
hipLaunchKernelGGL(( ssspKernel), dim3(CONFIG_THREAD_GROUP_NUM), dim3(CONFIG_THREAD_NUM), 36864, 0, gpu_heap, gpu_edge_list_index, gpu_edge_dst, gpu_edge_weight, gpu_distance, gpu_inserted_nodes, gpu_term_sig);
++iteration;
//if(iteration % 100 == 0) {printf("%d\n", iteration);}
hipMemcpy(&cpu_heap, gpu_heap, sizeof(Heap_With_Aux < unsigned long long, int >), hipMemcpyDeviceToHost);
} while(cpu_heap.curr_aux_buf_size > 0 || cpu_heap.heap.itemCount() > 0);
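/* The loop above keeps relaunching ssspKernel until the host-side copy of the heap
   reports both an empty auxiliary buffer and an empty heap, i.e. no frontier work
   remains on the device. */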
clock_gettime(CLOCK_MONOTONIC, &end_time);
printf("Finished in %d iterations\n", iteration);
int64_t duration = timespecDiff(&end_time, &start_time);
printf("Microseconds: %ld\n", duration / 1000);
hipMemcpy(distance, gpu_distance, sizeof(int) * vert_count, hipMemcpyDeviceToHost);
for(int i = 0;i < vert_count;++i){
fprintf(fout, "%d %d\n", i, distance[i]);
}
return 0;
}
| 620275b89f493ef8cb28fcb25f0bf1114eb4d445.cu | #include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <algorithm>
#include "sssp.cuh"
#include "sssp_config.cuh"
#define VERT(x) ((x) & 0xffffffff)
#define DISTANCE(x) ((x) >> 32)
struct input_line {
int src, dst;
};
//From StackOverflow
int64_t timespecDiff(struct timespec *timeA_p, struct timespec *timeB_p)
{
return ((timeA_p->tv_sec * 1000000000) + timeA_p->tv_nsec) -
((timeB_p->tv_sec * 1000000000) + timeB_p->tv_nsec);
}
bool sort_input(const input_line &a, const input_line &b) {return a.src < b.src || (a.src == b.src && a.dst < b.dst);}
int main(int argc,char ** argv){
if(argc != 3){
printf("Usage: sssp [graph filename] [number of lines]\n");
return 0;
}
int * edge_list_index;
int * edge_dst;
int * edge_weight;
int * distance;
int vert_count = 0;
FILE * fin = fopen(argv[1],"r");
FILE * fout = fopen("output.txt", "w");
//FILE * ftrace = fopen("trace.txt", "w");
int input_line_count;
sscanf(argv[2], " %d", &input_line_count);
input_line * lines = new input_line[input_line_count * 2];
for(int i = 0;i < input_line_count;++i){
fscanf(fin, " %d %d", &(lines[i * 2].src), &(lines[i * 2].dst));
if(lines[i * 2].src >= vert_count) {vert_count = lines[i * 2].src + 1;}
if(lines[i * 2].dst >= vert_count) {vert_count = lines[i * 2].dst + 1;}
lines[i * 2 + 1].src = lines[i * 2].dst;
lines[i * 2 + 1].dst = lines[i * 2].src;
}
std::sort(lines, lines + input_line_count * 2, sort_input);
int edge_count = input_line_count * 2;
edge_list_index = new int[vert_count + 1];
edge_dst = new int[edge_count];
edge_weight = new int[edge_count];
distance = new int[vert_count];
int curr_vert = 0;
edge_list_index[0] = 0;
for(int i = 0;i < edge_count;++i){
while(curr_vert < lines[i].src){++curr_vert; edge_list_index[curr_vert] = i;}
edge_dst[i] = lines[i].dst;
edge_weight[i] = 1;
}
edge_list_index[vert_count] = edge_count;
for(int i = 0;i < vert_count;++i){distance[i] = 2147483647;}
distance[0] = 0;
int * gpu_edge_list_index, * gpu_edge_dst, * gpu_edge_weight, * gpu_distance;
cudaMalloc((void **)&gpu_edge_list_index, sizeof(int) * (vert_count + 1));
cudaMemcpy(gpu_edge_list_index, edge_list_index, sizeof(int) * (vert_count + 1), cudaMemcpyHostToDevice);
cudaMalloc((void **)&gpu_edge_dst, sizeof(int) * edge_count);
cudaMemcpy(gpu_edge_dst, edge_dst, sizeof(int) * edge_count, cudaMemcpyHostToDevice);
cudaMalloc((void **)&gpu_edge_weight, sizeof(int) * edge_count);
cudaMemcpy(gpu_edge_weight, edge_weight, sizeof(int) * edge_count, cudaMemcpyHostToDevice);
cudaMalloc((void **)&gpu_distance, sizeof(int) * vert_count);
cudaMemcpy(gpu_distance, distance, sizeof(int) * vert_count, cudaMemcpyHostToDevice);
int batch_count = vert_count * 5 / CONFIG_BATCH_SIZE;
if(batch_count < 3) {batch_count = 3;}
Heap_With_Aux < unsigned long long, int > cpu_heap(batch_count, CONFIG_BATCH_SIZE, 1ull << 63, 0, CONFIG_THREAD_GROUP_NUM), * gpu_heap;
cudaMalloc((void **)&gpu_heap, sizeof(Heap_With_Aux < unsigned long long, int >));
cudaMemcpy(gpu_heap, &cpu_heap, sizeof(Heap_With_Aux < unsigned long long, int >), cudaMemcpyHostToDevice);
insertInitNode<<<1, 1, 4906>>>(gpu_heap, 0);
unsigned long long * gpu_inserted_nodes;
cudaMalloc((void **)&gpu_inserted_nodes, sizeof(unsigned long long) * CONFIG_THREAD_GROUP_NUM * CONFIG_BATCH_SIZE * CONFIG_CHUNK_SIZE);
int * gpu_term_sig;
cudaMalloc((void **)&gpu_term_sig, sizeof(int) * CONFIG_THREAD_GROUP_NUM);
printf("Preparation complete\n");
struct timespec start_time, end_time;
clock_gettime(CLOCK_MONOTONIC, &start_time);
int iteration = 0;
do{
cudaMemset(gpu_term_sig, 0, sizeof(int) * CONFIG_THREAD_GROUP_NUM);
ssspKernel<<<CONFIG_THREAD_GROUP_NUM, CONFIG_THREAD_NUM, 36864>>>(gpu_heap, gpu_edge_list_index, gpu_edge_dst, gpu_edge_weight, gpu_distance, gpu_inserted_nodes, gpu_term_sig);
++iteration;
//if(iteration % 100 == 0) {printf("%d\n", iteration);}
cudaMemcpy(&cpu_heap, gpu_heap, sizeof(Heap_With_Aux < unsigned long long, int >), cudaMemcpyDeviceToHost);
} while(cpu_heap.curr_aux_buf_size > 0 || cpu_heap.heap.itemCount() > 0);
clock_gettime(CLOCK_MONOTONIC, &end_time);
printf("Finished in %d iterations\n", iteration);
int64_t duration = timespecDiff(&end_time, &start_time);
printf("Microseconds: %ld\n", duration / 1000);
cudaMemcpy(distance, gpu_distance, sizeof(int) * vert_count, cudaMemcpyDeviceToHost);
for(int i = 0;i < vert_count;++i){
fprintf(fout, "%d %d\n", i, distance[i]);
}
return 0;
}
|
d4b4f1c469ac3ca264b8fbf38cd23f33772b341d.hip | // !!! This is a file automatically generated by hipify!!!
#include "funset.hpp"
#include <iostream>
#include <hip/hip_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_")
#include <device_launch_parameters.h>
#include "common.hpp"
// reference: C:\ProgramData\NVIDIA Corporation\CUDA Samples\v8.0\0_Simple\matrixMul
/* __global__: function type qualifier; the function runs on the device; it is called from the
   host (and, for compute capability 3.2 and above, also from the device); the declared function
   must return void; calls to such a function are asynchronous, i.e. they return before the
   device has finished executing it; every call must specify an execution configuration, i.e.
   the grid and block dimensions used on the device and the associated stream (inserted with the
   <<< >>> operator); "a kernel" marks this as a kernel function (a CUDA parallel function
   running on the GPU is called a kernel and must be declared with the __global__ qualifier);*/
template <int BLOCK_SIZE>
__global__ static void matrix_mul(const float* A, const float* B, float* C, int wA, int wB)
{
/* gridDim: built-in variable describing the dimensions of the thread grid; it is a
   constant for all thread blocks and stores the size of the grid in each dimension,
   i.e. the number of thread blocks per grid. A grid is three-dimensional, of type dim3;
   blockDim: built-in variable giving the dimensions of each block, of type dim3, holding
   the block size in the three dimensions; it is a constant for all thread blocks and
   stores the number of threads per block in each dimension;
   blockIdx: built-in variable holding the index of the thread block currently executing
   the device code; it gives the position of the current thread's block within the grid;
   blockIdx.x ranges over [0,gridDim.x-1] and blockIdx.y over [0, gridDim.y-1]; of type uint3,
   it contains the block's index along each grid dimension;
   threadIdx: built-in variable holding the index of the thread currently executing the
   device code; it gives the position of the current thread within its block; threadIdx.x
   is available for 1D blocks, threadIdx.y additionally for 2D, threadIdx.z for 3D; of type
   uint3, it contains the thread's index along each block dimension */
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
/* __shared__: variable type qualifier; used alone or together with __device__, the
   declared variable resides in the shared memory of a block, has the same lifetime as
   the block, and is accessible only by the threads within that block; __shared__ and
   __constant__ variables have static storage by default; extern may precede __shared__,
   in which case the size is set by the execution configuration; a __shared__ variable
   cannot be initialized in its declaration; adding the CUDA C keyword __shared__ to a
   declaration places the variable in shared memory, and the CUDA C compiler treats
   shared-memory variables differently from ordinary variables
*/
// Declaration of the shared memory array As used to store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
/* __syncthreads: synchronizes the threads within a thread block; the CUDA architecture
   guarantees that no thread executes an instruction after __syncthreads() until every
   thread in the block has reached it; threads in the same block exchange data through
   shared memory, and barrier synchronization (calling __syncthreads() wherever needed in
   the kernel) guarantees that the data is shared correctly between threads; when timing
   with clock(), call clock() once at the start and once at the end of the measured code
   section inside the kernel and record the results; since after __syncthreads() every
   thread of a block takes the same time, it is enough to record the execution time per
   block rather than per thread */
// Synchronize to make sure the matrices are loaded
__syncthreads();
/* reference:
https://devblogs.nvidia.com/parallelforall/new-compiler-features-cuda-8/
https://stackoverflow.com/questions/22278631/what-does-pragma-unroll-do-exactly-does-it-affect-the-number-of-threads/22279341
   By default the compiler unrolls loops only a small number of times; #pragma unroll
   specifies how many times a loop is unrolled (the programmer must ensure that this
   unrolling is correct); the pragma must immediately precede the loop it applies to and
   may be followed by a number giving the unroll count; #pragma unroll 1 forbids the
   compiler from unrolling the loop. If no count is given, loops with a constant trip
   count are fully unrolled, while loops with an unknown trip count are not unrolled.
*/
#pragma unroll
// Multiply the two matrices together; each thread computes one element of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
__global__ static void matrix_mul(const float* A, const float* B, float* C, int colsA, int rowsA, int colsB, int rowsB)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float sum{ 0.f };
for (int t = 0; t < colsA; ++t) {
sum += A[y * colsA + t] * B[t * colsB + x];
}
C[offset] = sum;
}
int matrix_mul_gpu(const float* A, const float* B, float* C, int colsA, int rowsA, int colsB, int rowsB, float* elapsed_time)
{
CHECK(colsA == rowsB);
/* hipEvent_t: CUDA event type (a struct); a CUDA event is used to measure the time the
   GPU spends on a task; an event is essentially a GPU timestamp, and because events are
   implemented on the GPU they are not suited for timing mixed code that contains both
   device and host code*/
hipEvent_t start, stop;
// hipEventCreate: create an event object; asynchronous
hipEventCreate(&start);
hipEventCreate(&stop);
// hipEventRecord: record an event; asynchronous; start records the starting time
hipEventRecord(start, 0);
size_t lengthA{ colsA * rowsA * sizeof(float) }, lengthB{ colsB * rowsB * sizeof(float) };
size_t lengthC{ rowsA * colsB * sizeof(float) };
float *d_A{ nullptr }, *d_B{ nullptr }, *d_C{ nullptr };
// hipMalloc: allocate memory on the device
hipMalloc(&d_A, lengthA);
hipMalloc(&d_B, lengthB);
hipMalloc(&d_C, lengthC);
/* hipMemcpy: copies data between host and device; the fourth argument must be one of:
 (1). hipMemcpyHostToHost: copy data from host to host
 (2). hipMemcpyHostToDevice: copy data from host to device
 (3). hipMemcpyDeviceToHost: copy data from device to host
 (4). hipMemcpyDeviceToDevice: copy data from device to device
 (5). hipMemcpyDefault: infer the copy direction from the pointer values; requires
 unified virtual addressing (CUDA 6.0 and above)
 cudaMemcpy is synchronous with respect to the host */
hipMemcpy(d_A, A, lengthA, hipMemcpyHostToDevice);
hipMemcpy(d_B, B, lengthB, hipMemcpyHostToDevice);
//hipMemcpy(d_C, C, lengthC, hipMemcpyHostToDevice);
const int block_size{ 32 };
/* dim3: a built-in vector type based on uint3, equivalent to a struct of three
   unsigned ints; it can describe a three-dimensional array, and any element of a
   dim3 variable that is not explicitly assigned defaults to 1 */
dim3 dimsA(colsA, rowsA, 1);
dim3 dimsB(colsB, rowsB, 1);
CHECK(dimsA.x == dimsB.y);
//fprintf(stderr, "MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
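// Note: the integer divisions above assume the matrix dimensions are exact multiples of
// block_size (e.g. 512x512 matrices with block_size 32 give a 16x16 grid); for other
// sizes the rightmost/bottom partial tiles would simply not be launched.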
/* <<< >>>: the operator CUDA introduces to specify the thread grid and block dimensions
   and to pass execution parameters to the CUDA compiler and runtime system; it states how
   many threads the kernel uses and how they are organized; the parameters inside the angle
   brackets are not passed to the device code but tell the runtime how to launch it, while
   the arguments for the device code itself are passed in parentheses like a standard
   function call; devices of different compute capability place different limits on the
   total number and organization of threads; sufficient space must be allocated for every
   array or variable used in the kernel before the kernel is called, otherwise the GPU
   computation fails, e.g. with out-of-bounds accesses;
   with the runtime API the execution configuration is written between the kernel name and
   the argument list as <<<Dg,Db,Ns,S>>>, where Dg is a dim3 variable setting the grid
   dimensions and sizes (the grid then has Dg.x*Dg.y*Dg.z blocks); Db is a dim3 variable
   setting the block dimensions and sizes (each block then has Db.x*Db.y*Db.z threads);
   Ns is a size_t giving the shared memory dynamically allocated per block for this call,
   usable by variables declared as external arrays (extern __shared__); Ns is optional and
   defaults to 0; S is of type cudaStream_t and sets the stream associated with the kernel;
   S is optional and defaults to 0. */
hipLaunchKernelGGL(( matrix_mul<block_size>) , dim3(grid), dim3(threads) , 0, 0, d_A, d_B, d_C, dimsA.x, dimsB.x); // runs faster
//matrix_mul<< < grid, threads >> >(d_A, d_B, d_C, colsA, rowsA, colsB, rowsB);
/* hipDeviceSynchronize: kernel launches are asynchronous; to locate a launch error one
   usually adds a cudaDeviceSynchronize call to synchronize; it blocks until all previously
   requested work has completed and returns an error if any of that work failed; when a
   program uses several streams that must communicate at some point, a synchronization
   statement (cudaDeviceSynchronize) is required at that point;
reference: https://stackoverflow.com/questions/11888772/when-to-call-cudadevicesynchronize */
//hipDeviceSynchronize();
hipMemcpy(C, d_C, lengthC, hipMemcpyDeviceToHost);
// hipFree: free device memory allocated with cudaMalloc
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// hipEventRecord: record an event; asynchronous; stop records the end time
hipEventRecord(stop, 0);
// hipEventSynchronize: event synchronization, waits for an event to complete; asynchronous
hipEventSynchronize(stop);
// hipEventElapsedTime: computes the elapsed time between two events, in milliseconds; asynchronous
hipEventElapsedTime(elapsed_time, start, stop);
// hipEventDestroy: destroy the event object; asynchronous
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
| d4b4f1c469ac3ca264b8fbf38cd23f33772b341d.cu | #include "funset.hpp"
#include <iostream>
#include <cuda_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_")
#include <device_launch_parameters.h>
#include "common.hpp"
// reference: C:\ProgramData\NVIDIA Corporation\CUDA Samples\v8.0\0_Simple\matrixMul
/* __global__: function type qualifier; the function runs on the device; it is called from the
   host (and, for compute capability 3.2 and above, also from the device); the declared function
   must return void; calls to such a function are asynchronous, i.e. they return before the
   device has finished executing it; every call must specify an execution configuration, i.e.
   the grid and block dimensions used on the device and the associated stream (inserted with the
   <<< >>> operator); "a kernel" marks this as a kernel function (a CUDA parallel function
   running on the GPU is called a kernel and must be declared with the __global__ qualifier);*/
template <int BLOCK_SIZE>
__global__ static void matrix_mul(const float* A, const float* B, float* C, int wA, int wB)
{
/* gridDim: built-in variable describing the dimensions of the thread grid; it is a
   constant for all thread blocks and stores the size of the grid in each dimension,
   i.e. the number of thread blocks per grid. A grid is three-dimensional, of type dim3;
   blockDim: built-in variable giving the dimensions of each block, of type dim3, holding
   the block size in the three dimensions; it is a constant for all thread blocks and
   stores the number of threads per block in each dimension;
   blockIdx: built-in variable holding the index of the thread block currently executing
   the device code; it gives the position of the current thread's block within the grid;
   blockIdx.x ranges over [0,gridDim.x-1] and blockIdx.y over [0, gridDim.y-1]; of type uint3,
   it contains the block's index along each grid dimension;
   threadIdx: built-in variable holding the index of the thread currently executing the
   device code; it gives the position of the current thread within its block; threadIdx.x
   is available for 1D blocks, threadIdx.y additionally for 2D, threadIdx.z for 3D; of type
   uint3, it contains the thread's index along each block dimension */
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
/* __shared__: variable type qualifier; used alone or together with __device__, the
   declared variable resides in the shared memory of a block, has the same lifetime as
   the block, and is accessible only by the threads within that block; __shared__ and
   __constant__ variables have static storage by default; extern may precede __shared__,
   in which case the size is set by the execution configuration; a __shared__ variable
   cannot be initialized in its declaration; adding the CUDA C keyword __shared__ to a
   declaration places the variable in shared memory, and the CUDA C compiler treats
   shared-memory variables differently from ordinary variables */
// Declaration of the shared memory array As used to store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
/* __syncthreads: synchronizes the threads within a thread block; the CUDA architecture
   guarantees that no thread executes an instruction after __syncthreads() until every
   thread in the block has reached it; threads in the same block exchange data through
   shared memory, and barrier synchronization (calling __syncthreads() wherever needed in
   the kernel) guarantees that the data is shared correctly between threads; when timing
   with clock(), call clock() once at the start and once at the end of the measured code
   section inside the kernel and record the results; since after __syncthreads() every
   thread of a block takes the same time, it is enough to record the execution time per
   block rather than per thread */
// Synchronize to make sure the matrices are loaded
__syncthreads();
/* reference:
https://devblogs.nvidia.com/parallelforall/new-compiler-features-cuda-8/
https://stackoverflow.com/questions/22278631/what-does-pragma-unroll-do-exactly-does-it-affect-the-number-of-threads/22279341
   By default the compiler unrolls loops only a small number of times; #pragma unroll
   specifies how many times a loop is unrolled (the programmer must ensure that this
   unrolling is correct); the pragma must immediately precede the loop it applies to and
   may be followed by a number giving the unroll count; #pragma unroll 1 forbids the
   compiler from unrolling the loop. If no count is given, loops with a constant trip
   count are fully unrolled, while loops with an unknown trip count are not unrolled.
*/
#pragma unroll
// Multiply the two matrices together; each thread computes one element of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
__global__ static void matrix_mul(const float* A, const float* B, float* C, int colsA, int rowsA, int colsB, int rowsB)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float sum{ 0.f };
for (int t = 0; t < colsA; ++t) {
sum += A[y * colsA + t] * B[t * colsB + x];
}
C[offset] = sum;
}
int matrix_mul_gpu(const float* A, const float* B, float* C, int colsA, int rowsA, int colsB, int rowsB, float* elapsed_time)
{
CHECK(colsA == rowsB);
/* cudaEvent_t: CUDA event type (a struct); a CUDA event is used to measure the time the
   GPU spends on a task; an event is essentially a GPU timestamp, and because events are
   implemented on the GPU they are not suited for timing mixed code that contains both
   device and host code*/
cudaEvent_t start, stop;
// cudaEventCreate: create an event object; asynchronous
cudaEventCreate(&start);
cudaEventCreate(&stop);
// cudaEventRecord: record an event; asynchronous; start records the starting time
cudaEventRecord(start, 0);
size_t lengthA{ colsA * rowsA * sizeof(float) }, lengthB{ colsB * rowsB * sizeof(float) };
size_t lengthC{ rowsA * colsB * sizeof(float) };
float *d_A{ nullptr }, *d_B{ nullptr }, *d_C{ nullptr };
// cudaMalloc: allocate memory on the device
cudaMalloc(&d_A, lengthA);
cudaMalloc(&d_B, lengthB);
cudaMalloc(&d_C, lengthC);
/* cudaMemcpy: copies data between host and device; the fourth argument must be one of:
 (1). cudaMemcpyHostToHost: copy data from host to host
 (2). cudaMemcpyHostToDevice: copy data from host to device
 (3). cudaMemcpyDeviceToHost: copy data from device to host
 (4). cudaMemcpyDeviceToDevice: copy data from device to device
 (5). cudaMemcpyDefault: infer the copy direction from the pointer values; requires
 unified virtual addressing (CUDA 6.0 and above)
 cudaMemcpy is synchronous with respect to the host */
cudaMemcpy(d_A, A, lengthA, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, lengthB, cudaMemcpyHostToDevice);
//cudaMemcpy(d_C, C, lengthC, cudaMemcpyHostToDevice);
const int block_size{ 32 };
/* dim3: a built-in vector type based on uint3, equivalent to a struct of three
   unsigned ints; it can describe a three-dimensional array, and any element of a
   dim3 variable that is not explicitly assigned defaults to 1 */
dim3 dimsA(colsA, rowsA, 1);
dim3 dimsB(colsB, rowsB, 1);
CHECK(dimsA.x == dimsB.y);
//fprintf(stderr, "MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
/* <<< >>>: the operator CUDA introduces to specify the thread grid and block dimensions
   and to pass execution parameters to the CUDA compiler and runtime system; it states how
   many threads the kernel uses and how they are organized; the parameters inside the angle
   brackets are not passed to the device code but tell the runtime how to launch it, while
   the arguments for the device code itself are passed in parentheses like a standard
   function call; devices of different compute capability place different limits on the
   total number and organization of threads; sufficient space must be allocated for every
   array or variable used in the kernel before the kernel is called, otherwise the GPU
   computation fails, e.g. with out-of-bounds accesses;
   with the runtime API the execution configuration is written between the kernel name and
   the argument list as <<<Dg,Db,Ns,S>>>, where Dg is a dim3 variable setting the grid
   dimensions and sizes (the grid then has Dg.x*Dg.y*Dg.z blocks); Db is a dim3 variable
   setting the block dimensions and sizes (each block then has Db.x*Db.y*Db.z threads);
   Ns is a size_t giving the shared memory dynamically allocated per block for this call,
   usable by variables declared as external arrays (extern __shared__); Ns is optional and
   defaults to 0; S is of type cudaStream_t and sets the stream associated with the kernel;
   S is optional and defaults to 0. */
matrix_mul<block_size> <<< grid, threads >>>(d_A, d_B, d_C, dimsA.x, dimsB.x); // runs faster
//matrix_mul<< < grid, threads >> >(d_A, d_B, d_C, colsA, rowsA, colsB, rowsB);
/* cudaDeviceSynchronize: kernel launches are asynchronous; to locate a launch error one
   usually adds a cudaDeviceSynchronize call to synchronize; it blocks until all previously
   requested work has completed and returns an error if any of that work failed; when a
   program uses several streams that must communicate at some point, a synchronization
   statement (cudaDeviceSynchronize) is required at that point;
reference: https://stackoverflow.com/questions/11888772/when-to-call-cudadevicesynchronize */
//cudaDeviceSynchronize();
cudaMemcpy(C, d_C, lengthC, cudaMemcpyDeviceToHost);
// cudaFree: free device memory allocated with cudaMalloc
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// cudaEventRecord: record an event; asynchronous; stop records the end time
cudaEventRecord(stop, 0);
// cudaEventSynchronize: event synchronization, waits for an event to complete; asynchronous
cudaEventSynchronize(stop);
// cudaEventElapsedTime: computes the elapsed time between two events, in milliseconds; asynchronous
cudaEventElapsedTime(elapsed_time, start, stop);
// cudaEventDestroy: destroy the event object; asynchronous
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
ee0921a20e799acbc9c9d8cd48421c2ea6840cf3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//a#########################################################
//a## 2D Acoustic VTI Medium Forward
//a##
//a## Ps :GPU(CUDA)
//a##
//a##/*a***************************
//a##Function for VTI medium modeling,
//a##
//a## Ps: the function of modeling following:
//a##
//a## du/dt_=1/rho*dp/dx_ ,
//a## dv/dt_=1/rho*dp/dy_ ,
//a## dw/dt_=1/rho*dq/dz_ ,
//a## dp/dt_=rho*vpx^2*(du/dx_+dv/dy_)+rho*vp*vpn*dw/dz_ ,
//a## dq/dt_=rho*vp*vpn*(du/dx_+dv/dy_)+rho*vp^2*dw/dz_ ,
//a## vpx^2=vp^2*(1+2*epsilu);
//a## vpn^2=vp^2*(1+2*deta);
//a##
//a##*********a*******************/
//a##
//a## Rong Tao
//a##
//a#########################################################
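/* Note: in update_stress2d below the horizontal stress update uses vp^2*(1+2*epsilu)
   (= vpx^2) and the P<->Q coupling terms use vp^2*sqrt(1+2*deta) (= vp*vpn), i.e. the
   discrete counterparts of the relations listed in the header above. */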
#include<stdio.h>
#include<malloc.h>
#include<math.h>
#include<stdlib.h>
#include<string.h>
#include<cuda_runtime.h>
#define pi 3.141592653
__device__ float d0;
__device__ int mm=4;
__constant__ float c[4]={1.196289,-0.0797526,0.009570313,-0.0006975447};
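/* Note: c[] match the standard 4-term (mm=4) staggered-grid finite-difference weights,
   i.e. an 8th-order approximation; in update_vel2d a derivative is formed as
     df/dx ~ (1/dx) * sum_{m=0}^{3} c[m]*( f[i+(m+1)] - f[i-m] )
   (update_stress2d uses the half-cell-shifted variant) before being scaled by
   dt/dx (dtx) or dt/dz (dtz). */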
void check_gpu_error2d (const char *msg)
/*< check GPU errors >*/
{
hipError_t err = hipGetLastError ();
if (hipSuccess != err) {
printf("Cuda error: %s: %s\n", msg, hipGetErrorString (err));
exit(0);
}
}
/*************func*******************/
void pad_vv2d(int nx,int nz,int nnx,int nnz,int npd,float *ee)
{
int ix,iz,id;
for(id=0;id<nnx*nnz;id++)
{
ix=id/nnz;
iz=id%nnz;
if(ix<npd){
ee[id]=ee[npd*nnz+iz]; //left
}else if(ix>=nnx-npd){
ee[id]=ee[(nnx-npd-1)*nnz+iz];//right
}
}
for(id=0;id<nnx*nnz;id++)
{
ix=id/nnz;
iz=id%nnz;
if(iz<npd){
ee[id]=ee[ix*nnz+npd];//up
}else if(iz>=nnz-npd){
ee[id]=ee[ix*nnz+nnz-npd-1];//down
}
//if(ee[id]==0){printf("ee[%d][%d]==0.0\n",ix,iz);exit(0);}
}
}
/*************func*******************/
bool read_file2d(const char FN1[],const char FN2[],const char FN3[],
int nx,int nz,int nnx,int nnz,float *vv,float *epsilu,float *deta,int npd)
{
int i,j,id,vmax=0.0;
FILE *fp1,*fp2,*fp3;
if((fp1=fopen(FN1,"rb"))==NULL){printf("error open <%s>!\n",FN1);return false;}
if((fp2=fopen(FN2,"rb"))==NULL){printf("error open <%s>!\n",FN2);return false;}
if((fp3=fopen(FN3,"rb"))==NULL){printf("error open <%s>!\n",FN3);return false;}
for(i=npd;i<nx+npd;i++)
{
for(j=npd;j<nz+npd;j++)
{
id=i*nnz+j;
fread(&vv[id],4L,1,fp1);if(vmax<vv[id])vmax=vv[id];
fread(&epsilu[id],4L,1,fp2);
fread(&deta[id],4L,1,fp3);
}
}
fclose(fp1);printf("vmax=%d\n",vmax);
fclose(fp2);
fclose(fp3);
return true;
}
/********************func**********************/
__global__ void get_d02d(float dx_,float dz_,int nnx,int nnz,int npd,float *vp)
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(id<1)d0=10.0*vp[nnx*nnz/2]*log(100000.0)/(2.0*npd*((dx_+dz_)/2.0));
}
/*************func*******************/
__global__ void initial_coffe2d(float dt_,int nn,float *coff1,float *coff2,float *acoff1,float *acoff2,int npd)
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(id<nn+2*npd)
{
if(id<npd)
{
coff1[id]=1.0/(1.0+(dt_*d0*pow((npd-0.5-id)/npd,2.0))/2.0);
coff2[id]=coff1[id]*(1.0-(dt_*d0*pow((npd-0.5-id)/npd,2.0))/2.0);
acoff1[id]=1.0/(1.0+(dt_*d0*pow(((npd-id)*1.0)/npd,2.0))/2.0);
acoff2[id]=acoff1[id]*(1.0-(dt_*d0*pow(((npd-id)*1.0)/npd,2.0))/2.0);
}else if(id>=npd&&id<npd+nn){
coff1[id]=1.0;
coff2[id]=1.0;
acoff1[id]=1.0;
acoff2[id]=1.0;
}else{
coff1[id]=1.0/(1.0+(dt_*d0*pow((0.5+id-nn-npd)/npd,2.0))/2.0);
coff2[id]=coff1[id]*(1.0-(dt_*d0*pow((0.5+id-nn-npd)/npd,2.0))/2.0);
acoff1[id]=1.0/(1.0+(dt_*d0*pow(((id-nn-npd)*1.0)/npd,2.0))/2.0);
acoff2[id]=acoff1[id]*(1.0-(dt_*d0*pow(((id-nn-npd)*1.0)/npd,2.0))/2.0);
}
}
}
/*************func*******************/
__global__ void shot_record2d(int nnx, int nnz, int nx, int nz, int npd, int it, int nt_, float *P, float *shot)
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(id<nx)
{
shot[it+nt_*id]=P[npd+nnz*(id+npd)];
}
}
/*************func**************/
__global__ void mute_directwave2d(int nx,int nt,float dt,float favg,
float dx,float dz,int fs,int ds,int zs,int is,
float *vp,float *epsilu,float *shot,int tt)
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
int mu_t,mu_nt;
float mu_x,mu_z,mu_t0;
int ix=id/nt;
int it=id%nt;
if(id<nx*nt)
{
mu_x=dx*abs(ix-fs-(is-1)*ds);
mu_z=dz*zs;
mu_t0=sqrtf(pow(mu_x,2)+pow(mu_z,2))/(vp[1]*sqrtf(1+2*epsilu[1]));
mu_t=(int)(2.0/(dt*favg));
mu_nt=(int)(mu_t0/dt)+mu_t+tt;
if((it>(int)(mu_t0/dt)-tt)&&it<mu_nt)
shot[id]=0.0;
}
}
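/* Note: the mute window above starts tt samples before the direct-arrival time
   t0 = sqrt(x^2+z^2) / (vp*sqrt(1+2*epsilu)) (the horizontal VTI P velocity, taken near
   the surface) and extends 2/(dt*favg)+tt samples past it, zeroing the direct wave in
   the recorded shot gather. */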
//a################################################################################
__global__ void add_source2d(float pfac,float xsn,float zsn,int nx,int nz,int nnx,int nnz,float dt_,float t,
float favg_,int wtype,int npd,int is,int ds,float *P,float *Q)
/*< generate ricker wavelet with time deley >*/
{
int ixs,izs;
float x_,xx_,tdelay,ts,source=0.0,fs;
tdelay=1.0/favg_;
ts=t-tdelay;
fs=xsn+(is-1)*ds;
if(wtype==1)//ricker wavelet
{
x_=favg_*ts;
xx_=x_*x_;
source=(1-2*pi*pi*(xx_))*exp(-(pi*pi*xx_));
}else if(wtype==2){//derivative of gaussian
x_=(-4)*favg_*favg_*pi*pi/log(0.1);
source=(-2)*pi*pi*ts*exp(-x_*ts*ts);
}else if(wtype==3){//derivative of gaussian
x_=(-1)*favg_*favg_*pi*pi/log(0.1);
source=exp(-x_*ts*ts);
}
if(t<=2*tdelay)
{
ixs = (int)(fs+0.5)+npd-1;
izs = (int)(zsn+0.5)+npd-1;
P[ixs*nnz+izs]+=pfac*source;
Q[ixs*nnz+izs]+=pfac*source;
}
}
/*******************func*********************/
__global__ void update_vel2d(int nx,int nz,int nnx,int nnz,int npd,float dt_,float dx_,float dz_,
float *u0,float *w0,float *u1,float *w1,float *P,float *Q,
float *coffx1,float *coffx2,float *coffz1,float *coffz2)
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
int ix,iz,im;
float dtx,dtz,xx,zz;
ix=id/nnz;
iz=id%nnz;
dtx=dt_/dx_;
dtz=dt_/dz_;
if(id>=mm&&id<nnx*nnz-mm)
{
if(ix>=mm&&ix<(nnx-mm)&&iz>=mm&&iz<(nnz-mm))
{
xx=0.0;
zz=0.0;
for(im=0;im<mm;im++)
{
xx+=c[im]*(P[id+(im+1)*nnz]-P[id-im*nnz]);
zz+=c[im]*(Q[id+im+1] -Q[id-im]);
}
u1[id]=coffx2[ix]*u0[id]-coffx1[ix]*dtx*xx;
w1[id]=coffz2[iz]*w0[id]-coffz1[iz]*dtz*zz;
}
}
}
/*******************func***********************/
__global__ void update_stress2d(int nx,int nz,int nnx,int nnz,float dt_,float dx_,float dz_,
float *u1,float *w1,float *P,float *Q,float *vp,int npd,
float *px1,float *px0,float *pz1,float *pz0,float *qx1,float *qx0,float *qz1,float *qz0,
float *acoffx1,float *acoffx2,float *acoffz1,float *acoffz2,
float *deta,float *epsilu,int fs,int ds,int zs,int is,int SV)
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
int im,ix,iz,rx,rz,R=15,r=4;
float dtx,dtz, xx,zz,ee,dd;
ix=id/nnz;
iz=id%nnz;
dtx=dt_/dx_;
dtz=dt_/dz_;
if(id>=mm&&id<nnx*nnz-mm)
{
/************************i****************************************/
/************************iso circle start*************************/
rx=ix-(fs+(is-1)*ds+npd);
rz=iz-(zs+npd);
if(SV){
if((rx*rx+rz*rz)<=R*R){
if((rx*rx+rz*rz)<=r*r){
ee = 0.0;
dd = 0.0;
}else{
ee = 0.5*(1-cos(pi*((sqrtf(rx*rx+rz*rz)-r)*4.0/(R*3.0-1))))*epsilu[id];
dd = 0.5*(1-cos(pi*((sqrtf(rx*rx+rz*rz)-r)*4.0/(R*3.0-1))))*deta[id];
}
}else{
ee=epsilu[id];
dd=deta[id];
}
}else{
ee=epsilu[id];
dd=deta[id];
}
/************************ iso circle end *************************/
/************************i****************************************/
if(ix>=mm&&ix<(nnx-mm)&&iz>=mm&&iz<(nnz-mm))
{
xx=0.0;
zz=0.0;
for(im=0;im<mm;im++)
{
xx+=c[im]*(u1[id+im*nnz]-u1[id-(im+1)*nnz]);
zz+=c[im]*(w1[id+im] -w1[id-im-1]);
}
px1[id]=acoffx2[ix]*px0[id]-acoffx1[ix]*vp[id]*vp[id]*(1+2*ee)*dtx*xx;
pz1[id]=acoffz2[iz]*pz0[id]-acoffz1[iz]*vp[id]*vp[id]*sqrtf(1+2*dd)*dtz*zz;
qx1[id]=acoffx2[ix]*qx0[id]-acoffx1[ix]*vp[id]*vp[id]*sqrtf(1+2*dd)*dtx*xx;
qz1[id]=acoffz2[iz]*qz0[id]-acoffz1[iz]*vp[id]*vp[id]*dtz*zz;
P[id]=px1[id]+pz1[id];
Q[id]=qx1[id]+qz1[id];
}
}
}
//###################################model#######################################
extern "C" void GPU_vti2dfd(int nx, int nz,int dx,int dz,int npd,int SV,
const char FNv[],const char FNe[],const char FNd[],
int favg,int ns,int fs,int ds,int zs,
const char FNshot[],const char FNsnap[],int nt, int dt,int run_count)
{
//int ite=0;
//loop1:if(ite>0){printf("Please ensure parameter input accurately! \n");ite++;}
float dx_,dz_,favg_,dt_,pfac;
dx_=(float)dx;
dz_=(float)dz;
favg_=(float)favg;
printf("\n##### model start #####\n");
printf("# nx=%2d, dx=%.2f, npd=%d\n",nx,dx_,npd);
printf("# nz=%2d, dz=%.2f, SV=%d\n",nz,dz_,SV);
printf("# vel=<%s>\n",FNv);
printf("# epsilu=<%s>\n",FNe);
printf("# deta=<%s>\n",FNd);
printf("# favg=%.2f\n",favg_);
printf("# ns=%3d\n",ns);
printf("# fs=%3d\n",fs);
printf("# ds=%3d\n",ds);
printf("# zs=%3d\n",zs);
printf("# shot=<%s>\n",FNshot);
printf("# snap=<%s>\n",FNsnap);
FILE *fpsnap, *fpshot;
fpshot=fopen(FNshot,"wb");
fpsnap=fopen(FNsnap,"wb");
int is, it, nnx, nnz, nt_, wtype;
float *v, *e, *d, t;
float *vp, *epsilu, *deta;
float *u0, *u1, *px0, *qx0, *px1, *qx1;
float *w0, *w1, *pz0, *qz0, *pz1, *qz1;
float *P, *Q, *shot_Dev, *shot_Hos;
float *coffx1,*coffx2,*coffz1,*coffz2,*acoffx1,*acoffx2,*acoffz1,*acoffz2;
clock_t start, end;
wtype=1;
nt_=nt;
dt_=(float)(dt*1.0/1000000);
pfac=10.0;
nnx=nx+2*npd;
nnz=nz+2*npd;
v=(float*)malloc(nnz*nnx*sizeof(float));
e=(float*)malloc(nnz*nnx*sizeof(float));
d=(float*)malloc(nnz*nnx*sizeof(float));
shot_Hos=(float*)malloc(nt_*nx*sizeof(float));
if(read_file2d(FNv,FNe,FNd,nx,nz,nnx,nnz,v,e,d,npd))printf("Read file done !\n");
else{printf("Read file error !\n");return;}
pad_vv2d(nx,nz,nnx,nnz,npd,e);
pad_vv2d(nx,nz,nnx,nnz,npd,d);
pad_vv2d(nx,nz,nnx,nnz,npd,v);
hipSetDevice(0);// initialize device, default device=0;
if(run_count==0)check_gpu_error2d("Failed to initialize device!");
/****************************/
hipMalloc(&vp, nnz*nnx*sizeof(float));
hipMalloc(&epsilu, nnz*nnx*sizeof(float));
hipMalloc(&deta, nnz*nnx*sizeof(float));
hipMemcpy(vp, v, nnz*nnx*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(epsilu, e, nnz*nnx*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(deta, d, nnz*nnx*sizeof(float), hipMemcpyHostToDevice);
/****************************/
hipMalloc(&u0, nnz*nnx*sizeof(float)); hipMalloc(&u1, nnz*nnx*sizeof(float));
hipMalloc(&w0, nnz*nnx*sizeof(float)); hipMalloc(&w1, nnz*nnx*sizeof(float));
hipMalloc(&P, nnz*nnx*sizeof(float)); hipMalloc(&Q, nnz*nnx*sizeof(float));
hipMalloc(&px0, nnz*nnx*sizeof(float)); hipMalloc(&px1, nnz*nnx*sizeof(float));
hipMalloc(&pz0, nnz*nnx*sizeof(float)); hipMalloc(&pz1, nnz*nnx*sizeof(float));
hipMalloc(&qx0, nnz*nnx*sizeof(float)); hipMalloc(&qx1, nnz*nnx*sizeof(float));
hipMalloc(&qz0, nnz*nnx*sizeof(float)); hipMalloc(&qz1, nnz*nnx*sizeof(float));
hipMalloc(&coffx1, nnx*sizeof(float)); hipMalloc(&coffx2, nnx*sizeof(float));
hipMalloc(&coffz1, nnz*sizeof(float)); hipMalloc(&coffz2, nnz*sizeof(float));
hipMalloc(&acoffx1, nnx*sizeof(float)); hipMalloc(&acoffx2, nnx*sizeof(float));
hipMalloc(&acoffz1, nnz*sizeof(float)); hipMalloc(&acoffz2, nnz*sizeof(float));
hipMalloc(&shot_Dev, nx*nt_*sizeof(float));
if(run_count==0)check_gpu_error2d("Failed to allocate memory for variables!");
hipLaunchKernelGGL(( get_d02d), dim3(1), dim3(1), 0, 0, dx_, dz_, nnx, nnz, npd, vp);
hipLaunchKernelGGL(( initial_coffe2d), dim3((nnx+511)/512), dim3(512), 0, 0, dt_,nx,coffx1,coffx2,acoffx1,acoffx2,npd);
hipLaunchKernelGGL(( initial_coffe2d), dim3((nnz+511)/512), dim3(512), 0, 0, dt_,nz,coffz1,coffz2,acoffz1,acoffz2,npd);
printf("--------------------------------------------------------\n");
printf("--- \n");
start = clock();
for(is=1;is<=ns;is++)
{
printf("--- IS=%3d \n",is);
hipMemset(u0, 0, nnz*nnx*sizeof(float)); hipMemset(u1, 0, nnz*nnx*sizeof(float));
hipMemset(w0, 0, nnz*nnx*sizeof(float)); hipMemset(w1, 0, nnz*nnx*sizeof(float));
hipMemset(P, 0, nnz*nnx*sizeof(float)); hipMemset(Q, 0, nnz*nnx*sizeof(float));
hipMemset(px0, 0, nnz*nnx*sizeof(float)); hipMemset(px1, 0, nnz*nnx*sizeof(float));
hipMemset(pz0, 0, nnz*nnx*sizeof(float)); hipMemset(pz1, 0, nnz*nnx*sizeof(float));
hipMemset(qx0, 0, nnz*nnx*sizeof(float)); hipMemset(qx1, 0, nnz*nnx*sizeof(float));
hipMemset(qz0, 0, nnz*nnx*sizeof(float)); hipMemset(qz1, 0, nnz*nnx*sizeof(float));
hipMemset(shot_Dev, 0, nt_*nx*sizeof(float));
for(it=0,t=dt_;it<nt_;it++,t+=dt_)
{
// if(it%100==0&&is==1)printf("--- is===%d it===%d\n",is,it);
hipLaunchKernelGGL(( add_source2d), dim3(1),dim3(1), 0, 0, pfac,fs,zs,nx,nz,nnx,nnz,dt_,t,favg_,wtype,npd,is,ds,P,Q);
hipLaunchKernelGGL(( update_vel2d), dim3((nnx*nnz+511)/512), dim3(512), 0, 0, nx,nz,nnx,nnz,npd,dt_,dx_,dz_,u0,w0,u1,w1,P,Q,coffx1,coffx2,coffz1,coffz2);
hipLaunchKernelGGL(( update_stress2d), dim3((nnx*nnz+511)/512), dim3(512), 0, 0, nx,nz,nnx,nnz,dt_,dx_,dz_,u1,w1,P,Q,vp,npd,px1,px0,pz1,pz0,qx1,qx0,qz1,qz0,
acoffx1,acoffx2,acoffz1,acoffz2,deta,epsilu,fs,ds,zs,is,SV);
u0=u1; w0=w1; px0=px1; pz0=pz1; qx0=qx1; qz0=qz1;
hipLaunchKernelGGL(( shot_record2d), dim3((nx+511)/512), dim3(512), 0, 0, nnx, nnz, nx, nz, npd, it, nt_, P, shot_Dev);
if((is==1)&&(it%50==0))
{
hipMemcpy(e, P, nnz*nnx*sizeof(float), hipMemcpyDeviceToHost);
fseek(fpsnap,(int)(it/50)*(nnx)*(nnz)*4L,0);
fwrite(e,4L,nnx*nnz,fpsnap);
}
}//it loop end
hipLaunchKernelGGL(( mute_directwave2d), dim3((nx*nt_+511)/512), dim3(512), 0, 0, nx,nt_,dt_,favg_,dx_,dz_,fs,ds,zs,is,vp,epsilu,shot_Dev,100);
hipMemcpy(shot_Hos, shot_Dev, nt_*nx*sizeof(float), hipMemcpyDeviceToHost);
fseek(fpshot,(is-1)*nt_*nx*sizeof(float),0);
fwrite(shot_Hos,sizeof(float),nt_*nx,fpshot);
}
end = clock();
/*********IS Loop end*********/
printf("--- The forward is over \n");
printf("--- Complete!!!!!!!!! \n");
printf("total %d shots: %f (s)\n", ns, ((float)(end-start))/CLOCKS_PER_SEC);
/***********close************/
fclose(fpsnap); fclose(fpshot);
/***********free*************/
hipFree(coffx1); hipFree(coffx2);
hipFree(coffz1); hipFree(coffz2);
hipFree(acoffx1); hipFree(acoffx2);
hipFree(acoffz1); hipFree(acoffz2);
hipFree(u0); hipFree(u1);
hipFree(w0); hipFree(w1);
hipFree(P); hipFree(Q);
hipFree(px0); hipFree(px1);
hipFree(pz0); hipFree(pz1);
hipFree(qx0); hipFree(qx1);
hipFree(qz0); hipFree(qz1);
hipFree(shot_Dev);
hipFree(vp);
hipFree(epsilu);
hipFree(deta);
/***************host free*****************/
free(v); free(e); free(d);
free(shot_Hos);
// exit(0);
}
| ee0921a20e799acbc9c9d8cd48421c2ea6840cf3.cu | //a#########################################################
//a## 2D Acoustic VTI Medium Forward
//a##
//a## Ps :GPU(CUDA)
//a##
//a##/*a***************************
//a##Function for VTI medium modeling,
//a##
//a## Ps: the function of modeling following:
//a##
//a## du/dt_=1/rho*dp/dx_ ,
//a## dv/dt_=1/rho*dp/dy_ ,
//a## dw/dt_=1/rho*dq/dz_ ,
//a## dp/dt_=rho*vpx^2*(du/dx_+dv/dy_)+rho*vp*vpn*dw/dz_ ,
//a## dq/dt_=rho*vp*vpn*(du/dx_+dv/dy_)+rho*vp^2*dw/dz_ ,
//a## vpx^2=vp^2*(1+2*epsilu);
//a## vpn^2=vp^2*(1+2*deta);
//a##
//a##*********a*******************/
//a##
//a## Rong Tao
//a##
//a#########################################################
#include<stdio.h>
#include<malloc.h>
#include<math.h>
#include<stdlib.h>
#include<string.h>
#include<cuda_runtime.h>
#define pi 3.141592653
__device__ float d0;
__device__ int mm=4;
__constant__ float c[4]={1.196289,-0.0797526,0.009570313,-0.0006975447};
void check_gpu_error2d (const char *msg)
/*< check GPU errors >*/
{
cudaError_t err = cudaGetLastError ();
if (cudaSuccess != err) {
printf("Cuda error: %s: %s\n", msg, cudaGetErrorString (err));
exit(0);
}
}
/*************func*******************/
void pad_vv2d(int nx,int nz,int nnx,int nnz,int npd,float *ee)
{
int ix,iz,id;
for(id=0;id<nnx*nnz;id++)
{
ix=id/nnz;
iz=id%nnz;
if(ix<npd){
ee[id]=ee[npd*nnz+iz]; //left
}else if(ix>=nnx-npd){
ee[id]=ee[(nnx-npd-1)*nnz+iz];//right
}
}
for(id=0;id<nnx*nnz;id++)
{
ix=id/nnz;
iz=id%nnz;
if(iz<npd){
ee[id]=ee[ix*nnz+npd];//up
}else if(iz>=nnz-npd){
ee[id]=ee[ix*nnz+nnz-npd-1];//down
}
//if(ee[id]==0){printf("ee[%d][%d]==0.0\n",ix,iz);exit(0);}
}
}
/*************func*******************/
bool read_file2d(const char FN1[],const char FN2[],const char FN3[],
int nx,int nz,int nnx,int nnz,float *vv,float *epsilu,float *deta,int npd)
{
int i,j,id,vmax=0.0;
FILE *fp1,*fp2,*fp3;
if((fp1=fopen(FN1,"rb"))==NULL){printf("error open <%s>!\n",FN1);return false;}
if((fp2=fopen(FN2,"rb"))==NULL){printf("error open <%s>!\n",FN2);return false;}
if((fp3=fopen(FN3,"rb"))==NULL){printf("error open <%s>!\n",FN3);return false;}
for(i=npd;i<nx+npd;i++)
{
for(j=npd;j<nz+npd;j++)
{
id=i*nnz+j;
fread(&vv[id],4L,1,fp1);if(vmax<vv[id])vmax=vv[id];
fread(&epsilu[id],4L,1,fp2);
fread(&deta[id],4L,1,fp3);
}
}
fclose(fp1);printf("vmax=%d\n",vmax);
fclose(fp2);
fclose(fp3);
return true;
}
/********************func**********************/
__global__ void get_d02d(float dx_,float dz_,int nnx,int nnz,int npd,float *vp)
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(id<1)d0=10.0*vp[nnx*nnz/2]*log(100000.0)/(2.0*npd*((dx_+dz_)/2.0));
}
/*************func*******************/
__global__ void initial_coffe2d(float dt_,int nn,float *coff1,float *coff2,float *acoff1,float *acoff2,int npd)
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(id<nn+2*npd)
{
if(id<npd)
{
coff1[id]=1.0/(1.0+(dt_*d0*pow((npd-0.5-id)/npd,2.0))/2.0);
coff2[id]=coff1[id]*(1.0-(dt_*d0*pow((npd-0.5-id)/npd,2.0))/2.0);
acoff1[id]=1.0/(1.0+(dt_*d0*pow(((npd-id)*1.0)/npd,2.0))/2.0);
acoff2[id]=acoff1[id]*(1.0-(dt_*d0*pow(((npd-id)*1.0)/npd,2.0))/2.0);
}else if(id>=npd&&id<npd+nn){
coff1[id]=1.0;
coff2[id]=1.0;
acoff1[id]=1.0;
acoff2[id]=1.0;
}else{
coff1[id]=1.0/(1.0+(dt_*d0*pow((0.5+id-nn-npd)/npd,2.0))/2.0);
coff2[id]=coff1[id]*(1.0-(dt_*d0*pow((0.5+id-nn-npd)/npd,2.0))/2.0);
acoff1[id]=1.0/(1.0+(dt_*d0*pow(((id-nn-npd)*1.0)/npd,2.0))/2.0);
acoff2[id]=acoff1[id]*(1.0-(dt_*d0*pow(((id-nn-npd)*1.0)/npd,2.0))/2.0);
}
}
}
/*************func*******************/
__global__ void shot_record2d(int nnx, int nnz, int nx, int nz, int npd, int it, int nt_, float *P, float *shot)
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(id<nx)
{
shot[it+nt_*id]=P[npd+nnz*(id+npd)];
}
}
/*************func**************/
__global__ void mute_directwave2d(int nx,int nt,float dt,float favg,
float dx,float dz,int fs,int ds,int zs,int is,
float *vp,float *epsilu,float *shot,int tt)
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
int mu_t,mu_nt;
float mu_x,mu_z,mu_t0;
int ix=id/nt;
int it=id%nt;
if(id<nx*nt)
{
mu_x=dx*abs(ix-fs-(is-1)*ds);
mu_z=dz*zs;
mu_t0=sqrtf(pow(mu_x,2)+pow(mu_z,2))/(vp[1]*sqrtf(1+2*epsilu[1]));
mu_t=(int)(2.0/(dt*favg));
mu_nt=(int)(mu_t0/dt)+mu_t+tt;
if((it>(int)(mu_t0/dt)-tt)&&it<mu_nt)
shot[id]=0.0;
}
}
//a################################################################################
__global__ void add_source2d(float pfac,float xsn,float zsn,int nx,int nz,int nnx,int nnz,float dt_,float t,
float favg_,int wtype,int npd,int is,int ds,float *P,float *Q)
/*< generate ricker wavelet with time deley >*/
{
int ixs,izs;
float x_,xx_,tdelay,ts,source=0.0,fs;
tdelay=1.0/favg_;
ts=t-tdelay;
fs=xsn+(is-1)*ds;
if(wtype==1)//ricker wavelet
{
x_=favg_*ts;
xx_=x_*x_;
source=(1-2*pi*pi*(xx_))*exp(-(pi*pi*xx_));
}else if(wtype==2){//derivative of gaussian
x_=(-4)*favg_*favg_*pi*pi/log(0.1);
source=(-2)*pi*pi*ts*exp(-x_*ts*ts);
}else if(wtype==3){//derivative of gaussian
x_=(-1)*favg_*favg_*pi*pi/log(0.1);
source=exp(-x_*ts*ts);
}
if(t<=2*tdelay)
{
ixs = (int)(fs+0.5)+npd-1;
izs = (int)(zsn+0.5)+npd-1;
P[ixs*nnz+izs]+=pfac*source;
Q[ixs*nnz+izs]+=pfac*source;
}
}
/*******************func*********************/
__global__ void update_vel2d(int nx,int nz,int nnx,int nnz,int npd,float dt_,float dx_,float dz_,
float *u0,float *w0,float *u1,float *w1,float *P,float *Q,
float *coffx1,float *coffx2,float *coffz1,float *coffz2)
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
int ix,iz,im;
float dtx,dtz,xx,zz;
ix=id/nnz;
iz=id%nnz;
dtx=dt_/dx_;
dtz=dt_/dz_;
if(id>=mm&&id<nnx*nnz-mm)
{
if(ix>=mm&&ix<(nnx-mm)&&iz>=mm&&iz<(nnz-mm))
{
xx=0.0;
zz=0.0;
for(im=0;im<mm;im++)
{
xx+=c[im]*(P[id+(im+1)*nnz]-P[id-im*nnz]);
zz+=c[im]*(Q[id+im+1] -Q[id-im]);
}
u1[id]=coffx2[ix]*u0[id]-coffx1[ix]*dtx*xx;
w1[id]=coffz2[iz]*w0[id]-coffz1[iz]*dtz*zz;
}
}
}
/*******************func***********************/
__global__ void update_stress2d(int nx,int nz,int nnx,int nnz,float dt_,float dx_,float dz_,
float *u1,float *w1,float *P,float *Q,float *vp,int npd,
float *px1,float *px0,float *pz1,float *pz0,float *qx1,float *qx0,float *qz1,float *qz0,
float *acoffx1,float *acoffx2,float *acoffz1,float *acoffz2,
float *deta,float *epsilu,int fs,int ds,int zs,int is,int SV)
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
int im,ix,iz,rx,rz,R=15,r=4;
float dtx,dtz, xx,zz,ee,dd;
ix=id/nnz;
iz=id%nnz;
dtx=dt_/dx_;
dtz=dt_/dz_;
if(id>=mm&&id<nnx*nnz-mm)
{
/************************i****************************************/
/************************iso circle start*************************/
rx=ix-(fs+(is-1)*ds+npd);
rz=iz-(zs+npd);
if(SV){
if((rx*rx+rz*rz)<=R*R){
if((rx*rx+rz*rz)<=r*r){
ee = 0.0;
dd = 0.0;
}else{
ee = 0.5*(1-cos(pi*((sqrtf(rx*rx+rz*rz)-r)*4.0/(R*3.0-1))))*epsilu[id];
dd = 0.5*(1-cos(pi*((sqrtf(rx*rx+rz*rz)-r)*4.0/(R*3.0-1))))*deta[id];
}
}else{
ee=epsilu[id];
dd=deta[id];
}
}else{
ee=epsilu[id];
dd=deta[id];
}
/************************ iso circle end *************************/
/************************i****************************************/
if(ix>=mm&&ix<(nnx-mm)&&iz>=mm&&iz<(nnz-mm))
{
xx=0.0;
zz=0.0;
for(im=0;im<mm;im++)
{
xx+=c[im]*(u1[id+im*nnz]-u1[id-(im+1)*nnz]);
zz+=c[im]*(w1[id+im] -w1[id-im-1]);
}
px1[id]=acoffx2[ix]*px0[id]-acoffx1[ix]*vp[id]*vp[id]*(1+2*ee)*dtx*xx;
pz1[id]=acoffz2[iz]*pz0[id]-acoffz1[iz]*vp[id]*vp[id]*sqrtf(1+2*dd)*dtz*zz;
qx1[id]=acoffx2[ix]*qx0[id]-acoffx1[ix]*vp[id]*vp[id]*sqrtf(1+2*dd)*dtx*xx;
qz1[id]=acoffz2[iz]*qz0[id]-acoffz1[iz]*vp[id]*vp[id]*dtz*zz;
P[id]=px1[id]+pz1[id];
Q[id]=qx1[id]+qz1[id];
}
}
}
//###################################model#######################################
extern "C" void GPU_vti2dfd(int nx, int nz,int dx,int dz,int npd,int SV,
const char FNv[],const char FNe[],const char FNd[],
int favg,int ns,int fs,int ds,int zs,
const char FNshot[],const char FNsnap[],int nt, int dt,int run_count)
{
//int ite=0;
//loop1:if(ite>0){printf("Please ensure parameter input accurately! \n");ite++;}
float dx_,dz_,favg_,dt_,pfac;
dx_=(float)dx;
dz_=(float)dz;
favg_=(float)favg;
printf("\n##### model start #####\n");
printf("# nx=%2d, dx=%.2f, npd=%d\n",nx,dx_,npd);
printf("# nz=%2d, dz=%.2f, SV=%d\n",nz,dz_,SV);
printf("# vel=<%s>\n",FNv);
printf("# epsilu=<%s>\n",FNe);
printf("# deta=<%s>\n",FNd);
printf("# favg=%.2f\n",favg_);
printf("# ns=%3d\n",ns);
printf("# fs=%3d\n",fs);
printf("# ds=%3d\n",ds);
printf("# zs=%3d\n",zs);
printf("# shot=<%s>\n",FNshot);
printf("# snap=<%s>\n",FNsnap);
FILE *fpsnap, *fpshot;
fpshot=fopen(FNshot,"wb");
fpsnap=fopen(FNsnap,"wb");
int is, it, nnx, nnz, nt_, wtype;
float *v, *e, *d, t;
float *vp, *epsilu, *deta;
float *u0, *u1, *px0, *qx0, *px1, *qx1;
float *w0, *w1, *pz0, *qz0, *pz1, *qz1;
float *P, *Q, *shot_Dev, *shot_Hos;
float *coffx1,*coffx2,*coffz1,*coffz2,*acoffx1,*acoffx2,*acoffz1,*acoffz2;
clock_t start, end;
wtype=1;
nt_=nt;
dt_=(float)(dt*1.0/1000000);
pfac=10.0;
nnx=nx+2*npd;
nnz=nz+2*npd;
v=(float*)malloc(nnz*nnx*sizeof(float));
e=(float*)malloc(nnz*nnx*sizeof(float));
d=(float*)malloc(nnz*nnx*sizeof(float));
shot_Hos=(float*)malloc(nt_*nx*sizeof(float));
if(read_file2d(FNv,FNe,FNd,nx,nz,nnx,nnz,v,e,d,npd))printf("Read file done !\n");
else{printf("Read file error !\n");return;}
pad_vv2d(nx,nz,nnx,nnz,npd,e);
pad_vv2d(nx,nz,nnx,nnz,npd,d);
pad_vv2d(nx,nz,nnx,nnz,npd,v);
cudaSetDevice(0);// initialize device, default device=0;
if(run_count==0)check_gpu_error2d("Failed to initialize device!");
/****************************/
cudaMalloc(&vp, nnz*nnx*sizeof(float));
cudaMalloc(&epsilu, nnz*nnx*sizeof(float));
cudaMalloc(&deta, nnz*nnx*sizeof(float));
cudaMemcpy(vp, v, nnz*nnx*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(epsilu, e, nnz*nnx*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(deta, d, nnz*nnx*sizeof(float), cudaMemcpyHostToDevice);
/****************************/
cudaMalloc(&u0, nnz*nnx*sizeof(float)); cudaMalloc(&u1, nnz*nnx*sizeof(float));
cudaMalloc(&w0, nnz*nnx*sizeof(float)); cudaMalloc(&w1, nnz*nnx*sizeof(float));
cudaMalloc(&P, nnz*nnx*sizeof(float)); cudaMalloc(&Q, nnz*nnx*sizeof(float));
cudaMalloc(&px0, nnz*nnx*sizeof(float)); cudaMalloc(&px1, nnz*nnx*sizeof(float));
cudaMalloc(&pz0, nnz*nnx*sizeof(float)); cudaMalloc(&pz1, nnz*nnx*sizeof(float));
cudaMalloc(&qx0, nnz*nnx*sizeof(float)); cudaMalloc(&qx1, nnz*nnx*sizeof(float));
cudaMalloc(&qz0, nnz*nnx*sizeof(float)); cudaMalloc(&qz1, nnz*nnx*sizeof(float));
cudaMalloc(&coffx1, nnx*sizeof(float)); cudaMalloc(&coffx2, nnx*sizeof(float));
cudaMalloc(&coffz1, nnz*sizeof(float)); cudaMalloc(&coffz2, nnz*sizeof(float));
cudaMalloc(&acoffx1, nnx*sizeof(float)); cudaMalloc(&acoffx2, nnx*sizeof(float));
cudaMalloc(&acoffz1, nnz*sizeof(float)); cudaMalloc(&acoffz2, nnz*sizeof(float));
cudaMalloc(&shot_Dev, nx*nt_*sizeof(float));
if(run_count==0)check_gpu_error2d("Failed to allocate memory for variables!");
get_d02d<<<1, 1>>>(dx_, dz_, nnx, nnz, npd, vp);
initial_coffe2d<<<(nnx+511)/512, 512>>>(dt_,nx,coffx1,coffx2,acoffx1,acoffx2,npd);
initial_coffe2d<<<(nnz+511)/512, 512>>>(dt_,nz,coffz1,coffz2,acoffz1,acoffz2,npd);
printf("--------------------------------------------------------\n");
printf("--- \n");
start = clock();
for(is=1;is<=ns;is++)
{
printf("--- IS=%3d \n",is);
cudaMemset(u0, 0, nnz*nnx*sizeof(float)); cudaMemset(u1, 0, nnz*nnx*sizeof(float));
cudaMemset(w0, 0, nnz*nnx*sizeof(float)); cudaMemset(w1, 0, nnz*nnx*sizeof(float));
cudaMemset(P, 0, nnz*nnx*sizeof(float)); cudaMemset(Q, 0, nnz*nnx*sizeof(float));
cudaMemset(px0, 0, nnz*nnx*sizeof(float)); cudaMemset(px1, 0, nnz*nnx*sizeof(float));
cudaMemset(pz0, 0, nnz*nnx*sizeof(float)); cudaMemset(pz1, 0, nnz*nnx*sizeof(float));
cudaMemset(qx0, 0, nnz*nnx*sizeof(float)); cudaMemset(qx1, 0, nnz*nnx*sizeof(float));
cudaMemset(qz0, 0, nnz*nnx*sizeof(float)); cudaMemset(qz1, 0, nnz*nnx*sizeof(float));
cudaMemset(shot_Dev, 0, nt_*nx*sizeof(float));
for(it=0,t=dt_;it<nt_;it++,t+=dt_)
{
// if(it%100==0&&is==1)printf("--- is===%d it===%d\n",is,it);
add_source2d<<<1,1>>>(pfac,fs,zs,nx,nz,nnx,nnz,dt_,t,favg_,wtype,npd,is,ds,P,Q);
update_vel2d<<<(nnx*nnz+511)/512, 512>>>(nx,nz,nnx,nnz,npd,dt_,dx_,dz_,u0,w0,u1,w1,P,Q,coffx1,coffx2,coffz1,coffz2);
update_stress2d<<<(nnx*nnz+511)/512, 512>>>(nx,nz,nnx,nnz,dt_,dx_,dz_,u1,w1,P,Q,vp,npd,px1,px0,pz1,pz0,qx1,qx0,qz1,qz0,
acoffx1,acoffx2,acoffz1,acoffz2,deta,epsilu,fs,ds,zs,is,SV);
u0=u1; w0=w1; px0=px1; pz0=pz1; qx0=qx1; qz0=qz1;
shot_record2d<<<(nx+511)/512, 512>>>(nnx, nnz, nx, nz, npd, it, nt_, P, shot_Dev);
if((is==1)&&(it%50==0))
{
cudaMemcpy(e, P, nnz*nnx*sizeof(float), cudaMemcpyDeviceToHost);
fseek(fpsnap,(int)(it/50)*(nnx)*(nnz)*4L,0);
fwrite(e,4L,nnx*nnz,fpsnap);
}
}//it loop end
mute_directwave2d<<<(nx*nt_+511)/512, 512>>>(nx,nt_,dt_,favg_,dx_,dz_,fs,ds,zs,is,vp,epsilu,shot_Dev,100);
cudaMemcpy(shot_Hos, shot_Dev, nt_*nx*sizeof(float), cudaMemcpyDeviceToHost);
fseek(fpshot,(is-1)*nt_*nx*sizeof(float),0);
fwrite(shot_Hos,sizeof(float),nt_*nx,fpshot);
}
end = clock();
/*********IS Loop end*********/
printf("--- The forward is over \n");
printf("--- Complete!!!!!!!!! \n");
printf("total %d shots: %f (s)\n", ns, ((float)(end-start))/CLOCKS_PER_SEC);
/***********close************/
fclose(fpsnap); fclose(fpshot);
/***********free*************/
cudaFree(coffx1); cudaFree(coffx2);
cudaFree(coffz1); cudaFree(coffz2);
cudaFree(acoffx1); cudaFree(acoffx2);
cudaFree(acoffz1); cudaFree(acoffz2);
cudaFree(u0); cudaFree(u1);
cudaFree(w0); cudaFree(w1);
cudaFree(P); cudaFree(Q);
cudaFree(px0); cudaFree(px1);
cudaFree(pz0); cudaFree(pz1);
cudaFree(qx0); cudaFree(qx1);
cudaFree(qz0); cudaFree(qz1);
cudaFree(shot_Dev);
cudaFree(vp);
cudaFree(epsilu);
cudaFree(deta);
/***************host free*****************/
free(v); free(e); free(d);
free(shot_Hos);
// exit(0);
}
|
772b6e2b7a8280653e3d4f514c935f2453f13559.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu_smooth.cuh"
#include "map.h"
#include <assert.h>
#include <iostream>
#include "Utilities.h"
#include "CudaUtilities_hip.cuh"
#include "Timer.h"
#include "smooth.h"
void example_gpu_smooth_expl_generic(const char* str, const int w, const int h) {
const size_t sz = w * h * sizeof(float);
auto t = new Timer();
cout << "Allocating " << sz/(1024*1024) << " megabytes for src" << endl;
float* src = nullptr;
hipMalloc(&src, sz);
check("hipMalloc src");
cout << "Allocating " << sz / (1024 * 1024) << " megabytes for dest" << endl;
float* dest = nullptr;
hipMalloc(&dest, sz);
check("hipMalloc dest");
cout << "fill" << endl;
t->start();
gpu_fill_example(src, w, h);
t->stop();
cout << "duration of gpu_fill_example " << t->delta() << endl;
check("gpu_fill_example");
cout << "smooth" << endl;
t->start();
gpu_smooth(src, dest, w, h);
t->stop();
cout << "duration of " << str << " " << t->delta() << endl;
check("gpu_smooth");
cout << "copy d2h" << endl;
t->start();
float* destH = nullptr;
hipHostMalloc(&destH, sz);
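// (pinned host memory from hipHostMalloc should make the device-to-host copy below faster
// than a copy into ordinary pageable memory)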
check("hipHostMalloc destH");
hipMemcpy(destH, dest, sz, hipMemcpyDeviceToHost);
t->stop();
cout << "duration of hipMemcpy d2h " << t->delta() << endl;
check("hipMemcpy");
hipFree(src);
check("hipFree src");
hipFree(dest);
check("hipFree dest");
hipHostFree(destH);
check("hipHostFree destH");
}
void example_gpu_smooth_expl() {
example_gpu_smooth_expl_generic("example_gpu_smooth_expl", smooth_tiny_w, smooth_tiny_h);
}
void example_gpu_smooth_expl_small() {
example_gpu_smooth_expl_generic("example_gpu_smooth_expl_small", smooth_small_w, smooth_small_h);
}
void example_gpu_smooth_expl_large() {
example_gpu_smooth_expl_generic("example_gpu_smooth_expl_large", smooth_large_w, smooth_large_h);
}
void example_gpu_smooth_expl_huge() {
example_gpu_smooth_expl_generic("example_gpu_smooth_expl_huge", smooth_huge_w, smooth_huge_h);
}
| 772b6e2b7a8280653e3d4f514c935f2453f13559.cu | #include "gpu_smooth.cuh"
#include "map.h"
#include <assert.h>
#include <iostream>
#include "Utilities.h"
#include "CudaUtilities.cuh"
#include "Timer.h"
#include "smooth.h"
void example_gpu_smooth_expl_generic(const char* str, const int w, const int h) {
const size_t sz = w * h * sizeof(float);
auto t = new Timer();
cout << "Allocating " << sz/(1024*1024) << " megabytes for src" << endl;
float* src = nullptr;
cudaMalloc(&src, sz);
check("cudaMalloc src");
cout << "Allocating " << sz / (1024 * 1024) << " megabytes for dest" << endl;
float* dest = nullptr;
cudaMalloc(&dest, sz);
check("cudaMalloc dest");
cout << "fill" << endl;
t->start();
gpu_fill_example(src, w, h);
t->stop();
cout << "duration of gpu_fill_example " << t->delta() << endl;
check("gpu_fill_example");
cout << "smooth" << endl;
t->start();
gpu_smooth(src, dest, w, h);
t->stop();
cout << "duration of " << str << " " << t->delta() << endl;
check("gpu_smooth");
cout << "copy d2h" << endl;
t->start();
float* destH = nullptr;
cudaMallocHost(&destH, sz);
check("cudaMallocHost destH");
cudaMemcpy(destH, dest, sz, cudaMemcpyDeviceToHost);
t->stop();
cout << "duration of cudaMemcpy d2h " << t->delta() << endl;
check("cudaMemcpy");
cudaFree(src);
check("cudaFree src");
cudaFree(dest);
check("cudaFree dest");
cudaFreeHost(destH);
check("cudaFreeHost destH");
}
void example_gpu_smooth_expl() {
example_gpu_smooth_expl_generic("example_gpu_smooth_expl", smooth_tiny_w, smooth_tiny_h);
}
void example_gpu_smooth_expl_small() {
example_gpu_smooth_expl_generic("example_gpu_smooth_expl_small", smooth_small_w, smooth_small_h);
}
void example_gpu_smooth_expl_large() {
example_gpu_smooth_expl_generic("example_gpu_smooth_expl_large", smooth_large_w, smooth_large_h);
}
void example_gpu_smooth_expl_huge() {
example_gpu_smooth_expl_generic("example_gpu_smooth_expl_huge", smooth_huge_w, smooth_huge_h);
}
|
5aefeafa57ee5fcc73c4fcc1c87ed4477483baec.hip | // !!! This is a file automatically generated by hipify!!!
//--------------------------------------------------------------
// The Lanczos routine computes the Lanczos matrix
// Diagonalization of the Lanczos matrix must be done separately
// in the routine LanczosDiag
//
// This file contains the CUDA implementation of Lanczos
//
//
//--------------------------------------------------------------
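//
// For orientation, the recursion built here is the standard Lanczos three-term
// recurrence (a sketch, not taken verbatim from the code below):
//
// b_m |phi_{m+1}> = H |phi_m> - a_m |phi_m> - b_{m-1} |phi_{m-1}>,
// a_m = <phi_m| H |phi_m>, b_m chosen so that |phi_{m+1}> is normalized,
//
// so lanczos_a/lanczos_b hold the diagonal and off-diagonal of the tridiagonal
// Lanczos matrix that DiagonalizeLanczosMatrix handles separately.
//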
using namespace std;
#ifdef DEBUG
const bool WTRACE=true;
#else
const bool WTRACE=false;
#endif
#ifdef DEBUG2
const bool TRACE=true;
#else
const bool TRACE=false;
#endif
#include<iostream>
#include<iomanip>
const double TINYNUMBER=1.e-15; // numbers smaller than this are effectively considered 0.
const double LANCZOSEXITLIMIT=1.e-15;
#include "unistd.h"
#include "lanczos.h"
#include "rnddef.h"
#include "hip/hip_runtime.h"
//#include <rocblas.h>
#include "hip/hip_complex.h"
#include "cuda_global.h"
#include "cuda_precision.h"
#include "lanczos_cuda.h"
#include "lowlevelroutines.h"
#include "global.h"
//Lanczos::Lanczos(MyMatrix& H_in,int M_in){impl = new Lanczos_impl(H_in,M_in);} // initializes Lanczos with random vector
Lanczos::Lanczos(MyMatrix& H_in,int M_in,int basestatenr){impl = new Lanczos_impl(H_in,M_in,basestatenr);} // initializes Lanczos with a basestate
Lanczos::Lanczos(MyMatrix& H_in,int M_in, vector<MyField>& vec_in){impl = new Lanczos_impl(H_in,M_in,vec_in);} // initializes Lanczos using the input vector
Lanczos::~Lanczos(){delete impl;}
void Lanczos::DoLanczos(){impl->DoLanczos();}
MyField* Lanczos::GetInitVector(){return impl->GetInitVector();}
MyField* Lanczos::GetPhi(const int i){return impl->GetPhi(i);}
void Lanczos::DiagonalizeLanczosMatrix(){impl->DiagonalizeLanczosMatrix();}
vector<double>& Lanczos::GetLanczosa(){return impl->GetLanczosa();}
vector<double>& Lanczos::GetLanczosb(){return impl->GetLanczosb();} // we start numbering b from b0,b1 etc.
vector<double>& Lanczos::GetEigenvalues(){return impl->GetEigenvalues();}
double* Lanczos::GetEigenvector(const int i){return impl->GetEigenvector(i);}
int Lanczos::GetNeigenvalues(){return impl->GetNeigenvalues();}
int Lanczos::GetNLanczosIterations(){return impl->GetNLanczosIterations();}
double Lanczos::GetNormofInitVector(){return impl->GetNormofInitVector();}
MyField* Lanczos::GetLanczosEigenvectorinStateSpaceBasis(const int n){return impl->GetLanczosEigenvectorinStateSpaceBasis(n);}
Lanczos_impl::Lanczos_impl(MyMatrix& H_in,int M_in,int basestatenr):
H(H_in),N(H.Ncols()),M(min(N,M_in)),Mstop(M),Niterations(0),phi(NULL),
lanczos_a(M),lanczos_b(M),LM(0),initnorm(1.),err(hipSuccess),
Rows(H_in.Nrows()),Cols(H_in.Ncols()),Nc(H_in.NStoredCols()),Ntot(Rows*Nc),
d_m_val(NULL),d_m_col(NULL),d_phi(NULL)
{
if(WTRACE) cout << "Starting Lanczos, M=" << M << " with base state " << basestatenr << " Rows:" << Rows << " Cols:" << Cols << " Nc: " << Nc << " Ntot:" << Ntot << endl;
phi = new MyField[(M+1)*Cols]; // allocate space for Lanczos vectors
for(int i=0; i<(M+1)*Cols; i++){ phi[i]=MyField(0.,0.); }
if(basestatenr >=0 && basestatenr < Cols)
{
if(TRACE) cout << "Setting base state: " << basestatenr << endl;
phi[basestatenr]=complex<double>(1.,0.); // initialize the vector
initnorm=1.;
}
else
{ // Choose a random basestate if basestatenr is -1 or >#of states
RandomInitialization(); // pick a random state
if(TRACE)
{
cout << "phi[0]:" << endl;
for(int i=0; i<Cols; i++) cout << setprecision(DEBUGPRECISION) << phi[i] << endl;
}
}
UploadMatrixToDevice(H,&d_m_val,&d_m_col);
AllocateSpaceOnDevice(&d_phi,(M+1)*Cols);
UploadToDeviceAndExpand(phi,(M+1)*Cols,&d_phi);
Normalize(0); // normalize the random vector
if(TRACE)
{
cout << "phi[0]:" << endl;
InspectDevice(&d_phi,Cols);
}
if(TRACE)
{
cout << "the matrix is: " << H << endl;
}
}
Lanczos_impl::Lanczos_impl(MyMatrix& H_in,int M_in, vector<MyField>& vec_in):
H(H_in),N(H.Ncols()),M(min(N,M_in)),Mstop(M),Niterations(0),phi(NULL),
lanczos_a(M),lanczos_b(M),LM(0),initnorm(1.),err(hipSuccess),
Rows(H_in.Nrows()),Cols(H_in.Ncols()),Nc(H_in.NStoredCols()),Ntot(Rows*Nc),
d_m_val(NULL),d_m_col(NULL),d_phi(NULL)
{
if(WTRACE) cout << "Starting Lanczos with initial vector, M=" << M << endl;
phi = new MyField[(M+1)*Cols]; // allocate space for Lanczos vectors
for(int i=0; i<(M+1)*Cols; i++){ phi[i]=MyField(0.,0.); }
for(int i=0; i<Cols; i++) phi[i]=vec_in[i]; // initialize the vector
if(TRACE)
{
cout << "phi[0]:" << endl;
for(int i=0; i<Cols; i++) cout << phi[i] << " ";
}
UploadMatrixToDevice(H,&d_m_val,&d_m_col);
AllocateSpaceOnDevice(&d_phi,(M+1)*Cols);
UploadToDeviceAndExpand(phi,(M+1)*Cols,&d_phi);
// if(TRACE){ cout << "phi[0] after upload" << endl; PrintOutPhi(0);}
initnorm=Normalize(0); // record the normalization of the init vector, and normalize it
if(TRACE) cout << "Init norm is: " << initnorm << endl;
if(abs(initnorm) < TINYNUMBER){Mstop=0; Niterations=0;} // in case the starting vector is the zero vector
}
Lanczos_impl::~Lanczos_impl()
{
if(WTRACE) cout << "Destroying class Lanczos" << endl;
// hipblasDestroy(handle);
delete[] phi;
delete LM;
}
void Lanczos_impl::DoLanczos()
{
if(TRACE) cout << "Starting DoLanczos (lanczos_cuda.cu)" << endl;
int m=0;
bool continueiteration=true;
while(continueiteration && m<Mstop){
if(TRACE) cout << "Doing Lanczos step m:" << m << " M: " << M << endl;
continueiteration=LanczosIteration(m++);
}
Niterations=m; // gives the number of lanczos vectors
if(TRACE){
cout << "There are in all " << Niterations << " orthogonal Lanczos vectors" << endl;
if(Niterations >0)
{
for(int i=0; i<Niterations-1; i++){
cout << "a[" << i << "]=" << lanczos_a[i] << " b[" << i << "]="<< lanczos_b[i] << endl;}
cout << "a[" << Niterations-1 << "]=" << lanczos_a[Niterations-1] << endl;
}
}
DownloadFromDeviceAndContract(&d_phi,(M+1)*Cols,phi);
// FreeMatrixOnDevice(&d_m_val,&d_m_col);
FreeMemoryOnDevice(&d_phi);
FreeMemoryOnDevice(&d_m_col);
FreeMemoryOnDevice(&d_m_val);
}
double Lanczos_impl::Normalize(int i)
{
if(WTRACE) cout << "in Lanczos_impl::Normalize" << endl;
double norm=0.;
/*
cuda_Dznrm2(Cols,&d_phi[i*Cols],&norm);
// hipblasDznrm2(handle,Cols,&d_phi[i*Cols],1,&norm);
if(TRACE) cout << "Norm is " << setprecision(DEBUGPRECISION) << norm << endl;
double factor=1./norm;
cuda_Zdscal(Cols,&factor,&d_phi[i*Cols]);
// hipblasZdscal(handle,Cols,&factor,&d_phi[i*Cols],1);
*/
cudap_Normalize(Cols,&d_phi[i*Cols],&norm);
if(TRACE) cout << "Norm is " << setprecision(DEBUGPRECISION) << norm << endl;
return norm;
}
/*
double Lanczos_impl::GetNorm(const int n,cuda_cmplx* d_z)
{
if(WTRACE) cout << "in Lanczos_impl::GetNorm" << endl;
double norm=0.;
cuda_Dznrm2(n,d_z,&norm);
//hipblasDznrm2(handle,n,d_z,1,&norm);
return norm;
}
*/
void Lanczos_impl::RandomInitialization()
{
if(WTRACE) cout << "In RandomInitialization" << endl;
MyField* phi0=&phi[0];
for(int i=0; i<Cols; i++)
{
double r_real=2.*RAN.Get()-1.;
double r_imag=2.*RAN.Get()-1.;
MyField r=MyField(r_real,r_imag); // random start
phi0[i]=r;
}
}
void Lanczos_impl::CheckOrthogonality(int n,cmplxformat* d_z,const int m)
{
cout << "Checking orthogonality:" << endl;
for(int i=0; i<=m; i++)
{
// cuda_cmplx q=cmplx_0;
cmplxformat qp;
cmplxformat* d_phii=&d_phi[i*n];
cudap_cmplxdotproduct_cmplx(n,d_phii,d_z,&qp); // q=<phi_i|z>
/*
cuda_cmplx* d_phii=&d_phi[i*n];
cuda_CmplexDotProductCmplx(n,d_phii,d_z,&q);
// hipblasZdotc(handle,n,d_phii,1,d_z,1,&q); // q=<z|phi_i>
*/
cuda_cmplx q;
q.x= todouble(qp.x);
q.y= todouble(qp.y);
if(cuCabs(q) > 1.e-13)
{
cout << "WARNING: Severe Loss of Orthogonality";
cout << "<phi[" << i << "]|z>=" << setprecision(DEBUGPRECISION) << q;
cout << endl;
}
}
}
void Lanczos_impl::Reorthogonalize(int n,cmplxformat* d_z,int m)
{
if(WTRACE) cout << "In Reorthogonalize" << endl;
for(int j=0; j<MGSREPEATSTEPS; j++) // 2 for Modified Gram-Schmidt with reorthogo..
for(int i=0; i<=m; i++)
{
cmplxformat q;
const cmplxformat* d_phii=&d_phi[i*n];
const cmplxformat* d_zconst= d_z;
cudap_cmplxdotproduct_cmplx(n,d_phii,d_zconst,&q); // q=<phi_i|z>
cudap_cmplx_Zmaxpy(n,&q,d_z,d_phii); // |z>=|z>-q*|phi_i>;
// cuda_ComplexDotProduct(n,d_phii,d_z,&q); // q=<phi_i|z>
//hipblasZdotc(handle,n,d_phii,1,d_z,1,&q); // q=<phi_i|z>
//cuda_cmplx negq=cuCsub(cmplx_0,q);
//cuda_Zaxpy(n,&negq,d_z,d_phii); // |z>=|z>-q*|phi_i>;
//hipblasZaxpy(handle,n,&negq, d_z,1,d_phii,1); // |z>=|z>-q*|phi_i>;
}
if(TRACE) cout << "Done Reorthogonalize" << endl;
}
bool Lanczos_impl::LanczosIteration(const int m)
{
if(WTRACE) cout << "Starting Lanczos iteration " << m << endl;
Niterations++;
cmplxformat* d_phim=&d_phi[m*Cols]; //already normalized
cmplxformat* d_z =&d_phi[(m+1)*Cols];
// if(TRACE) cout << "Launching cuda_mat_vec_multiply_cmplx with " << blocksPerGrid << " blocks and " << threadsPerBlock << " threads per block" << endl;
//cuda_mat_vec_multiply_cmplx<<<blocksPerGrid,threadsPerBlock>>>(Rows,Nc,d_m_col ,d_m_val, d_phim, d_z); // |z> = H |phi_m>
cudap_mat_vec_multiply_cmplx(Rows,Nc,d_m_col ,d_m_val, d_phim, d_z); // |z> = H |phi_m>
//cuda_mat_vec_multiply_cmplx_dd<<<blocksPerGrid,threadsPerBlock>>>(Rows,Nc,d_m_col ,d_m_val, d_phim, d_z); // |z> = H |phi_m>
err = hipGetLastError();
if(TRACE) cout << "end of cuda_mat_vec_multiply_cmplx, last error" << err << endl;
if(TRACE)
{
cout << "H * phi[" << m << "]" << endl;
InspectDevice(&d_z,Cols);
}
if(TRACE) cout << "Launching ComplexDotProduct" << endl;
cudap_realdotproduct_cmplx(Cols,d_phim,d_z,&planczos_am); // a_m=<phi_m|z>, should be real
cudap_real_Zmaxpy(Cols,&planczos_am,d_z,d_phim); // |z>=|z>-a_m*|phi_m>
lanczos_a[m]=todouble(planczos_am);
if(TRACE) cout << "lanczos_a[" << m << "]=" << lanczos_a[m] << endl;
/*
// hipblasZdotc(handle,Cols,d_phim,1,d_z,1,&a);
cuda_cmplx negam=make_cuDoubleComplex(-lanczos_a[m],0.);
cuda_Zaxpy(Cols,&negam,d_phim,d_z); // |z>=|z>-a_m*|phi_m>
//hipblasZaxpy(handle,Cols,&negam,d_phim,1,d_z,1); // |z>=|z>-a_m*|phi_m>
*/
if(m>0)
{
/*
cuda_cmplx negbmm1=make_cuDoubleComplex(-lanczos_b[m-1],0.);
cuda_cmplx* d_phimm1=&d_phi[(m-1)*Cols];
*/
cmplxformat* d_phimm1=&d_phi[(m-1)*Cols]; //already normalized
realformat* bmm1_ptr= &planczos_bm; // point to the previous lanczos_bm
cudap_real_Zmaxpy(Cols,bmm1_ptr,d_z,d_phimm1); // |z>=|z>-b_(m-1)*|phi_(m-1)>
//hipblasZaxpy(handle,Cols,&negbmm1,d_phimm1,1,d_z,1);//|z>=|z>-b_(m-1)*|phi_(m-1)>
}
#ifdef REORTHOGONALIZE
Reorthogonalize(Cols,d_z,m);
#else
#endif
if(TRACE) cout << "After Reorthogonalization" << endl;
if(TRACE) CheckOrthogonality(Cols,d_z,m);
realformat* bm_ptr= &planczos_bm;
cudap_norm_cmplx(Cols,d_z,bm_ptr);
lanczos_b[m]=todouble(planczos_bm);
if(m == M-1) // end of lanczos procedure
{
if(TRACE) cout << "Truncating the Lanczos procedure, reached max matrix size, lanczos_b[M-1]= " << lanczos_b[m] << " --> 0" << endl;
return false;
}
if(TRACE) cout << "lanczos_b[" << m << "]=" << lanczos_b[m] << endl;
if(abs(lanczos_b[m])<=LANCZOSEXITLIMIT){return false;} // stop the Lanczos procedure;
cudap_Zdinvscal(Cols,bm_ptr,d_z); // |z> -> |z>/bm
// double factor=1./beta;
// cuda_Zdscal(Cols,&factor,d_z); // |z> -> |z>/beta
//hipblasZdscal(handle,Cols,&factor,d_z,1); // |z> -> |z>/beta
if(TRACE){
cout << "Lanczos vector phi[" << m+1 << "]: " << endl;
InspectDevice(&d_z,Cols);
}
return true;
}
void Lanczos_impl::DiagonalizeLanczosMatrix()
{
if(Niterations==0) return;
delete LM;
LM = new LanczosMatrix(Niterations,lanczos_a,lanczos_b);
LM->Diagonalize();
}
MyField* Lanczos_impl::GetLanczosEigenvectorinStateSpaceBasis(const int n)
{
MyField* phiM=&phi[M*Cols]; // the last one is used as storage
double* nu=LM->Eigenvector(n);
for(int i=0; i<N; i++){ phiM[i]=0;} // initialize to zero.
for(int p=0; p<Niterations; p++)
{
MyField* phip=&phi[p*Cols];
AddToVector(&N,phiM,phip,&nu[p]);
}
return phiM;
}
| 5aefeafa57ee5fcc73c4fcc1c87ed4477483baec.cu | //--------------------------------------------------------------
// The Lanczos routine computes the Lanczos matrix
// Diagonalization of the Lanczos matrix must be done separately
// in the routine LanczosDiag
//
// This file contains the CUDA implementation of Lanczos
//
//
//--------------------------------------------------------------
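//--------------------------------------------------------------
// Added note (not part of the original source): LanczosIteration() below
// implements the usual three-term recurrence. With |phi_0> normalized,
// a_m = <phi_m|H|phi_m>, |z> = H|phi_m> - a_m|phi_m> - b_(m-1)|phi_(m-1)>,
// b_m = || |z> || and |phi_(m+1)> = |z>/b_m. The coefficients a_m, b_m form
// the tridiagonal matrix that DiagonalizeLanczosMatrix() later diagonalizes.
//--------------------------------------------------------------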
using namespace std;
#ifdef DEBUG
const bool WTRACE=true;
#else
const bool WTRACE=false;
#endif
#ifdef DEBUG2
const bool TRACE=true;
#else
const bool TRACE=false;
#endif
#include<iostream>
#include<iomanip>
const double TINYNUMBER=1.e-15; // numbers smaller than this are effectively considered 0.
const double LANCZOSEXITLIMIT=1.e-15;
#include "unistd.h"
#include "lanczos.h"
#include "rnddef.h"
#include "cuda_runtime.h"
//#include <cublas_v2.h>
#include "cuComplex.h"
#include "cuda_global.h"
#include "cuda_precision.h"
#include "lanczos_cuda.h"
#include "lowlevelroutines.h"
#include "global.h"
//Lanczos::Lanczos(MyMatrix& H_in,int M_in){impl = new Lanczos_impl(H_in,M_in);} // initializes Lanczos with random vector
Lanczos::Lanczos(MyMatrix& H_in,int M_in,int basestatenr){impl = new Lanczos_impl(H_in,M_in,basestatenr);} // initializes Lanczos with a basestate
Lanczos::Lanczos(MyMatrix& H_in,int M_in, vector<MyField>& vec_in){impl = new Lanczos_impl(H_in,M_in,vec_in);} // initializes Lanczos using the input vector
Lanczos::~Lanczos(){delete impl;}
void Lanczos::DoLanczos(){impl->DoLanczos();}
MyField* Lanczos::GetInitVector(){return impl->GetInitVector();}
MyField* Lanczos::GetPhi(const int i){return impl->GetPhi(i);}
void Lanczos::DiagonalizeLanczosMatrix(){impl->DiagonalizeLanczosMatrix();}
vector<double>& Lanczos::GetLanczosa(){return impl->GetLanczosa();}
vector<double>& Lanczos::GetLanczosb(){return impl->GetLanczosb();} // we start numbering b from b0,b1 etc.
vector<double>& Lanczos::GetEigenvalues(){return impl->GetEigenvalues();}
double* Lanczos::GetEigenvector(const int i){return impl->GetEigenvector(i);}
int Lanczos::GetNeigenvalues(){return impl->GetNeigenvalues();}
int Lanczos::GetNLanczosIterations(){return impl->GetNLanczosIterations();}
double Lanczos::GetNormofInitVector(){return impl->GetNormofInitVector();}
MyField* Lanczos::GetLanczosEigenvectorinStateSpaceBasis(const int n){return impl->GetLanczosEigenvectorinStateSpaceBasis(n);}
Lanczos_impl::Lanczos_impl(MyMatrix& H_in,int M_in,int basestatenr):
H(H_in),N(H.Ncols()),M(min(N,M_in)),Mstop(M),Niterations(0),phi(NULL),
lanczos_a(M),lanczos_b(M),LM(0),initnorm(1.),err(cudaSuccess),
Rows(H_in.Nrows()),Cols(H_in.Ncols()),Nc(H_in.NStoredCols()),Ntot(Rows*Nc),
d_m_val(NULL),d_m_col(NULL),d_phi(NULL)
{
if(WTRACE) cout << "Starting Lanczos, M=" << M << " with base state " << basestatenr << " Rows:" << Rows << " Cols:" << Cols << " Nc: " << Nc << " Ntot:" << Ntot << endl;
phi = new MyField[(M+1)*Cols]; // allocate space for Lanczos vectors
for(int i=0; i<(M+1)*Cols; i++){ phi[i]=MyField(0.,0.); }
if(basestatenr >=0 && basestatenr < Cols)
{
if(TRACE) cout << "Setting base state: " << basestatenr << endl;
phi[basestatenr]=complex<double>(1.,0.); // initialize the vector
initnorm=1.;
}
else
{ // Choose a random basestate if basestatenr is -1 or >#of states
RandomInitialization(); // pick a random state
if(TRACE)
{
cout << "phi[0]:" << endl;
for(int i=0; i<Cols; i++) cout << setprecision(DEBUGPRECISION) << phi[i] << endl;
}
}
UploadMatrixToDevice(H,&d_m_val,&d_m_col);
AllocateSpaceOnDevice(&d_phi,(M+1)*Cols);
UploadToDeviceAndExpand(phi,(M+1)*Cols,&d_phi);
Normalize(0); // normalize the random vector
if(TRACE)
{
cout << "phi[0]:" << endl;
InspectDevice(&d_phi,Cols);
}
if(TRACE)
{
cout << "the matrix is: " << H << endl;
}
}
Lanczos_impl::Lanczos_impl(MyMatrix& H_in,int M_in, vector<MyField>& vec_in):
H(H_in),N(H.Ncols()),M(min(N,M_in)),Mstop(M),Niterations(0),phi(NULL),
lanczos_a(M),lanczos_b(M),LM(0),initnorm(1.),err(cudaSuccess),
Rows(H_in.Nrows()),Cols(H_in.Ncols()),Nc(H_in.NStoredCols()),Ntot(Rows*Nc),
d_m_val(NULL),d_m_col(NULL),d_phi(NULL)
{
if(WTRACE) cout << "Starting Lanczos with initial vector, M=" << M << endl;
phi = new MyField[(M+1)*Cols]; // allocate space for Lanczos vectors
for(int i=0; i<(M+1)*Cols; i++){ phi[i]=MyField(0.,0.); }
for(int i=0; i<Cols; i++) phi[i]=vec_in[i]; // initialize the vector
if(TRACE)
{
cout << "phi[0]:" << endl;
for(int i=0; i<Cols; i++) cout << phi[i] << " ";
}
UploadMatrixToDevice(H,&d_m_val,&d_m_col);
AllocateSpaceOnDevice(&d_phi,(M+1)*Cols);
UploadToDeviceAndExpand(phi,(M+1)*Cols,&d_phi);
// if(TRACE){ cout << "phi[0] after upload" << endl; PrintOutPhi(0);}
initnorm=Normalize(0); // record the normalization of the init vector, and normalize it
if(TRACE) cout << "Init norm is: " << initnorm << endl;
if(abs(initnorm) < TINYNUMBER){Mstop=0; Niterations=0;} // in case the starting vector is the zero vector
}
Lanczos_impl::~Lanczos_impl()
{
if(WTRACE) cout << "Destroying class Lanczos" << endl;
// cublasDestroy(handle);
delete[] phi;
delete LM;
}
void Lanczos_impl::DoLanczos()
{
if(TRACE) cout << "Starting DoLanczos (lanczos_cuda.cu)" << endl;
int m=0;
bool continueiteration=true;
while(continueiteration && m<Mstop){
if(TRACE) cout << "Doing Lanczos step m:" << m << " M: " << M << endl;
continueiteration=LanczosIteration(m++);
}
Niterations=m; // gives the number of lanczos vectors
if(TRACE){
cout << "There are in all " << Niterations << " orthogonal Lanczos vectors" << endl;
if(Niterations >0)
{
for(int i=0; i<Niterations-1; i++){
cout << "a[" << i << "]=" << lanczos_a[i] << " b[" << i << "]="<< lanczos_b[i] << endl;}
cout << "a[" << Niterations-1 << "]=" << lanczos_a[Niterations-1] << endl;
}
}
DownloadFromDeviceAndContract(&d_phi,(M+1)*Cols,phi);
// FreeMatrixOnDevice(&d_m_val,&d_m_col);
FreeMemoryOnDevice(&d_phi);
FreeMemoryOnDevice(&d_m_col);
FreeMemoryOnDevice(&d_m_val);
}
double Lanczos_impl::Normalize(int i)
{
if(WTRACE) cout << "in Lanczos_impl::Normalize" << endl;
double norm=0.;
/*
cuda_Dznrm2(Cols,&d_phi[i*Cols],&norm);
// cublasDznrm2(handle,Cols,&d_phi[i*Cols],1,&norm);
if(TRACE) cout << "Norm is " << setprecision(DEBUGPRECISION) << norm << endl;
double factor=1./norm;
cuda_Zdscal(Cols,&factor,&d_phi[i*Cols]);
// cublasZdscal(handle,Cols,&factor,&d_phi[i*Cols],1);
*/
cudap_Normalize(Cols,&d_phi[i*Cols],&norm);
if(TRACE) cout << "Norm is " << setprecision(DEBUGPRECISION) << norm << endl;
return norm;
}
/*
double Lanczos_impl::GetNorm(const int n,cuda_cmplx* d_z)
{
if(WTRACE) cout << "in Lanczos_impl::GetNorm" << endl;
double norm=0.;
cuda_Dznrm2(n,d_z,&norm);
//cublasDznrm2(handle,n,d_z,1,&norm);
return norm;
}
*/
void Lanczos_impl::RandomInitialization()
{
if(WTRACE) cout << "In RandomInitialization" << endl;
MyField* phi0=&phi[0];
for(int i=0; i<Cols; i++)
{
double r_real=2.*RAN.Get()-1.;
double r_imag=2.*RAN.Get()-1.;
MyField r=MyField(r_real,r_imag); // random start
phi0[i]=r;
}
}
void Lanczos_impl::CheckOrthogonality(int n,cmplxformat* d_z,const int m)
{
cout << "Checking orthogonality:" << endl;
for(int i=0; i<=m; i++)
{
// cuda_cmplx q=cmplx_0;
cmplxformat qp;
cmplxformat* d_phii=&d_phi[i*n];
cudap_cmplxdotproduct_cmplx(n,d_phii,d_z,&qp); // q=<phi_i|z>
/*
cuda_cmplx* d_phii=&d_phi[i*n];
cuda_CmplexDotProductCmplx(n,d_phii,d_z,&q);
// cublasZdotc(handle,n,d_phii,1,d_z,1,&q); // q=<z|phi_i>
*/
cuda_cmplx q;
q.x= todouble(qp.x);
q.y= todouble(qp.y);
if(cuCabs(q) > 1.e-13)
{
cout << "WARNING: Severe Loss of Orthogonality";
cout << "<phi[" << i << "]|z>=" << setprecision(DEBUGPRECISION) << q;
cout << endl;
}
}
}
void Lanczos_impl::Reorthogonalize(int n,cmplxformat* d_z,int m)
{
if(WTRACE) cout << "In Reorthogonalize" << endl;
for(int j=0; j<MGSREPEATSTEPS; j++) // 2 for Modified Gram-Schmidt with reorthogo..
for(int i=0; i<=m; i++)
{
cmplxformat q;
const cmplxformat* d_phii=&d_phi[i*n];
const cmplxformat* d_zconst= d_z;
cudap_cmplxdotproduct_cmplx(n,d_phii,d_zconst,&q); // q=<phi_i|z>
cudap_cmplx_Zmaxpy(n,&q,d_z,d_phii); // |z>=|z>-q*|phi_i>;
// cuda_ComplexDotProduct(n,d_phii,d_z,&q); // q=<phi_i|z>
//cublasZdotc(handle,n,d_phii,1,d_z,1,&q); // q=<phi_i|z>
//cuda_cmplx negq=cuCsub(cmplx_0,q);
//cuda_Zaxpy(n,&negq,d_z,d_phii); // |z>=|z>-q*|phi_i>;
//cublasZaxpy(handle,n,&negq, d_z,1,d_phii,1); // |z>=|z>-q*|phi_i>;
}
if(TRACE) cout << "Done Reorthogonalize" << endl;
}
bool Lanczos_impl::LanczosIteration(const int m)
{
if(WTRACE) cout << "Starting Lanczos iteration " << m << endl;
Niterations++;
cmplxformat* d_phim=&d_phi[m*Cols]; //already normalized
cmplxformat* d_z =&d_phi[(m+1)*Cols];
// if(TRACE) cout << "Launching cuda_mat_vec_multiply_cmplx with " << blocksPerGrid << " blocks and " << threadsPerBlock << " threads per block" << endl;
//cuda_mat_vec_multiply_cmplx<<<blocksPerGrid,threadsPerBlock>>>(Rows,Nc,d_m_col ,d_m_val, d_phim, d_z); // |z> = H |phi_m>
cudap_mat_vec_multiply_cmplx(Rows,Nc,d_m_col ,d_m_val, d_phim, d_z); // |z> = H |phi_m>
//cuda_mat_vec_multiply_cmplx_dd<<<blocksPerGrid,threadsPerBlock>>>(Rows,Nc,d_m_col ,d_m_val, d_phim, d_z); // |z> = H |phi_m>
err = cudaGetLastError();
if(TRACE) cout << "end of cuda_mat_vec_multiply_cmplx, last error" << err << endl;
if(TRACE)
{
cout << "H * phi[" << m << "]" << endl;
InspectDevice(&d_z,Cols);
}
if(TRACE) cout << "Launching ComplexDotProduct" << endl;
cudap_realdotproduct_cmplx(Cols,d_phim,d_z,&planczos_am); // a_m=<phi_m|z>, should be real
cudap_real_Zmaxpy(Cols,&planczos_am,d_z,d_phim); // |z>=|z>-a_m*|phi_m>
lanczos_a[m]=todouble(planczos_am);
if(TRACE) cout << "lanczos_a[" << m << "]=" << lanczos_a[m] << endl;
/*
// cublasZdotc(handle,Cols,d_phim,1,d_z,1,&a);
cuda_cmplx negam=make_cuDoubleComplex(-lanczos_a[m],0.);
cuda_Zaxpy(Cols,&negam,d_phim,d_z); // |z>=|z>-a_m*|phi_m>
//cublasZaxpy(handle,Cols,&negam,d_phim,1,d_z,1); // |z>=|z>-a_m*|phi_m>
*/
if(m>0)
{
/*
cuda_cmplx negbmm1=make_cuDoubleComplex(-lanczos_b[m-1],0.);
cuda_cmplx* d_phimm1=&d_phi[(m-1)*Cols];
*/
cmplxformat* d_phimm1=&d_phi[(m-1)*Cols]; //already normalized
realformat* bmm1_ptr= &planczos_bm; // point to the previous lanczos_bm
cudap_real_Zmaxpy(Cols,bmm1_ptr,d_z,d_phimm1); // |z>=|z>-b_(m-1)*|phi_(m-1)>
//cublasZaxpy(handle,Cols,&negbmm1,d_phimm1,1,d_z,1);//|z>=|z>-b_(m-1)*|phi_(m-1)>
}
#ifdef REORTHOGONALIZE
Reorthogonalize(Cols,d_z,m);
#else
#endif
if(TRACE) cout << "After Reorthogonalization" << endl;
if(TRACE) CheckOrthogonality(Cols,d_z,m);
realformat* bm_ptr= &planczos_bm;
cudap_norm_cmplx(Cols,d_z,bm_ptr);
lanczos_b[m]=todouble(planczos_bm);
if(m == M-1) // end of lanczos procedure
{
if(TRACE) cout << "Truncating the Lanczos procedure, reached max matrix size, lanczos_b[M-1]= " << lanczos_b[m] << " --> 0" << endl;
return false;
}
if(TRACE) cout << "lanczos_b[" << m << "]=" << lanczos_b[m] << endl;
if(abs(lanczos_b[m])<=LANCZOSEXITLIMIT){return false;} // stop the Lanczos procedure;
cudap_Zdinvscal(Cols,bm_ptr,d_z); // |z> -> |z>/bm
// double factor=1./beta;
// cuda_Zdscal(Cols,&factor,d_z); // |z> -> |z>/beta
//cublasZdscal(handle,Cols,&factor,d_z,1); // |z> -> |z>/beta
if(TRACE){
cout << "Lanczos vector phi[" << m+1 << "]: " << endl;
InspectDevice(&d_z,Cols);
}
return true;
}
void Lanczos_impl::DiagonalizeLanczosMatrix()
{
if(Niterations==0) return;
delete LM;
LM = new LanczosMatrix(Niterations,lanczos_a,lanczos_b);
LM->Diagonalize();
}
MyField* Lanczos_impl::GetLanczosEigenvectorinStateSpaceBasis(const int n)
{
MyField* phiM=&phi[M*Cols]; // the last one is used as storage
double* nu=LM->Eigenvector(n);
for(int i=0; i<N; i++){ phiM[i]=0;} // initialize to zero.
for(int p=0; p<Niterations; p++)
{
MyField* phip=&phi[p*Cols];
AddToVector(&N,phiM,phip,&nu[p]);
}
return phiM;
}
|
ccab57cc6054be23aafc685d16743787e3342868.hip | // !!! This is a file automatically generated by hipify!!!
/* ************************************************************************** */
/* */
/* ::: :::::::: */
/* redirect_photon.cu :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: jwalsh <[email protected]> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2017/06/05 16:00:41 by jwalsh #+# #+# */
/* Updated: 2017/06/08 16:45:00 by jwalsh ### ########.fr */
/* */
/* ************************************************************************** */
#include "rt.cuh"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
__device__
static void scale_color(t_ray *ray, float p, float k, t_vec3 obj_col);
/*
** Redirection functions based on Russian Roulette result.
*/
__device__
void redirect_photon_diffuse(t_raytracing_tools *r, t_ray *ray,
float p)
{
t_vec3 rand_dir;
hiprandState_t state;
ray->origin = v_add(ray->hit, v_scale(ray->nhit, ray->n_dir * BIAS));
state = *r->devStates;
rand_dir.x = r->rand_list[(r->idx + 3) % (r->scene->photons_per_pass * 3)];
rand_dir.y = r->rand_list[(r->idx + 4) % (r->scene->photons_per_pass * 3)];
rand_dir.z = r->rand_list[(r->idx + 5) % (r->scene->photons_per_pass * 3)];
*r->devStates = state;
rand_dir = v_norm(rand_dir);
ray->dir = (v_dot(rand_dir, v_scale(ray->nhit, ray->n_dir)) < 0) ?
v_scale(rand_dir, -1) : rand_dir;
scale_color(ray, p, r->scene->objects[ray->hit_obj].kd,
r->scene->objects[ray->hit_obj].col);
}
__device__
void redirect_photon_specular(t_raytracing_tools *r, t_ray *ray,
float p)
{
ray->origin = v_add(ray->hit, v_scale(ray->nhit, ray->n_dir * BIAS));
ray->dir = reflect(ray->dir, v_scale(ray->nhit, ray->n_dir));
scale_color(ray, p, r->scene->objects[ray->hit_obj].reflection,
r->scene->objects[ray->hit_obj].col);
}
__device__
void redirect_photon_transmit(t_raytracing_tools *r, t_ray *ray,
float p)
{
float n1;
float n2;
update_ior(&n1, &n2, r, ray);
ray->ior = n2;
ray->origin = v_add(ray->hit, v_scale(ray->nhit, -ray->n_dir * BIAS));
ray->dir = refract(ray->dir, v_scale(ray->nhit, ray->n_dir), n1, n2);
scale_color(ray, p, r->scene->objects[ray->hit_obj].transparency,
r->scene->objects[ray->hit_obj].col);
}
/*
** ray: incoming ray.
** p: probability of particular redirection (diffuse, reflected, refracted)
** k: coefficient of redirection (diffuse, reflection, transparency)
** obj_col: color of object hit
** P_refl = P_inc * k / p
*/
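/*
** Added note (not from the original source), a worked instance of the formula
** above: if the diffuse branch is taken with probability p = 0.6 and kd = 0.5
** on a white object (obj_col = 255,255,255), each channel of the photon power
** is multiplied by 0.5 * (255 / (0.6 * 255.0)) ~= 0.83, i.e. P_refl = P_inc * k / p.
*/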
__device__
static void scale_color(t_ray *ray, float p, float k, t_vec3 obj_col)
{
t_color p_inc;
p_inc = ray->col;
ray->col.r = p_inc.r * k * (obj_col.x / (p * 255.0));
ray->col.g = p_inc.g * k * (obj_col.y / (p * 255.0));
ray->col.b = p_inc.b * k * (obj_col.z / (p * 255.0));
}
| ccab57cc6054be23aafc685d16743787e3342868.cu | /* ************************************************************************** */
/* */
/* ::: :::::::: */
/* redirect_photon.cu :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: jwalsh <[email protected]> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2017/06/05 16:00:41 by jwalsh #+# #+# */
/* Updated: 2017/06/08 16:45:00 by jwalsh ### ########.fr */
/* */
/* ************************************************************************** */
#include "rt.cuh"
#include <curand.h>
#include <curand_kernel.h>
__device__
static void scale_color(t_ray *ray, float p, float k, t_vec3 obj_col);
/*
** Redirection functions based on Russian Roulette result.
*/
__device__
void redirect_photon_diffuse(t_raytracing_tools *r, t_ray *ray,
float p)
{
t_vec3 rand_dir;
curandState state;
ray->origin = v_add(ray->hit, v_scale(ray->nhit, ray->n_dir * BIAS));
state = *r->devStates;
rand_dir.x = r->rand_list[(r->idx + 3) % (r->scene->photons_per_pass * 3)];
rand_dir.y = r->rand_list[(r->idx + 4) % (r->scene->photons_per_pass * 3)];
rand_dir.z = r->rand_list[(r->idx + 5) % (r->scene->photons_per_pass * 3)];
*r->devStates = state;
rand_dir = v_norm(rand_dir);
ray->dir = (v_dot(rand_dir, v_scale(ray->nhit, ray->n_dir)) < 0) ?
v_scale(rand_dir, -1) : rand_dir;
scale_color(ray, p, r->scene->objects[ray->hit_obj].kd,
r->scene->objects[ray->hit_obj].col);
}
__device__
void redirect_photon_specular(t_raytracing_tools *r, t_ray *ray,
float p)
{
ray->origin = v_add(ray->hit, v_scale(ray->nhit, ray->n_dir * BIAS));
ray->dir = reflect(ray->dir, v_scale(ray->nhit, ray->n_dir));
scale_color(ray, p, r->scene->objects[ray->hit_obj].reflection,
r->scene->objects[ray->hit_obj].col);
}
__device__
void redirect_photon_transmit(t_raytracing_tools *r, t_ray *ray,
float p)
{
float n1;
float n2;
update_ior(&n1, &n2, r, ray);
ray->ior = n2;
ray->origin = v_add(ray->hit, v_scale(ray->nhit, -ray->n_dir * BIAS));
ray->dir = refract(ray->dir, v_scale(ray->nhit, ray->n_dir), n1, n2);
scale_color(ray, p, r->scene->objects[ray->hit_obj].transparency,
r->scene->objects[ray->hit_obj].col);
}
/*
** ray: incoming ray.
** p: probability of particular redirection (diffuse, reflected, refracted)
** k: coefficient of redirection (diffuse, reflection, transparency)
** obj_col: color of object hit
** P_refl = P_inc * k / p
*/
__device__
static void scale_color(t_ray *ray, float p, float k, t_vec3 obj_col)
{
t_color p_inc;
p_inc = ray->col;
ray->col.r = p_inc.r * k * (obj_col.x / (p * 255.0));
ray->col.g = p_inc.g * k * (obj_col.y / (p * 255.0));
ray->col.b = p_inc.b * k * (obj_col.z / (p * 255.0));
}
|
218d70ea8b462b60c3f4676ec617493c4cac2dc3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
//int main(int argc, char **argv)
//{
// // memory size 128 MBs
// int isize = 1<<25;
// int nbytes = isize * sizeof(float);
//
// // allocate the host memory
// //float *h_a = (float *)malloc(nbytes);
// float *h_a;
// hipHostMalloc((float **)&h_a, nbytes);
//
// // allocate the device memory
// float *d_a;
// hipMalloc((float **)&d_a, nbytes);
//
// // initialize the host memory
// for(int i=0;i<isize;i++)
// h_a[i] = 7;
//
// // transfer data from the host to the device
// hipMemcpy(d_a, h_a, nbytes, hipMemcpyHostToDevice);
//
// // transfer data from the device to the host
// hipMemcpy(h_a, d_a, nbytes, hipMemcpyDeviceToHost);
//
// // free memory
// hipFree(d_a);
// //free(h_a);
// hipHostFree(h_a);
//
// // reset device
// hipDeviceReset();
// return EXIT_SUCCESS;
//} | 218d70ea8b462b60c3f4676ec617493c4cac2dc3.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
//int main(int argc, char **argv)
//{
// // memory size 128 MBs
// int isize = 1<<25;
// int nbytes = isize * sizeof(float);
//
// // allocate the host memory
// //float *h_a = (float *)malloc(nbytes);
// float *h_a;
// cudaMallocHost((float **)&h_a, nbytes);
//
// // allocate the device memory
// float *d_a;
// cudaMalloc((float **)&d_a, nbytes);
//
// // initialize the host memory
// for(int i=0;i<isize;i++)
// h_a[i] = 7;
//
// // transfer data from the host to the device
// cudaMemcpy(d_a, h_a, nbytes, cudaMemcpyHostToDevice);
//
// // transfer data from the device to the host
// cudaMemcpy(h_a, d_a, nbytes, cudaMemcpyDeviceToHost);
//
// // free memory
// cudaFree(d_a);
// //free(h_a);
// cudaFreeHost(h_a);
//
// // reset device
// cudaDeviceReset();
// return EXIT_SUCCESS;
//} |
56567b28b0fd9b96ecf1121d577c42f201e4eeb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _HOTPLATE_KERNEL_H_
#define _HOTPLATE_KERNEL_H_
// The width of external cells that surround the plate's cells
#define BOUNDARYCELLS 2
#define THREAD_BLOCK_WIDTH 128
#define THREAD_BLOCK_HEIGHT 4
#define FOR(A) for(int A=1;A<=BOUNDARYCELLS;A++)
#define SYNCHRONIZATION __syncthreads()
#define NN_MEAN(array,a,b) (array[a][b+1]+array[a+1][b]+array[a-1][b]+array[a][b-1])*0.25
#define GHOST 4
//Used several macros as opposed to functions to provide opportunities for vectorization
#define GRID_BOTTOM_BLOCK (blockIdx.y==(gridDim.y-1))
#define LEFT_THREAD (!threadIdx.x)
#define TOP_THREAD (!threadIdx.y)
#define RIGHT_THREAD (threadIdx.x==(THREAD_BLOCK_WIDTH-1))
#define BOTTOM_THREAD (threadIdx.y==(THREAD_BLOCK_HEIGHT-1))
#define GRID_RIGHT_BLOCK (blockIdx.x==(gridDim.x-1)) /* assumed definition: used below but missing in the original, by analogy with GRID_BOTTOM_BLOCK */
#define COPY(A,B,C) shared_cells[A][B]=g_plateblock_cellsInput[C];
void create_snapshot(float **cells, int n_x, int n_y, int id);
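// Added note (not part of the original source): the kernel below stages each
// block's tile plus a BOUNDARYCELLS-wide halo of ghost cells in shared memory,
// relaxes the tile once, locally advances the halo ring one step, and then
// relaxes the tile a second time before writing back, so a single launch
// advances the interior cells by two Jacobi steps without global synchronization.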
__global__ void Kernel(
dim3 plateDims,
float* g_plateblock_cellsInput,
float* g_plateblock_cellsOutput, dim3 gridDims)
{
//Executing thread's location
dim3 myCell((blockIdx.x*blockDim.x)+threadIdx.x+BOUNDARYCELLS,
(blockIdx.y*blockDim.y)+threadIdx.y+BOUNDARYCELLS);
if ((myCell.x >= plateDims.x-BOUNDARYCELLS) || (myCell.y >= plateDims.y - BOUNDARYCELLS)) {
// My location is outside of the plate boundaries
return;
}
/*calculating positions with respect to 1D array and thread position on the grid*/
int left=myCell.x-1;
int right=myCell.x+1;
int top=myCell.y-1;
int bottom=myCell.y+1;
int me=myCell.x;
int bc2=BOUNDARYCELLS;
int x_offset_in_block=threadIdx.x+BOUNDARYCELLS; //column
int y_offset_in_block=threadIdx.y+BOUNDARYCELLS; //row
bc2=bc2<<1;
float res=0.0f;
__shared__ float shared_cells[THREAD_BLOCK_HEIGHT+(2*BOUNDARYCELLS)][THREAD_BLOCK_WIDTH+(BOUNDARYCELLS*2)];
shared_cells[y_offset_in_block][x_offset_in_block]=g_plateblock_cellsInput[myCell.y*plateDims.x+myCell.x]; //I put my value first
//Top thread gets the required cells at the top
if(TOP_THREAD)
{
FOR(i)
{
top=myCell.y-i;
int temp=y_offset_in_block-i;
COPY(temp, x_offset_in_block, top*plateDims.x+me)
}
//Left thread gets ghost cells
if(LEFT_THREAD)
{
FOR(i)
{
top=myCell.y-i;
int temp=y_offset_in_block-i;
FOR(j)
{
left=myCell.x-j;
COPY(temp, x_offset_in_block - j, top*plateDims.x+left)
}
}
}
//unless the executing thread handles the right block
if(!GRID_RIGHT_BLOCK)
{
//Right Threads get ghost cells
if(RIGHT_THREAD)
{
FOR(i)
{
top=myCell.y-i;
int temp=y_offset_in_block-i;
FOR(j)
{
right=myCell.x+j;
COPY(temp, x_offset_in_block + j, top*plateDims.x+right)
}
}
}
}
//Executing thread handles grid's right block
else
{
//Last thread in the right block
if(threadIdx.x==(plateDims.x-(bc2)-1)%THREAD_BLOCK_WIDTH)
{
FOR(i)
{
top=myCell.y-i;
int temp=y_offset_in_block-i;
FOR(j)
{
right=myCell.x+j;
COPY(temp, x_offset_in_block + j, top*plateDims.x+right)
}
}
}
}
}
if(!GRID_BOTTOM_BLOCK)
{
//bottom thread gets required bottom cells
if(BOTTOM_THREAD)
{
FOR(i)
{
int temp=y_offset_in_block+i;
bottom=myCell.y+i;
COPY(temp, x_offset_in_block, bottom*plateDims.x+me)
}
//get ghost cells
if(LEFT_THREAD)
{
FOR(i)
{
bottom=myCell.y+i;
FOR(j)
{
left=myCell.x-j;
COPY(y_offset_in_block+i, x_offset_in_block - j, bottom*plateDims.x+left)
}
}
}
if(GRID_RIGHT_BLOCK)
{
if(RIGHT_THREAD)
{
FOR(i)
{
bottom=myCell.y+i;
int temp=y_offset_in_block+i;
FOR(j)
{
COPY(temp, x_offset_in_block+j, bottom*plateDims.x+right)
}
}
}
}
else
{
if(threadIdx.x==(plateDims.x-bc2-1)%THREAD_BLOCK_WIDTH)
{
FOR(i)
{
bottom=myCell.y+i;
FOR(j)
{
right=myCell.x+j;
COPY(y_offset_in_block+i, x_offset_in_block+j, bottom*plateDims.x+right)
}
}
}
}
/***********************/
}
}
else
{
if(threadIdx.y==(plateDims.y-(bc2)-1)%THREAD_BLOCK_HEIGHT)
{
FOR(i)
shared_cells[y_offset_in_block+i][x_offset_in_block]=g_plateblock_cellsInput[(myCell.y+i)*plateDims.x +me];
if(LEFT_THREAD)
{
FOR(i)
{
bottom=myCell.y+i;
FOR(j)
{
left=myCell.x-j;
COPY(y_offset_in_block+i, x_offset_in_block-j, bottom*plateDims.x+left)
}
}
}
if(!GRID_RIGHT_BLOCK)
{
if(RIGHT_THREAD)
{
FOR(i)
{
bottom=myCell.y+i;
FOR(j)
{
right=myCell.x+j;
COPY(y_offset_in_block+i, x_offset_in_block+j, bottom*plateDims.x+right)
}
}
}
}
else
{
if(threadIdx.x==(plateDims.x-bc2-1)%THREAD_BLOCK_WIDTH)
{
FOR(i)
{
bottom=myCell.y+i;
FOR(j)
COPY(y_offset_in_block+i, x_offset_in_block+j, bottom*plateDims.x+myCell.x+j)
}
}
}
}
}
if(LEFT_THREAD)
{
FOR(i)
COPY(y_offset_in_block, x_offset_in_block-i, myCell.y*plateDims.x+myCell.x-i)
}
if(!GRID_RIGHT_BLOCK)
{
if(RIGHT_THREAD)
{
FOR(i)
COPY(y_offset_in_block, x_offset_in_block+i, myCell.y*plateDims.x+myCell.x+i)
}
}
else
{ if(threadIdx.x==(plateDims.x-bc2-1)%THREAD_BLOCK_WIDTH)
{
FOR(i)
COPY(y_offset_in_block, x_offset_in_block+i, myCell.y*plateDims.x+myCell.x+i)
}
}
SYNCHRONIZATION;
res=NN_MEAN(shared_cells,y_offset_in_block,x_offset_in_block);
//Move all the ghost cells to iteration i+1
float abv_new_state=0.0f,blw_new_state=0.0f,left_new_state=0.0f,right_new_state=0.0f;
float abv_new_state_array[BOUNDARYCELLS];
//Hacky. Boundary blocks should not update ghosts since cells are immutable
if(TOP_THREAD)
{
//update ghosts too
if(!blockIdx.y)
{
for(int i=1;i<BOUNDARYCELLS;i++)
abv_new_state_array[i-1]=shared_cells[y_offset_in_block-i][x_offset_in_block];
}
//The first block on the grid should not update ghost cells !
else
{
for(int i=1;i<BOUNDARYCELLS;i++)
abv_new_state_array[i-1]=(shared_cells[y_offset_in_block-i-1][x_offset_in_block] +
shared_cells[y_offset_in_block-i][x_offset_in_block-i] +
shared_cells[y_offset_in_block-i][x_offset_in_block+i] +
shared_cells[y_offset_in_block][x_offset_in_block])*0.25f;
}
abv_new_state=abv_new_state_array[0];
}
if(LEFT_THREAD)
{
if(!blockIdx.x)
left_new_state=shared_cells[y_offset_in_block][x_offset_in_block-1];
else
left_new_state=(shared_cells[y_offset_in_block-1][x_offset_in_block-1] +
shared_cells[y_offset_in_block][x_offset_in_block-2] +
shared_cells[y_offset_in_block][x_offset_in_block]+
shared_cells[1+y_offset_in_block][x_offset_in_block-1])*0.25f;
}
if(!GRID_RIGHT_BLOCK)
{
if(RIGHT_THREAD)
right_new_state=(shared_cells[y_offset_in_block-1][1+x_offset_in_block] +
shared_cells[y_offset_in_block][x_offset_in_block]+
shared_cells[y_offset_in_block][x_offset_in_block+2] +
shared_cells[1+y_offset_in_block][1+x_offset_in_block])*0.25f;
}
else
{
if(threadIdx.x==(plateDims.x-bc2-1)%THREAD_BLOCK_WIDTH)
right_new_state=shared_cells[y_offset_in_block][1+x_offset_in_block];
}
if(!GRID_BOTTOM_BLOCK)
{
if(BOTTOM_THREAD)
{
blw_new_state=(shared_cells[y_offset_in_block][x_offset_in_block]+
shared_cells[1+y_offset_in_block][x_offset_in_block-1]+
shared_cells[1+y_offset_in_block][1+x_offset_in_block] +
shared_cells[2+y_offset_in_block][x_offset_in_block])*0.25f;
}
}
else
{
if(threadIdx.y==(plateDims.y-bc2-1)%THREAD_BLOCK_HEIGHT) //gives the last thread
blw_new_state=shared_cells[1+y_offset_in_block][x_offset_in_block];
}
/****************Relaxation*****************************/
SYNCHRONIZATION;
shared_cells[y_offset_in_block][x_offset_in_block]=res;
// All boundary cells update state !
if(TOP_THREAD)
shared_cells[y_offset_in_block-1][x_offset_in_block]=abv_new_state;
if(LEFT_THREAD)
shared_cells[y_offset_in_block][x_offset_in_block-1]=left_new_state;
if(!GRID_RIGHT_BLOCK)
{
if(RIGHT_THREAD)
shared_cells[y_offset_in_block][1+x_offset_in_block]=right_new_state;
}
else
{
if(threadIdx.x==(plateDims.x-(bc2)-1)%THREAD_BLOCK_WIDTH)
shared_cells[y_offset_in_block][1+x_offset_in_block]=right_new_state;
}
if(!GRID_BOTTOM_BLOCK) {
if(BOTTOM_THREAD)
shared_cells[1+y_offset_in_block][x_offset_in_block]=blw_new_state;
}
else
{
if(threadIdx.y==(plateDims.y-(bc2)-1)%THREAD_BLOCK_HEIGHT)
shared_cells[1+y_offset_in_block][x_offset_in_block]=blw_new_state;
}
SYNCHRONIZATION;
res=NN_MEAN(shared_cells,y_offset_in_block,x_offset_in_block);
g_plateblock_cellsOutput[myCell.y*plateDims.x+me]=res;
}
#endif
//debug function
void print_matrix(float**u)
{
int i,j;
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
{
printf("%f ",u[i][j]);
}
printf("\n");
}
}
float **allocate_cells(int num_cols, int num_rows) {
float **array = (float **) malloc(num_rows * sizeof(float *));
array[0] = (float *) malloc(num_rows * num_cols * sizeof(float));
int i;
for (i = 1; i < num_rows; i++) {
array[i] = array[0] + (i * num_cols);
}
return array;
}
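// Added note (not from the original source): allocate_cells() returns row
// pointers into one contiguous num_rows x num_cols block, so row i starts at
// array[0] + i*num_cols and the whole grid can be released by freeing array[0]
// and the pointer array.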
// Sets all of the specified cells to their initial value.
int main()
{
float *cells[2], *cells_gpu[2], **steady_state;
int i,j;
size_t size;
float h = 1.0/SIZE;
size=SIZE*SIZE*sizeof(float);
printf("Necesitamos %d Mb\n",3*size/1024/1024);
cells[0] = (float*)malloc(size);
cells[1] = (float*)malloc(size);
steady_state = allocate_cells(SIZE, SIZE);
//Allocate GPU memory
hipMalloc(&cells_gpu[0],size);
hipMalloc(&cells_gpu[1],size);
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
{
cells[0][i*SIZE+j] = cells[1][i*SIZE+j] = INITIAL_CELL_VALUE;
}
}
for(i=0;i<SIZE;i++)
{
cells[0][i] = cells[1][i] = TOP_BOUNDARY_VALUE;
cells[0][i*SIZE] = cells[1][i*SIZE] = LEFT_BOUNDARY_VALUE;
cells[0][SIZE*(SIZE-1)+i] = cells[1][SIZE*(SIZE-1)+i] = BOTTOM_BOUNDARY_VALUE;
cells[0][i*SIZE+SIZE-1] = cells[1][i*SIZE+SIZE-1] = RIGHT_BOUNDARY_VALUE;
}
hipMemcpy(cells_gpu[0], cells[0], size, hipMemcpyHostToDevice);
hipMemcpy(cells_gpu[1], cells[1], size, hipMemcpyHostToDevice);
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 dimGrid(SIZE/BLOCK_SIZE,SIZE/BLOCK_SIZE);
time_t start_time = time(NULL);
//perform relaxation
for(i=0;i<ITERATIONS/GHOST;i++)
{
if(i%2==0)
// NB: the original launched "jacobi", which is not defined in this file; Kernel above matches this call signature
hipLaunchKernelGGL(( Kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, dimBlock, cells_gpu[0], cells_gpu[1], dimGrid);
else
hipLaunchKernelGGL(( Kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, dimBlock, cells_gpu[1], cells_gpu[0], dimGrid);
if(i%10==0)
printf("iter=%d\n",i);
}
int final_cells = (ITERATIONS % 2 == 0) ? 1 : 0;
hipMemcpy(cells[0], cells_gpu[final_cells], size, hipMemcpyDeviceToHost);
//copy the 1D cells to a 2D array for ppm conversion
for(i=0;i<SIZE;i++)
{
for(j=0; j < SIZE; j++){
steady_state[i][j] = cells[0][i*SIZE+j];
// printf("%f ", cells[0][i*SIZE+j]);
}
//printf("\n");
}
time_t end_time = time(NULL);
printf("\nExecution time: %d seconds\n", (int) difftime(end_time, start_time));
//create_snapshot(cells, SIZE-2, SIZE-2, ITERATIONS);
/* Free memory */
free(cells[0]);
free(cells[1]);
hipFree(cells_gpu[0]);
hipFree(cells_gpu[1]);
}
void create_snapshot(float **cells, int num_cols, int num_rows, int id) {
/*code omitted*/
}
| 56567b28b0fd9b96ecf1121d577c42f201e4eeb5.cu | #ifndef _HOTPLATE_KERNEL_H_
#define _HOTPLATE_KERNEL_H_
// The width of external cells that surround the plate's cells
#define BOUNDARYCELLS 2
#define THREAD_BLOCK_WIDTH 128
#define THREAD_BLOCK_HEIGHT 4
#define FOR(A) for(int A=1;A<=BOUNDARYCELLS;A++)
#define SYNCHRONIZATION __syncthreads()
#define NN_MEAN(array,a,b) (array[a][b+1]+array[a+1][b]+array[a-1][b]+array[a][b-1])*0.25
#define GHOST 4
//Used several macros as opposed to functions to provide opportunities for vectorization
#define GRID_BOTTOM_BLOCK (blockIdx.y==(gridDim.y-1))
#define LEFT_THREAD (!threadIdx.x)
#define TOP_THREAD (!threadIdx.y)
#define RIGHT_THREAD (threadIdx.x==(THREAD_BLOCK_WIDTH-1))
#define BOTTOM_THREAD (threadIdx.y==(THREAD_BLOCK_HEIGHT-1))
#define GRID_RIGHT_BLOCK (blockIdx.x==(gridDim.x-1)) /* assumed definition: used below but missing in the original, by analogy with GRID_BOTTOM_BLOCK */
#define COPY(A,B,C) shared_cells[A][B]=g_plateblock_cellsInput[C];
void create_snapshot(float **cells, int n_x, int n_y, int id);
__global__ void Kernel(
dim3 plateDims,
float* g_plateblock_cellsInput,
float* g_plateblock_cellsOutput, dim3 gridDims)
{
//Executing thread's location
dim3 myCell((blockIdx.x*blockDim.x)+threadIdx.x+BOUNDARYCELLS,
(blockIdx.y*blockDim.y)+threadIdx.y+BOUNDARYCELLS);
if ((myCell.x >= plateDims.x-BOUNDARYCELLS) || (myCell.y >= plateDims.y - BOUNDARYCELLS)) {
// My location is outside of the plate boundaries
return;
}
/*calculating positions with respect to 1D array and thread position on the grid*/
int left=myCell.x-1;
int right=myCell.x+1;
int top=myCell.y-1;
int bottom=myCell.y+1;
int me=myCell.x;
int bc2=BOUNDARYCELLS;
int x_offset_in_block=threadIdx.x+BOUNDARYCELLS; //column
int y_offset_in_block=threadIdx.y+BOUNDARYCELLS; //row
bc2=bc2<<1;
float res=0.0f;
__shared__ float shared_cells[THREAD_BLOCK_HEIGHT+(2*BOUNDARYCELLS)][THREAD_BLOCK_WIDTH+(BOUNDARYCELLS*2)];
shared_cells[y_offset_in_block][x_offset_in_block]=g_plateblock_cellsInput[myCell.y*plateDims.x+myCell.x]; //I put my value first
//Top thread gets the required cells at the top
if(TOP_THREAD)
{
FOR(i)
{
top=myCell.y-i;
int temp=y_offset_in_block-i;
COPY(temp, x_offset_in_block, top*plateDims.x+me)
}
//Left thread gets ghost cells
if(LEFT_THREAD)
{
FOR(i)
{
top=myCell.y-i;
int temp=y_offset_in_block-i;
FOR(j)
{
left=myCell.x-j;
COPY(temp, x_offset_in_block - j, top*plateDims.x+left)
}
}
}
//unless the executing thread handles the right block
if(!GRID_RIGHT_BLOCK)
{
//Right Threads get ghost cells
if(RIGHT_THREAD)
{
FOR(i)
{
top=myCell.y-i;
int temp=y_offset_in_block-i;
FOR(j)
{
right=myCell.x+j;
COPY(temp, x_offset_in_block + j, top*plateDims.x+right)
}
}
}
}
//Executing thread handles grid's right block
else
{
//Last thread in the right block
if(threadIdx.x==(plateDims.x-(bc2)-1)%THREAD_BLOCK_WIDTH)
{
FOR(i)
{
top=myCell.y-i;
int temp=y_offset_in_block-i;
FOR(j)
{
right=myCell.x+j;
COPY(temp, x_offset_in_block + j, top*plateDims.x+right)
}
}
}
}
}
if(!GRID_BOTTOM_BLOCK)
{
//bottom thread gets required bottom cells
if(BOTTOM_THREAD)
{
FOR(i)
{
int temp=y_offset_in_block+i;
bottom=myCell.y+i;
COPY(temp, x_offset_in_block, bottom*plateDims.x+me)
}
//get ghost cells
if(LEFT_THREAD)
{
FOR(i)
{
bottom=myCell.y+i;
FOR(j)
{
left=myCell.x-j;
COPY(y_offset_in_block+i, x_offset_in_block - j, bottom*plateDims.x+left)
}
}
}
if(GRID_RIGHT_BLOCK)
{
if(RIGHT_THREAD)
{
FOR(i)
{
bottom=myCell.y+i;
int temp=y_offset_in_block+i;
FOR(j)
{
COPY(temp, x_offset_in_block+j, bottom*plateDims.x+right)
}
}
}
}
else
{
if(threadIdx.x==(plateDims.x-bc2-1)%THREAD_BLOCK_WIDTH)
{
FOR(i)
{
bottom=myCell.y+i;
FOR(j)
{
right=myCell.x+j;
COPY(y_offset_in_block+i, x_offset_in_block+j, bottom*plateDims.x+right)
}
}
}
}
/***********************/
}
}
else
{
if(threadIdx.y==(plateDims.y-(bc2)-1)%THREAD_BLOCK_HEIGHT)
{
FOR(i)
shared_cells[y_offset_in_block+i][x_offset_in_block]=g_plateblock_cellsInput[(myCell.y+i)*plateDims.x +me];
if(LEFT_THREAD)
{
FOR(i)
{
bottom=myCell.y+i;
FOR(j)
{
left=myCell.x-j;
COPY(y_offset_in_block+i, x_offset_in_block-j, bottom*plateDims.x+left)
}
}
}
if(!GRID_RIGHT_BLOCK)
{
if(RIGHT_THREAD)
{
FOR(i)
{
bottom=myCell.y+i;
FOR(j)
{
right=myCell.x+j;
COPY(y_offset_in_block+i, x_offset_in_block+j, bottom*plateDims.x+right)
}
}
}
}
else
{
if(threadIdx.x==(plateDims.x-bc2-1)%THREAD_BLOCK_WIDTH)
{
FOR(i)
{
bottom=myCell.y+i;
FOR(j)
COPY(y_offset_in_block+i, x_offset_in_block+j, bottom*plateDims.x+myCell.x+j)
}
}
}
}
}
if(LEFT_THREAD)
{
FOR(i)
COPY(y_offset_in_block, x_offset_in_block-i, myCell.y*plateDims.x+myCell.x-i)
}
if(!GRID_RIGHT_BLOCK)
{
if(RIGHT_THREAD)
{
FOR(i)
COPY(y_offset_in_block, x_offset_in_block+i, myCell.y*plateDims.x+myCell.x+i)
}
}
else
{ if(threadIdx.x==(plateDims.x-bc2-1)%THREAD_BLOCK_WIDTH)
{
FOR(i)
COPY(y_offset_in_block, x_offset_in_block+i, myCell.y*plateDims.x+myCell.x+i)
}
}
SYNCHRONIZATION;
res=NN_MEAN(shared_cells,y_offset_in_block,x_offset_in_block);
//Move all the ghost cells to iteration i+1
float abv_new_state=0.0f,blw_new_state=0.0f,left_new_state=0.0f,right_new_state=0.0f;
float abv_new_state_array[BOUNDARYCELLS];
//Hacky. Boundary blocks should not update ghosts since cells are immutable
if(TOP_THREAD)
{
//update ghosts too
if(!blockIdx.y)
{
for(int i=1;i<BOUNDARYCELLS;i++)
abv_new_state_array[i-1]=shared_cells[y_offset_in_block-i][x_offset_in_block];
}
//The first block on the grid should not update ghost cells !
else
{
for(int i=1;i<BOUNDARYCELLS;i++)
abv_new_state_array[i-1]=(shared_cells[y_offset_in_block-i-1][x_offset_in_block] +
shared_cells[y_offset_in_block-i][x_offset_in_block-i] +
shared_cells[y_offset_in_block-i][x_offset_in_block+i] +
shared_cells[y_offset_in_block][x_offset_in_block])*0.25f;
}
abv_new_state=abv_new_state_array[0];
}
if(LEFT_THREAD)
{
if(!blockIdx.x)
left_new_state=shared_cells[y_offset_in_block][x_offset_in_block-1];
else
left_new_state=(shared_cells[y_offset_in_block-1][x_offset_in_block-1] +
shared_cells[y_offset_in_block][x_offset_in_block-2] +
shared_cells[y_offset_in_block][x_offset_in_block]+
shared_cells[1+y_offset_in_block][x_offset_in_block-1])*0.25f;
}
if(!GRID_RIGHT_BLOCK)
{
if(RIGHT_THREAD)
right_new_state=(shared_cells[y_offset_in_block-1][1+x_offset_in_block] +
shared_cells[y_offset_in_block][x_offset_in_block]+
shared_cells[y_offset_in_block][x_offset_in_block+2] +
shared_cells[1+y_offset_in_block][1+x_offset_in_block])*0.25f;
}
else
{
if(threadIdx.x==(plateDims.x-bc2-1)%THREAD_BLOCK_WIDTH)
right_new_state=shared_cells[y_offset_in_block][1+x_offset_in_block];
}
if(!GRID_BOTTOM_BLOCK)
{
if(BOTTOM_THREAD)
{
blw_new_state=(shared_cells[y_offset_in_block][x_offset_in_block]+
shared_cells[1+y_offset_in_block][x_offset_in_block-1]+
shared_cells[1+y_offset_in_block][1+x_offset_in_block] +
shared_cells[2+y_offset_in_block][x_offset_in_block])*0.25f;
}
}
else
{
if(threadIdx.y==(plateDims.y-bc2-1)%THREAD_BLOCK_HEIGHT) //gives the last thread
blw_new_state=shared_cells[1+y_offset_in_block][x_offset_in_block];
}
/****************Relaxation*****************************/
SYNCHRONIZATION;
shared_cells[y_offset_in_block][x_offset_in_block]=res;
// All boundary cells update state !
if(TOP_THREAD)
shared_cells[y_offset_in_block-1][x_offset_in_block]=abv_new_state;
if(LEFT_THREAD)
shared_cells[y_offset_in_block][x_offset_in_block-1]=left_new_state;
if(!GRID_RIGHT_BLOCK)
{
if(RIGHT_THREAD)
shared_cells[y_offset_in_block][1+x_offset_in_block]=right_new_state;
}
else
{
if(threadIdx.x==(plateDims.x-(bc2)-1)%THREAD_BLOCK_WIDTH)
shared_cells[y_offset_in_block][1+x_offset_in_block]=right_new_state;
}
if(!GRID_BOTTOM_BLOCK) {
if(BOTTOM_THREAD)
shared_cells[1+y_offset_in_block][x_offset_in_block]=blw_new_state;
}
else
{
if(threadIdx.y==(plateDims.y-(bc2)-1)%THREAD_BLOCK_HEIGHT)
shared_cells[1+y_offset_in_block][x_offset_in_block]=blw_new_state;
}
SYNCHRONIZATION;
res=NN_MEAN(shared_cells,y_offset_in_block,x_offset_in_block);
g_plateblock_cellsOutput[myCell.y*plateDims.x+me]=res;
}
#endif
//debug function
void print_matrix(float**u)
{
int i,j;
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
{
printf("%f ",u[i][j]);
}
printf("\n");
}
}
float **allocate_cells(int num_cols, int num_rows) {
float **array = (float **) malloc(num_rows * sizeof(float *));
array[0] = (float *) malloc(num_rows * num_cols * sizeof(float));
int i;
for (i = 1; i < num_rows; i++) {
array[i] = array[0] + (i * num_cols);
}
return array;
}
// Sets all of the specified cells to their initial value.
int main()
{
float *cells[2], *cells_gpu[2], **steady_state;
int i,j;
size_t size;
float h = 1.0/SIZE;
size=SIZE*SIZE*sizeof(float);
printf("Necesitamos %d Mb\n",3*size/1024/1024);
cells[0] = (float*)malloc(size);
cells[1] = (float*)malloc(size);
steady_state = allocate_cells(SIZE, SIZE);
//Allocate GPU memory
cudaMalloc(&cells_gpu[0],size);
cudaMalloc(&cells_gpu[1],size);
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
{
cells[0][i*SIZE+j] = cells[1][i*SIZE+j] = INITIAL_CELL_VALUE;
}
}
for(i=0;i<SIZE;i++)
{
cells[0][i] = cells[1][i] = TOP_BOUNDARY_VALUE;
cells[0][i*SIZE] = cells[1][i*SIZE] = LEFT_BOUNDARY_VALUE;
cells[0][SIZE*(SIZE-1)+i] = cells[1][SIZE*(SIZE-1)+i] = BOTTOM_BOUNDARY_VALUE;
cells[0][i*SIZE+SIZE-1] = cells[1][i*SIZE+SIZE-1] = RIGHT_BOUNDARY_VALUE;
}
cudaMemcpy(cells_gpu[0], cells[0], size, cudaMemcpyHostToDevice);
cudaMemcpy(cells_gpu[1], cells[1], size, cudaMemcpyHostToDevice);
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 dimGrid(SIZE/BLOCK_SIZE,SIZE/BLOCK_SIZE);
time_t start_time = time(NULL);
//perform relaxation
for(i=0;i<ITERATIONS/GHOST;i++)
{
if(i%2==0)
// NB: the original launched "jacobi", which is not defined in this file; Kernel above matches this call signature
Kernel<<<dimGrid,dimBlock>>>(dimBlock, cells_gpu[0], cells_gpu[1], dimGrid);
else
Kernel<<<dimGrid,dimBlock>>>(dimBlock, cells_gpu[1], cells_gpu[0], dimGrid);
if(i%10==0)
printf("iter=%d\n",i);
}
int final_cells = (ITERATIONS % 2 == 0) ? 1 : 0;
cudaMemcpy(cells[0], cells_gpu[final_cells], size, cudaMemcpyDeviceToHost);
//copy the 1D cells to a 2D array for ppm conversion
for(i=0;i<SIZE;i++)
{
for(j=0; j < SIZE; j++){
steady_state[i][j] = cells[0][i*SIZE+j];
// printf("%f ", cells[0][i*SIZE+j]);
}
//printf("\n");
}
time_t end_time = time(NULL);
printf("\nExecution time: %d seconds\n", (int) difftime(end_time, start_time));
//create_snapshot(cells, SIZE-2, SIZE-2, ITERATIONS);
/* Liberamos memoria */
free(cells[0]);
free(cells[1]);
cudaFree(cells_gpu[0]);
cudaFree(cells_gpu[1]);
}
void create_snapshot(float **cells, int num_cols, int num_rows, int id) {
/*code omitted*/
}
|
6805dcf376e61695125e904499da3f92403f0c0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Created by Francesco Sgherzi on 15/04/19.
//
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <iostream>
#include <chrono>
#include <fstream>
#include <map>
#include <vector>
#include <algorithm>
#include <thrust/inner_product.h>
#include <thrust/device_ptr.h>
#include <thrust/count.h>
#include <thrust/execution_policy.h>
#include "Parse/Parse.h"
#include "Utils/Utils.h"
#define TAU 0.0
#define ALPHA 0.85
#define MAX_B 1024
#define MAX_T 1024
#define DEBUG true
#define USE_NO_OPTIMIZATION false
#define USE_L2_NORM true
#define USE_L2_NORM_BITMASK false
#define GRAPH_TYPE ((std::string) "smw")
#define PYTHON_PAGERANK_VALUES false
#define PYTHON_CONVERGENCE_ERROR_OUT false
#define MAX_ITER 200
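// Added note (not part of the original source): the USE_* switches above select
// the convergence check used in the main loop. USE_NO_OPTIMIZATION requires
// every component of the error vector to drop to/below TAU (thrust::count_if),
// USE_L2_NORM compares the squared L2 norm of the error against TAU, and
// USE_L2_NORM_BITMASK does the same while keeping a per-node bitmap so nodes
// whose error is already below TAU are meant to be skipped in later kernels.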
#define num_type long long unsigned
// 0.000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000
#define SCALE 63
__host__
__device__
__forceinline__
num_type d_to_fixed(double x) {
return x * ((double) ((num_type) 1 << SCALE));
}
__host__
__device__
__forceinline__
num_type fixed_mult(num_type x, num_type y) {
return d_to_fixed(((double) ((double) x / (double) (((num_type) 1) << SCALE)) * ((double) y / (double) (((num_type) 1) << SCALE))));
}
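// Added example (not in the original source): with SCALE == 63,
// d_to_fixed(0.25) == (num_type) 1 << 61, and
// fixed_mult(d_to_fixed(0.5), d_to_fixed(0.5)) recovers roughly d_to_fixed(0.25);
// it is only approximate because fixed_mult round-trips both operands through
// double, whose 53-bit mantissa cannot represent all 63 fractional bits.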
csc_fixed_t to_fixed_csc(csc_t m) {
csc_fixed_t fixed_csc;
fixed_csc.col_idx = m.col_idx;
fixed_csc.non_zero = m.non_zero;
fixed_csc.val = std::vector<num_type>();
for (int i = 0; i < m.val.size(); ++i) {
fixed_csc.val.push_back(d_to_fixed(m.val[i]));
}
return fixed_csc;
}
template<typename T>
void to_device_csc(T *csc_val, int *csc_non_zero, int *csc_col_idx, const csc_fixed_t src) {
hipMemcpy(csc_val, &src.val[0], sizeof(T) * src.val.size(), hipMemcpyHostToDevice);
hipMemcpy(csc_non_zero, &src.non_zero[0], sizeof(int) * src.non_zero.size(), hipMemcpyHostToDevice);
hipMemcpy(csc_col_idx, &src.col_idx[0], sizeof(int) * src.col_idx.size(), hipMemcpyHostToDevice);
}
__global__
void d_fixed_set_dangling_bitmap(bool *dangling_bitmap, int *csc_col_idx, const unsigned DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = init; i < DIMV; i += stride) {
dangling_bitmap[csc_col_idx[i]] = 0;
}
}
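// Added note (not from the original source): main() first sets every entry of
// the dangling bitmap to true; this kernel then clears the flag for every node
// index that appears in csc_col_idx, so only nodes with no stored entries
// (the dangling nodes) remain flagged.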
template<typename T>
__global__
void d_fixed_spmv(T *Y, T *pr, T *csc_val, int *csc_non_zero, int *csc_col_idx, const int DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = init; i < DIMV; i += stride) {
int begin = csc_non_zero[i];
int end = csc_non_zero[i + 1];
T acc = d_to_fixed(0.0);
for (int j = begin; j < end; ++j) {
acc += fixed_mult(csc_val[j], pr[csc_col_idx[j]]);
}
Y[i] = acc;
}
}
template<typename T>
__global__
void
d_update_fixed_spmv(T *Y, T *pr, T *csc_val, int *csc_non_zero, int *csc_col_idx, bool *update_bitmap, const int DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
const T initial_zero = d_to_fixed(0.0);
for (int i = init; i < DIMV; i += stride) {
if (!update_bitmap[i]) continue; // skip rows whose error already dropped below the threshold
int begin = csc_non_zero[i];
int end = csc_non_zero[i + 1];
T acc = initial_zero;
for (int j = begin; j < end; ++j) {
acc += fixed_mult(csc_val[j], pr[csc_col_idx[j]]);
}
Y[i] = acc;
}
}
template<typename T>
__global__
void d_set_value(T *v, const T value, const unsigned DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = init; i < DIMV; i += stride) {
v[i] = value;
}
}
template<typename T>
__global__
void d_fixed_scale(T *v, T value, const unsigned DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = init; i < DIMV; i += stride) {
v[i] = fixed_mult(v[i], value);
}
}
template<typename T>
__global__
void d_fixed_shift(T *v, T value, const unsigned DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = init; i < DIMV; i += stride) {
v[i] = v[i] + value;
}
}
__device__
__forceinline__
num_type d_fixed_abs(const num_type x, const num_type y) {
if (x > y) return x - y;
else return y - x;
}
template<typename T>
__global__
void d_update_fixed_compute_error(T *error, T *v1, T *v2, bool *update_bitmap, const T max_err, const unsigned DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = init; i < DIMV; i += stride) {
if (!update_bitmap[i]) continue; // skip nodes that are already below max_err
error[i] = d_fixed_abs(v1[i], v2[i]);
update_bitmap[i] = error[i] >= max_err;
}
}
template<typename T>
__global__
void d_fixed_compute_error(T *error, T *v1, T *v2, const unsigned DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = init; i < DIMV; i += stride) {
error[i] = d_fixed_abs(v1[i], v2[i]);
}
}
template<typename T>
bool check_error(T *e, const T error, const unsigned DIMV) {
for (int i = 0; i < DIMV; ++i) {
if (e[i] > error) return false;
}
return true;
}
template<typename T>
struct d_fixed_add_functor : public thrust::binary_function<T, T, T> {
__device__
T operator()(const T &x, const T &y) const {
return x + y;
}
};
template<typename T, typename S>
struct d_fixed_mult_functor : public thrust::binary_function<T, S, T> {
__device__
T operator()(const T &x, const S &y) const {
return fixed_mult(x, y);
}
};
template<typename T1, typename T2>
T2 d_fixed_dot(T1 *x, T2 *y, size_t n) {
return thrust::inner_product(
thrust::device,
thrust::device_pointer_cast(x),
thrust::device_pointer_cast(x + n),
thrust::device_pointer_cast(y),
0,
d_fixed_add_functor<T2>(),
d_fixed_mult_functor<T2, T1>()
);
}
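// Added note (not from the original source): the main loop calls this as
// d_fixed_dot(d_pr, d_dangling_bitmap, DIM) under the comment "Dangling nodes
// handler", i.e. the bool vector is used as a 0/1 mask with the intent of
// summing the rank mass held by dangling nodes.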
template<typename T>
void debug_print(char *name, T *v, const unsigned DIMV) {
T *test;
hipHostMalloc(&test, DIMV * sizeof(T));
hipMemcpy(test, v, DIMV * sizeof(T), hipMemcpyDeviceToHost);
std::cout << "---------------------DEBUG:" << name << "-------------------" << std::endl;
for (int i = 0; i < DIMV; ++i) {
std::cout << test[i] << std::endl;
}
std::cout << "------------------END DEBUG:" << name << "-------------------" << std::endl;
}
/**
* Performs an axpb operation on the x vector inplace
* @tparam T Numeric type
* @param x The vector to scale and shift
* @param a scaling factor
* @param b shifting factor
* @return
*/
template<typename T>
__global__
void d_fixed_axpb(T *x, T a, T b, const unsigned DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = init; i < DIMV; i += stride) {
x[i] = fixed_mult(x[i], a) + b;
}
}
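// Added note (not from the original source): in the main loop this is applied
// to the SpMV result with a = ALPHA and b = (1-ALPHA)/DIM + ALPHA/DIM * res_v,
// which completes one PageRank step pr' = ALPHA * A^T * pr + (1-ALPHA)/N + ALPHA * d/N,
// d being the dangling-node contribution computed by d_fixed_dot.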
struct is_over_error {
__device__
bool operator()(num_type &x) {
return x > d_to_fixed(TAU);
}
};
struct d_fixed_square_functor {
__device__
num_type operator()(num_type &x) {
return fixed_mult(x, x);
}
};
template<typename T>
T euclidean_error(T *error, const unsigned DIMV) {
return thrust::transform_reduce(
thrust::device,
error,
error + DIMV,
d_fixed_square_functor(),
(T) 0,
d_fixed_add_functor<T>()
);
}
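// Added note (not from the original source): this returns the squared L2 norm
// (a sum of fixed_mult(x, x) terms, no square root), so the convergence tests
// in the main loop compare the squared error against TAU.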
int main() {
/**
* HOST
*/
num_type *pr;
num_type *error;
num_type *convergence_error_vector;
/**
* DEVICE
*/
num_type *d_pr;
num_type *d_error;
num_type *d_spmv_res;
num_type *d_csc_val;
int *d_csc_non_zero;
int *d_csc_col_idx;
bool *d_dangling_bitmap;
bool *d_update_bitmap;
csc_t csc_matrix = parse_dir("/home/fra/University/HPPS/Approximate-PR/new_ds/" + GRAPH_TYPE, DEBUG);
csc_fixed_t fixed_csc = to_fixed_csc(csc_matrix);
const unsigned NON_ZERO = csc_matrix.val.size();
const unsigned DIM = csc_matrix.non_zero.size() - 1;
if (DEBUG) {
std::cout << "\nFEATURES: " << std::endl;
std::cout << "\tNumber of non zero elements: " << NON_ZERO << std::endl;
std::cout << "\tNumber of nodes: " << DIM << std::endl;
std::cout << "\tSparseness: " << (1 - (((double) NON_ZERO) / (DIM * DIM))) * 100 << "%\n" << std::endl;
}
hipHostMalloc(&pr, sizeof(num_type) * DIM);
hipHostMalloc(&error, sizeof(num_type) * DIM);
if (DEBUG) {
std::cout << "Initializing device memory" << std::endl;
}
// Create device memory
hipMalloc(&d_csc_val, sizeof(num_type) * NON_ZERO);
hipMalloc(&d_csc_non_zero, sizeof(int) * (DIM + 1));
hipMalloc(&d_csc_col_idx, sizeof(int) * NON_ZERO);
hipMalloc(&d_pr, sizeof(num_type) * DIM);
hipMalloc(&d_error, sizeof(num_type) * DIM);
hipMalloc(&d_spmv_res, sizeof(num_type) * DIM);
hipMalloc(&d_dangling_bitmap, DIM * sizeof(bool));
hipMalloc(&d_update_bitmap, DIM * sizeof(bool));
convergence_error_vector = (num_type *) calloc(MAX_ITER, sizeof(num_type));
// Transform the std::vectors into device vectors
to_device_csc(d_csc_val, d_csc_non_zero, d_csc_col_idx, fixed_csc);
if (DEBUG) {
std::cout << "Initializing PR, Error, dangling bitmap, update bitmap vecors" << std::endl;
}
d_set_value << < MAX_B, MAX_T >> > (d_pr, d_to_fixed(1.0 / DIM), DIM);
d_set_value << < MAX_B, MAX_T >> > (d_error, d_to_fixed(1.0), DIM);
d_set_value << < MAX_B, MAX_T >> > (d_dangling_bitmap, true, DIM);
d_set_value << < MAX_B, MAX_T >> > (d_update_bitmap, true, DIM);
d_fixed_set_dangling_bitmap << < MAX_B, MAX_T >> > (d_dangling_bitmap, d_csc_col_idx, NON_ZERO);
// debug_print("d_dangling_bitmap", d_dangling_bitmap, DIM);
hipMemcpy(pr, d_pr, DIM * sizeof(num_type), hipMemcpyDeviceToHost);
hipMemcpy(error, d_error, DIM * sizeof(num_type), hipMemcpyDeviceToHost);
if (DEBUG) {
std::cout << "Beginning pagerank" << std::endl;
}
int iterations = 0;
bool converged = false;
const num_type F_ALPHA = d_to_fixed(ALPHA);
const num_type F_TAU = d_to_fixed(TAU);
const num_type F_SHIFT = d_to_fixed((1.0 - ALPHA) / DIM);
const num_type F_DANGLING_SCALE = d_to_fixed(ALPHA / DIM);
// Start a timer
auto pr_clock_start = std::chrono::high_resolution_clock::now();
while (!converged && iterations < MAX_ITER) {
if(USE_NO_OPTIMIZATION){
// SpMV
d_fixed_spmv << < MAX_B, MAX_T >> > (d_spmv_res, d_pr, d_csc_val, d_csc_non_zero, d_csc_col_idx, DIM);
// Dangling nodes handler
num_type res_v = d_fixed_dot(d_pr, d_dangling_bitmap, DIM);
// aX + b
d_fixed_axpb << < MAX_T, MAX_B >> >(d_spmv_res, F_ALPHA, ((num_type) F_SHIFT + fixed_mult(F_DANGLING_SCALE, res_v)), DIM);
// Compute error
d_fixed_compute_error << < MAX_B, MAX_T >> > (d_error, d_spmv_res, d_pr, DIM);
// Swap back the pagerank values
hipMemcpy(d_pr, d_spmv_res, DIM * sizeof(num_type), hipMemcpyDeviceToDevice);
// Check for convergence
converged = thrust::count_if(thrust::device, d_error, d_error + DIM, is_over_error()) == 0;
}
if(USE_L2_NORM){
// SpMV
d_fixed_spmv << < MAX_B, MAX_T >> > (d_spmv_res, d_pr, d_csc_val, d_csc_non_zero, d_csc_col_idx, DIM);
// Dangling nodes handler
num_type res_v = d_fixed_dot(d_pr, d_dangling_bitmap, DIM);
// aX + b
d_fixed_axpb << < MAX_B, MAX_T >> >(d_spmv_res, F_ALPHA, ((num_type) F_SHIFT + fixed_mult(F_DANGLING_SCALE, res_v)), DIM);
// Compute error
d_fixed_compute_error << < MAX_B, MAX_T >> > (d_error, d_spmv_res, d_pr, DIM);
// Compute the l2 norm
num_type error_euc = euclidean_error(d_error, DIM);
//convergence_error_vector[iterations] = error_euc;
// Swap back the pagerank values
hipMemcpy(d_pr, d_spmv_res, DIM * sizeof(num_type), hipMemcpyDeviceToDevice);
// Check for convergence
converged = error_euc <= F_TAU;
}
if(USE_L2_NORM_BITMASK){
// SpMV
d_update_fixed_spmv<< <MAX_B, MAX_T>> > (d_spmv_res, d_pr, d_csc_val, d_csc_non_zero, d_csc_col_idx, d_update_bitmap, DIM);
// Dangling nodes handler
num_type res_v = d_fixed_dot(d_pr, d_dangling_bitmap, DIM);
// aX + b
d_fixed_axpb << < MAX_B, MAX_T >> >(d_spmv_res, F_ALPHA, ((num_type) F_SHIFT + fixed_mult(F_DANGLING_SCALE, res_v)), DIM);
// Compute error and bitmask
d_update_fixed_compute_error << <MAX_B, MAX_T>> > (d_error, d_spmv_res, d_pr, d_update_bitmap, F_TAU, DIM);
// Compute the l2 norm
num_type error_euc = euclidean_error(d_error, DIM);
// convergence_error_vector[iterations] = error_euc;
// Swap back the pagerank values
hipMemcpy(d_pr, d_spmv_res, DIM * sizeof(num_type), hipMemcpyDeviceToDevice);
// Check for convergence
converged = error_euc <= F_TAU;
}
/*
// SpMV
d_fixed_spmv << < MAX_B, MAX_T >> > (d_spmv_res, d_pr, d_csc_val, d_csc_non_zero, d_csc_col_idx, DIM);
//d_update_fixed_spmv<< <MAX_B, MAX_T>> > (d_spmv_res, d_pr, d_csc_val, d_csc_non_zero, d_csc_col_idx, d_update_bitmap, DIM);
// Dangling nodes handler
num_type res_v = d_fixed_dot(d_pr, d_dangling_bitmap, DIM);
//num_type res_v = h_fixed_dot(DIM, d_dangling_bitmap, d_pr);
//std::cout << "Thrust: " << res_v << " <-> Host: " << res_v_h << " -> diff: " << h_s_abs(res_v_h, res_v) << std::endl;
// aX + b
d_fixed_axpb << < MAX_T, MAX_B >> >(d_spmv_res, F_ALPHA, ((num_type) F_SHIFT + fixed_mult(F_DANGLING_SCALE, res_v)), DIM);
// Compute error
d_fixed_compute_error << < MAX_B, MAX_T >> > (d_error, d_spmv_res, d_pr, DIM);
//d_update_fixed_compute_error << <MAX_B, MAX_T>> > (d_error, d_spmv_res, d_pr, d_update_bitmap, F_TAU, DIM);
num_type error_euc = euclidean_error(d_error, DIM);
convergence_error_vector[iterations] = error_euc;
hipMemcpy(d_pr, d_spmv_res, DIM * sizeof(num_type), hipMemcpyDeviceToDevice);
//converged = thrust::count_if(thrust::device, d_error, d_error + DIM, is_over_error()) == 0;
converged = error_euc <= F_TAU;*/
iterations++;
}
// Stop the timer
auto pr_clock_end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(pr_clock_end - pr_clock_start).count();
if (DEBUG) {
std::cout << "Pagerank converged after " << duration << " ms" << std::endl;
}
hipMemcpy(pr, d_pr, DIM * sizeof(num_type), hipMemcpyDeviceToHost);
if (DEBUG) {
std::cout << "Pagerank converged after " << iterations << " iterations" << std::endl;
}
std::map<int, num_type> pr_map;
std::vector<std::pair<int, num_type>> sorted_pr;
std::vector<int> sorted_pr_idxs;
for (int i = 0; i < DIM; ++i) {
sorted_pr.push_back({i, pr[i]});
pr_map[i] = pr[i];
}
std::sort(sorted_pr.begin(), sorted_pr.end(),
[](const std::pair<int, num_type> &l, const std::pair<int, num_type> &r) {
if (l.second != r.second) return l.second > r.second;
else return l.first > r.first;
});
for (auto const &pair: sorted_pr) {
sorted_pr_idxs.push_back(pair.first);
//std::cout << pair.first << "," << pair.second << std::endl;
}
if (DEBUG) {
std::cout << "Checking results..." << std::endl;
std::ifstream results;
results.open("/home/fra/University/HPPS/Approximate-PR/new_ds/" + GRAPH_TYPE + "/results.txt");
int i = 0;
int tmp = 0;
int errors = 0;
int prev_left_idx = 0;
int prev_right_idx = 0;
while (results >> tmp) {
if (tmp != sorted_pr_idxs[i]) {
if (prev_left_idx != sorted_pr_idxs[i] || prev_right_idx != tmp) {
errors++;
if (errors <= 10) {
// Print only the top 10 errors
std::cout << "ERROR AT INDEX " << i << ": " << tmp << " != " << sorted_pr_idxs[i]
<< " Value => " << (num_type) pr_map[sorted_pr_idxs[i]] << std::endl;
}
}
prev_left_idx = tmp;
prev_right_idx = sorted_pr_idxs[i];
}
i++;
}
std::cout << "Percentage of error: " << (((double) errors) / (DIM)) * 100 << "%\n" << std::endl;
std::cout << "End of computation! Freeing memory..." << std::endl;
}
if (PYTHON_CONVERGENCE_ERROR_OUT) {
for (int i = 0; i < iterations; ++i) {
std::cout << "(" << i << "," << convergence_error_vector[i] << ")" << std::endl;
}
}
if (PYTHON_PAGERANK_VALUES) {
for (auto const &pair: sorted_pr) {
std::cout << pair.first << "," << pair.second << std::endl;
}
}
// pr and error were allocated with hipHostMalloc, the d_* buffers with hipMalloc
hipHostFree(pr);
hipHostFree(error);
free(convergence_error_vector);
hipFree(d_pr);
hipFree(d_error);
hipFree(d_spmv_res);
hipFree(d_csc_val);
hipFree(d_csc_non_zero);
hipFree(d_csc_col_idx);
hipFree(d_dangling_bitmap);
hipFree(d_update_bitmap);
hipDeviceReset();
return 0;
}
| 6805dcf376e61695125e904499da3f92403f0c0b.cu | // Created by Francesco Sgherzi on 15/04/19.
//
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <iostream>
#include <chrono>
#include <fstream>
#include <map>
#include <vector>
#include <algorithm>
#include <thrust/inner_product.h>
#include <thrust/device_ptr.h>
#include <thrust/count.h>
#include <thrust/execution_policy.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include "Parse/Parse.h"
#include "Utils/Utils.h"
#define TAU 0.0
#define ALPHA 0.85
#define MAX_B 1024
#define MAX_T 1024
#define DEBUG true
#define USE_NO_OPTIMIZATION false
#define USE_L2_NORM true
#define USE_L2_NORM_BITMASK false
#define GRAPH_TYPE ((std::string) "smw")
#define PYTHON_PAGERANK_VALUES false
#define PYTHON_CONVERGENCE_ERROR_OUT false
#define MAX_ITER 200
#define num_type long long unsigned
// 0.000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000
#define SCALE 63
__host__
__device__
__forceinline__
num_type d_to_fixed(double x) {
return x * ((double) ((num_type) 1 << SCALE));
}
__host__
__device__
__forceinline__
num_type fixed_mult(num_type x, num_type y) {
return d_to_fixed(((double) ((double) x / (double) (((num_type) 1) << SCALE)) * ((double) y / (double) (((num_type) 1) << SCALE))));
}
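// Illustrative examples of the fixed-point encoding above (SCALE = 63):
// d_to_fixed(0.25) == (num_type) 1 << 61, and fixed_mult(d_to_fixed(0.5), d_to_fixed(0.5))
// is approximately d_to_fixed(0.25); since the multiply round-trips through double, only
// about 53 of the 63 fraction bits are actually significant.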
csc_fixed_t to_fixed_csc(csc_t m) {
csc_fixed_t fixed_csc;
fixed_csc.col_idx = m.col_idx;
fixed_csc.non_zero = m.non_zero;
fixed_csc.val = std::vector<num_type>();
for (int i = 0; i < m.val.size(); ++i) {
fixed_csc.val.push_back(d_to_fixed(m.val[i]));
}
return fixed_csc;
}
template<typename T>
void to_device_csc(T *csc_val, int *csc_non_zero, int *csc_col_idx, const csc_fixed_t src) {
cudaMemcpy(csc_val, &src.val[0], sizeof(T) * src.val.size(), cudaMemcpyHostToDevice);
cudaMemcpy(csc_non_zero, &src.non_zero[0], sizeof(int) * src.non_zero.size(), cudaMemcpyHostToDevice);
cudaMemcpy(csc_col_idx, &src.col_idx[0], sizeof(int) * src.col_idx.size(), cudaMemcpyHostToDevice);
}
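// Marks as non-dangling every node that appears as a column index of a non-zero entry
// (i.e. contributes rank to at least one other node); after this pass, only nodes with
// no outgoing links remain flagged in the dangling bitmap.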
__global__
void d_fixed_set_dangling_bitmap(bool *dangling_bitmap, int *csc_col_idx, const unsigned DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = init; i < DIMV; i += stride) {
dangling_bitmap[csc_col_idx[i]] = 0;
}
}
template<typename T>
__global__
void d_fixed_spmv(T *Y, T *pr, T *csc_val, int *csc_non_zero, int *csc_col_idx, const int DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = init; i < DIMV; i += stride) {
int begin = csc_non_zero[i];
int end = csc_non_zero[i + 1];
T acc = d_to_fixed(0.0);
for (int j = begin; j < end; ++j) {
acc += fixed_mult(csc_val[j], pr[csc_col_idx[j]]);
}
Y[i] = acc;
}
}
template<typename T>
__global__
void
d_update_fixed_spmv(T *Y, T *pr, T *csc_val, int *csc_non_zero, int *csc_col_idx, bool *update_bitmap, const int DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
const T initial_zero = d_to_fixed(0.0);
for (int i = init; i < DIMV && update_bitmap[i]; i += stride) {
int begin = csc_non_zero[i];
int end = csc_non_zero[i + 1];
T acc = initial_zero;
for (int j = begin; j < end; ++j) {
acc += fixed_mult(csc_val[j], pr[csc_col_idx[j]]);
}
Y[i] = acc;
}
}
template<typename T>
__global__
void d_set_value(T *v, const T value, const unsigned DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = init; i < DIMV; i += stride) {
v[i] = value;
}
}
template<typename T>
__global__
void d_fixed_scale(T *v, T value, const unsigned DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = init; i < DIMV; i += stride) {
v[i] = fixed_mult(v[i], value);
}
}
template<typename T>
__global__
void d_fixed_shift(T *v, T value, const unsigned DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = init; i < DIMV; i += stride) {
v[i] = v[i] + value;
}
}
__device__
__forceinline__
num_type d_fixed_abs(const num_type x, const num_type y) { // take num_type so 64-bit fixed-point values are not truncated to 32 bits
if (x > y) return x - y;
else return y - x;
}
template<typename T>
__global__
void d_update_fixed_compute_error(T *error, T *v1, T *v2, bool *update_bitmap, const T max_err, const unsigned DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = init; i < DIMV && update_bitmap[i]; i += stride) {
error[i] = d_fixed_abs(v1[i], v2[i]);
update_bitmap[i] = error[i] >= max_err;
}
}
template<typename T>
__global__
void d_fixed_compute_error(T *error, T *v1, T *v2, const unsigned DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = init; i < DIMV; i += stride) {
error[i] = d_fixed_abs(v1[i], v2[i]);
}
}
template<typename T>
bool check_error(T *e, const T error, const unsigned DIMV) {
for (int i = 0; i < DIMV; ++i) {
if (e[i] > error) return false;
}
return true;
}
template<typename T>
struct d_fixed_add_functor : public thrust::binary_function<T, T, T> {
__device__
T operator()(const T &x, const T &y) const {
return x + y;
}
};
template<typename T, typename S>
struct d_fixed_mult_functor : public thrust::binary_function<T, S, T> {
__device__
T operator()(const T &x, const S &y) const {
return fixed_mult(x, y);
}
};
template<typename T1, typename T2>
T2 d_fixed_dot(T1 *x, T2 *y, size_t n) {
return thrust::inner_product(
thrust::device,
thrust::device_pointer_cast(x),
thrust::device_pointer_cast(x + n),
thrust::device_pointer_cast(y),
0,
d_fixed_add_functor<T2>(),
d_fixed_mult_functor<T2, T1>()
);
}
template<typename T>
void debug_print(char *name, T *v, const unsigned DIMV) {
T *test;
cudaMallocHost(&test, DIMV * sizeof(num_type));
cudaMemcpy(test, v, DIMV * sizeof(num_type), cudaMemcpyDeviceToHost);
std::cout << "---------------------DEBUG:" << name << "-------------------" << std::endl;
for (int i = 0; i < DIMV; ++i) {
std::cout << test[i] << std::endl;
}
std::cout << "------------------END DEBUG:" << name << "-------------------" << std::endl;
}
/**
* Performs an axpb operation on the x vector inplace
* @tparam T Numeric type
* @param x The vector to scale and shift
* @param a scaling factor
* @param b shifting factor
* @return
*/
template<typename T>
__global__
void d_fixed_axpb(T *x, T a, T b, const unsigned DIMV) {
int init = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = init; i < DIMV; i += stride) {
x[i] = fixed_mult(x[i], a) + b;
}
}
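// Example (matching the calls in the main loop below):
// d_fixed_axpb << < MAX_B, MAX_T >> >(x, F_ALPHA, b, DIM) leaves x[i] = ALPHA * x[i] + b in fixed point.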
struct is_over_error {
__device__
bool operator()(num_type &x) {
return x > d_to_fixed(TAU);
}
};
struct d_fixed_square_functor {
__device__
num_type operator()(num_type &x) {
return fixed_mult(x, x);
}
};
template<typename T>
T euclidean_error(T *error, const unsigned DIMV) {
return thrust::transform_reduce(
thrust::device,
error,
error + DIMV,
d_fixed_square_functor(),
(T) 0, // accumulate in the fixed-point type rather than double
d_fixed_add_functor<T>()
);
}
int main() {
/**
* HOST
*/
num_type *pr;
num_type *error;
num_type *convergence_error_vector;
/**
* DEVICE
*/
num_type *d_pr;
num_type *d_error;
num_type *d_spmv_res;
num_type *d_csc_val;
int *d_csc_non_zero;
int *d_csc_col_idx;
bool *d_dangling_bitmap;
bool *d_update_bitmap;
csc_t csc_matrix = parse_dir("/home/fra/University/HPPS/Approximate-PR/new_ds/" + GRAPH_TYPE, DEBUG);
csc_fixed_t fixed_csc = to_fixed_csc(csc_matrix);
const unsigned NON_ZERO = csc_matrix.val.size();
const unsigned DIM = csc_matrix.non_zero.size() - 1;
if (DEBUG) {
std::cout << "\nFEATURES: " << std::endl;
std::cout << "\tNumber of non zero elements: " << NON_ZERO << std::endl;
std::cout << "\tNumber of nodes: " << DIM << std::endl;
std::cout << "\tSparseness: " << (1 - (((double) NON_ZERO) / (DIM * DIM))) * 100 << "%\n" << std::endl;
}
cudaMallocHost(&pr, sizeof(num_type) * DIM);
cudaMallocHost(&error, sizeof(num_type) * DIM);
if (DEBUG) {
std::cout << "Initializing device memory" << std::endl;
}
// Create device memory
cudaMalloc(&d_csc_val, sizeof(num_type) * NON_ZERO);
cudaMalloc(&d_csc_non_zero, sizeof(int) * (DIM + 1));
cudaMalloc(&d_csc_col_idx, sizeof(int) * NON_ZERO);
cudaMalloc(&d_pr, sizeof(num_type) * DIM);
cudaMalloc(&d_error, sizeof(num_type) * DIM);
cudaMalloc(&d_spmv_res, sizeof(num_type) * DIM);
cudaMalloc(&d_dangling_bitmap, DIM * sizeof(bool));
cudaMalloc(&d_update_bitmap, DIM * sizeof(bool));
convergence_error_vector = (num_type *) calloc(MAX_ITER, sizeof(num_type));
// Transform the std::vectors into device vectors
to_device_csc(d_csc_val, d_csc_non_zero, d_csc_col_idx, fixed_csc);
if (DEBUG) {
std::cout << "Initializing PR, Error, dangling bitmap, update bitmap vecors" << std::endl;
}
d_set_value << < MAX_B, MAX_T >> > (d_pr, d_to_fixed(1.0 / DIM), DIM);
d_set_value << < MAX_B, MAX_T >> > (d_error, d_to_fixed(1.0), DIM);
d_set_value << < MAX_B, MAX_T >> > (d_dangling_bitmap, true, DIM);
d_set_value << < MAX_B, MAX_T >> > (d_update_bitmap, true, DIM);
d_fixed_set_dangling_bitmap << < MAX_B, MAX_T >> > (d_dangling_bitmap, d_csc_col_idx, NON_ZERO);
// debug_print("d_dangling_bitmap", d_dangling_bitmap, DIM);
cudaMemcpy(pr, d_pr, DIM * sizeof(num_type), cudaMemcpyDeviceToHost);
cudaMemcpy(error, d_error, DIM * sizeof(num_type), cudaMemcpyDeviceToHost);
if (DEBUG) {
std::cout << "Beginning pagerank" << std::endl;
}
int iterations = 0;
bool converged = false;
const num_type F_ALPHA = d_to_fixed(ALPHA);
const num_type F_TAU = d_to_fixed(TAU);
const num_type F_SHIFT = d_to_fixed((1.0 - ALPHA) / DIM);
const num_type F_DANGLING_SCALE = d_to_fixed(ALPHA / DIM);
// Start a timer
auto pr_clock_start = std::chrono::high_resolution_clock::now();
while (!converged && iterations < MAX_ITER) {
if(USE_NO_OPTIMIZATION){
// SpMV
d_fixed_spmv << < MAX_B, MAX_T >> > (d_spmv_res, d_pr, d_csc_val, d_csc_non_zero, d_csc_col_idx, DIM);
// Dangling nodes handler
num_type res_v = d_fixed_dot(d_pr, d_dangling_bitmap, DIM);
// aX + b
d_fixed_axpb << < MAX_B, MAX_T >> >(d_spmv_res, F_ALPHA, ((num_type) F_SHIFT + fixed_mult(F_DANGLING_SCALE, res_v)), DIM);
// Compute error
d_fixed_compute_error << < MAX_B, MAX_T >> > (d_error, d_spmv_res, d_pr, DIM);
// Swap back the pagerank values
cudaMemcpy(d_pr, d_spmv_res, DIM * sizeof(num_type), cudaMemcpyDeviceToDevice);
// Check for convergence
converged = thrust::count_if(thrust::device, d_error, d_error + DIM, is_over_error()) == 0;
}
if(USE_L2_NORM){
// SpMV
d_fixed_spmv << < MAX_B, MAX_T >> > (d_spmv_res, d_pr, d_csc_val, d_csc_non_zero, d_csc_col_idx, DIM);
// Dangling nodes handler
num_type res_v = d_fixed_dot(d_pr, d_dangling_bitmap, DIM);
// aX + b
d_fixed_axpb << < MAX_B, MAX_T >> >(d_spmv_res, F_ALPHA, ((num_type) F_SHIFT + fixed_mult(F_DANGLING_SCALE, res_v)), DIM);
// Compute error
d_fixed_compute_error << < MAX_B, MAX_T >> > (d_error, d_spmv_res, d_pr, DIM);
// Compute the l2 norm
num_type error_euc = euclidean_error(d_error, DIM);
//convergence_error_vector[iterations] = error_euc;
// Swap back the pagerank values
cudaMemcpy(d_pr, d_spmv_res, DIM * sizeof(num_type), cudaMemcpyDeviceToDevice);
// Check for convergence
converged = error_euc <= F_TAU;
}
if(USE_L2_NORM_BITMASK){
// SpMV
d_update_fixed_spmv<< <MAX_B, MAX_T>> > (d_spmv_res, d_pr, d_csc_val, d_csc_non_zero, d_csc_col_idx, d_update_bitmap, DIM);
// Dangling nodes handler
num_type res_v = d_fixed_dot(d_pr, d_dangling_bitmap, DIM);
// aX + b
d_fixed_axpb << < MAX_B, MAX_T >> >(d_spmv_res, F_ALPHA, ((num_type) F_SHIFT + fixed_mult(F_DANGLING_SCALE, res_v)), DIM);
// Compute error and bitmask
d_update_fixed_compute_error << <MAX_B, MAX_T>> > (d_error, d_spmv_res, d_pr, d_update_bitmap, F_TAU, DIM);
// Compute the l2 norm
num_type error_euc = euclidean_error(d_error, DIM);
// convergence_error_vector[iterations] = error_euc;
// Swap back the pagerank values
cudaMemcpy(d_pr, d_spmv_res, DIM * sizeof(num_type), cudaMemcpyDeviceToDevice);
// Check for convergence
converged = error_euc <= F_TAU;
}
/*
// SpMV
d_fixed_spmv << < MAX_B, MAX_T >> > (d_spmv_res, d_pr, d_csc_val, d_csc_non_zero, d_csc_col_idx, DIM);
//d_update_fixed_spmv<< <MAX_B, MAX_T>> > (d_spmv_res, d_pr, d_csc_val, d_csc_non_zero, d_csc_col_idx, d_update_bitmap, DIM);
// Dangling nodes handler
num_type res_v = d_fixed_dot(d_pr, d_dangling_bitmap, DIM);
//num_type res_v = h_fixed_dot(DIM, d_dangling_bitmap, d_pr);
//std::cout << "Thrust: " << res_v << " <-> Host: " << res_v_h << " -> diff: " << h_s_abs(res_v_h, res_v) << std::endl;
// aX + b
d_fixed_axpb << < MAX_T, MAX_B >> >(d_spmv_res, F_ALPHA, ((num_type) F_SHIFT + fixed_mult(F_DANGLING_SCALE, res_v)), DIM);
// Compute error
d_fixed_compute_error << < MAX_B, MAX_T >> > (d_error, d_spmv_res, d_pr, DIM);
//d_update_fixed_compute_error << <MAX_B, MAX_T>> > (d_error, d_spmv_res, d_pr, d_update_bitmap, F_TAU, DIM);
num_type error_euc = euclidean_error(d_error, DIM);
convergence_error_vector[iterations] = error_euc;
cudaMemcpy(d_pr, d_spmv_res, DIM * sizeof(num_type), cudaMemcpyDeviceToDevice);
//converged = thrust::count_if(thrust::device, d_error, d_error + DIM, is_over_error()) == 0;
converged = error_euc <= F_TAU;*/
iterations++;
}
// Stop the timer
auto pr_clock_end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(pr_clock_end - pr_clock_start).count();
if (DEBUG) {
std::cout << "Pagerank converged after " << duration << " ms" << std::endl;
}
cudaMemcpy(pr, d_pr, DIM * sizeof(num_type), cudaMemcpyDeviceToHost);
if (DEBUG) {
std::cout << "Pagerank converged after " << iterations << " iterations" << std::endl;
}
std::map<int, num_type> pr_map;
std::vector<std::pair<int, num_type>> sorted_pr;
std::vector<int> sorted_pr_idxs;
for (int i = 0; i < DIM; ++i) {
sorted_pr.push_back({i, pr[i]});
pr_map[i] = pr[i];
}
std::sort(sorted_pr.begin(), sorted_pr.end(),
[](const std::pair<int, num_type> &l, const std::pair<int, num_type> &r) {
if (l.second != r.second) return l.second > r.second;
else return l.first > r.first;
});
for (auto const &pair: sorted_pr) {
sorted_pr_idxs.push_back(pair.first);
//std::cout << pair.first << "," << pair.second << std::endl;
}
if (DEBUG) {
std::cout << "Checking results..." << std::endl;
std::ifstream results;
results.open("/home/fra/University/HPPS/Approximate-PR/new_ds/" + GRAPH_TYPE + "/results.txt");
int i = 0;
int tmp = 0;
int errors = 0;
int prev_left_idx = 0;
int prev_right_idx = 0;
while (results >> tmp) {
if (tmp != sorted_pr_idxs[i]) {
if (prev_left_idx != sorted_pr_idxs[i] || prev_right_idx != tmp) {
errors++;
if (errors <= 10) {
// Print only the top 10 errors
std::cout << "ERROR AT INDEX " << i << ": " << tmp << " != " << sorted_pr_idxs[i]
<< " Value => " << (num_type) pr_map[sorted_pr_idxs[i]] << std::endl;
}
}
prev_left_idx = tmp;
prev_right_idx = sorted_pr_idxs[i];
}
i++;
}
std::cout << "Percentage of error: " << (((double) errors) / (DIM)) * 100 << "%\n" << std::endl;
std::cout << "End of computation! Freeing memory..." << std::endl;
}
if (PYTHON_CONVERGENCE_ERROR_OUT) {
for (int i = 0; i < iterations; ++i) {
std::cout << "(" << i << "," << convergence_error_vector[i] << ")" << std::endl;
}
}
if (PYTHON_PAGERANK_VALUES) {
for (auto const &pair: sorted_pr) {
std::cout << pair.first << "," << pair.second << std::endl;
}
}
// pr and error were allocated with cudaMallocHost, the d_* buffers with cudaMalloc
cudaFreeHost(pr);
cudaFreeHost(error);
free(convergence_error_vector);
cudaFree(d_pr);
cudaFree(d_error);
cudaFree(d_spmv_res);
cudaFree(d_csc_val);
cudaFree(d_csc_non_zero);
cudaFree(d_csc_col_idx);
cudaFree(d_dangling_bitmap);
cudaFree(d_update_bitmap);
cudaDeviceReset();
return 0;
}
|
2968f7f70b02778781a6ab5b6719aac70f22f258.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "lifter.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *cepstrum = NULL;
hipMalloc(&cepstrum, XSIZE*YSIZE);
int nCoefs = 1;
int nhalf = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
lifter), dim3(gridBlock),dim3(threadBlock), 0, 0, cepstrum,nCoefs,nhalf);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
lifter), dim3(gridBlock),dim3(threadBlock), 0, 0, cepstrum,nCoefs,nhalf);
}
hipDeviceSynchronize(); // drain the warm-up launches so they are not charged to the timed loop
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
lifter), dim3(gridBlock),dim3(threadBlock), 0, 0, cepstrum,nCoefs,nhalf);
}
hipDeviceSynchronize(); // kernel launches are asynchronous; wait for them so the timer measures execution, not launch overhead
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2968f7f70b02778781a6ab5b6719aac70f22f258.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "lifter.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *cepstrum = NULL;
cudaMalloc(&cepstrum, XSIZE*YSIZE);
int nCoefs = 1;
int nhalf = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
lifter<<<gridBlock,threadBlock>>>(cepstrum,nCoefs,nhalf);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
lifter<<<gridBlock,threadBlock>>>(cepstrum,nCoefs,nhalf);
}
cudaDeviceSynchronize(); // drain the warm-up launches so they are not charged to the timed loop
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
lifter<<<gridBlock,threadBlock>>>(cepstrum,nCoefs,nhalf);
}
cudaDeviceSynchronize(); // kernel launches are asynchronous; wait for them so the timer measures execution, not launch overhead
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ef4818c772d05031e38688782854083b60a703d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand.h>
#include "mult_kernels.h"
#include "zero_kernels.h"
#include <math.h>
#include <stdio.h>
#include "transpose_kernel.h"
#include "gen_gpu.h"
/***************MatMulKernel*****************/
__global__ void MatMulKernel(TYPE *out, TYPE *in, TYPE *a, const int matrixHeight, const int matrixWidth) {
// get variables for loop
// copy section of b into shared mem
// go through the threads vertically and sum them into a variable
// atomic add these variables to the corresponding c index
// looping is happening horizontally on the matrix
// BLOCK_WIDTH is again horizontal
// BLOCK_HEIGHT is going vertical
// n / BLOCK_WIDTH blocks horizontally
// m / BLOCK_HEIGHT block vertically
// get variables for loop
// variable for loop length: blockEltHeight
__shared__ int blockElt;
__shared__ int blockxInd;
__shared__ int blockyInd;
if (threadIdx.x == 0) {
if ((blockIdx.x + 1) * BLOCK_WIDTH <= matrixWidth)
blockElt = BLOCK_WIDTH;
else blockElt = matrixWidth % BLOCK_WIDTH;
blockxInd = blockIdx.x * BLOCK_WIDTH;
blockyInd = blockIdx.y * BLOCK_HEIGHT;
}
__syncthreads();
// copy section of b into shared mem
// use the first BLOCK_WIDTH of thread
__shared__ TYPE b[BLOCK_WIDTH];
if (threadIdx.x < blockElt)
b[threadIdx.x] = in[blockxInd + threadIdx.x];
__syncthreads();
// summing variable
TYPE cSum = (TYPE) 0;
int threadyInd = blockyInd + threadIdx.x;
// make sure we are inside the matrix verticallly
if (threadyInd < matrixHeight) {
// go through the threads vertically and sum them into a variable
for (int i=0; i<blockElt; i++)
// A col index : blockIdx.x * BLOCK_WIDTH + i : blockxInd + i
// A row index : blockIdx.y * BLOCK_HEIGHT + threadIdx.x : blockyInd + threadIdx.x : threadyInd
// B index : b[i]
// cSum = B index * ( A col index * matrixHeight + A row index)
cSum += b[i] * a[(blockxInd + i) * (matrixHeight) + (threadyInd)];
//printf("csum = %f\n", cSum);
// atomic add these variables to the corresponding c index
atomicAdd(out + threadyInd, cSum);
}
}
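// Launch note: the host wrappers below call this kernel with a
// ceil(matrixWidth / BLOCK_WIDTH) x ceil(matrixHeight / BLOCK_HEIGHT) grid of BLOCK_HEIGHT-thread
// blocks, and `out` must be zeroed beforehand because partial sums are accumulated with atomicAdd.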
__global__ void MatMulKernelT(TYPE *out, TYPE *in, TYPE *a, const int matrixHeight, const int matrixWidth) {
// get variables for loop
// copy section of b into shared mem
// go through the threads vertically and sum them into a variable
// atomic add these variables to the corresponding c index
// looping is happening vertically on the matrix
// BLOCK_WIDTH is going vertical
// BLOCK_HEIGHT is going horizontal
// m / BLOCK_WIDTH blocks vertically
// n / BLOCK_HEIGHT block horizontally
// get variables for loop
// variable for loop length: blockElt
__shared__ int blockElt;
__shared__ int blockxInd;
__shared__ int blockyInd;
if (threadIdx.x == 0) {
if ((blockIdx.y + 1) * BLOCK_WIDTH <= matrixHeight)
blockElt = BLOCK_WIDTH;
else blockElt = matrixHeight % BLOCK_WIDTH;
blockxInd = blockIdx.x * BLOCK_HEIGHT;
blockyInd = blockIdx.y * BLOCK_WIDTH;
}
__syncthreads();
// copy section of b into shared mem
// use the first BLOCK_WIDTH of thread
__shared__ TYPE b[BLOCK_WIDTH];
if (threadIdx.x < blockElt)
b[threadIdx.x] = in[blockyInd + threadIdx.x];
__syncthreads();
// summing variable
TYPE cSum = (TYPE) 0;
int threadxInd = blockxInd + threadIdx.x;
// make sure we are inside the array horizontally
if (threadxInd < matrixWidth) {
// go through the threads vertically and sum them into a variable
for (int i=0; i<blockElt; i++)
// A col index : blockIdx.x * BLOCK_HEIGHT + threadIdx.x : blockxInd + threadIdx.x : threadxInd
// A row index : blockIdx.y * BLOCK_WIDTH + i : blockyInd + i
// B index : b[i]
// cSum = B index * ( A col index * matrixHeight + A row index)
cSum += b[i] * a[(threadxInd) * (matrixHeight) + (blockyInd + i)];
// atomic add these variables to the corresponding c index
atomicAdd(out + threadxInd , cSum);
//printf("el[%d%d;%d] csum = %f tot = %f\n", blockIdx.x, blockIdx.y, threadIdx.x, cSum, *(out + blockIdx.x * BLOCK_HEIGHT + threadIdx.x));
}
}
/***********createRandomMatrix***************/
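// Fills A with uniform (0, 1] samples generated on the device and rescaled by 1 / sqrt(size),
// presumably to keep the magnitude of matrix-vector products roughly independent of the size.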
void createRandomMatrix(TYPE *A, int size, int seed) {
float *d_A;
float *h_A = (float *) malloc (size * sizeof(float));
hiprandGenerator_t gen;
size_t size_d_A = size * sizeof(TYPE);
hipMalloc((void **) &d_A, size_d_A);
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(gen, seed);
hiprandGenerateUniform(gen, d_A, size);
hipMemcpy(h_A, d_A, size_d_A, hipMemcpyDeviceToHost);
// for (int j = 0; j < 10; j++)
// printf("h_A[%d] = %l=f\n", j, 10* h_A[j]);
for (int j = 0; j < size; j++)
A[j] = h_A[j] / sqrt (size);
hiprandDestroyGenerator(gen);
hipFree(d_A);
free(h_A);
}
float matVecMul (float * out, float * in, float * A, const int m, const int n)
{
// set up threading and blocking variables
hipDeviceProp_t dp;
hipGetDeviceProperties(&dp,0);
unsigned int max_threads_per_block = dp.maxThreadsPerBlock;
int threads_perblockm = min(m, max_threads_per_block);
dim3 threadsPerBlockm(threads_perblockm);
int num_blocksm = (int)ceil((float)m/(float)threads_perblockm);
dim3 numBlocksm(num_blocksm);
int blockCols = (int) ceil(n / (double) BLOCK_WIDTH);
int blockRows = (int) ceil(m / (double) BLOCK_HEIGHT);
dim3 dimBlock(BLOCK_HEIGHT);
dim3 dimGrid(blockCols, blockRows);
int sharedMem = 3 * sizeof (int) + BLOCK_WIDTH * sizeof (float);
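// Note: MatMulKernel declares its shared memory statically (__shared__), so the dynamic
// shared-memory size computed here is requested at launch but not actually consumed by the kernel.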
// set up timing
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
// execute kernels
hipLaunchKernelGGL(( zero_vector_float), dim3(numBlocksm), dim3(threadsPerBlockm), 0, 0, out, m);
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), sharedMem, 0, out, in, A, m, n);
hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
return time;
}
float matVecMulT (float * out, float * in, float * A, const int m, const int n)
{
// set up threading and blocking variables
hipDeviceProp_t dp;
hipGetDeviceProperties(&dp,0);
unsigned int max_threads_per_block = dp.maxThreadsPerBlock;
int threads_perblockn = min(n, max_threads_per_block);
dim3 threadsPerBlockn(threads_perblockn);
int num_blocksn = (int)ceil((float)n/(float)threads_perblockn);
dim3 numBlocksn(num_blocksn);
int blockCols = (int) ceil(n / (double) BLOCK_HEIGHT);
int blockRows = (int) ceil(m / (double) BLOCK_WIDTH);
dim3 dimBlock(BLOCK_HEIGHT);
dim3 dimGrid(blockCols, blockRows);
int sharedMem = 3 * sizeof (int) + BLOCK_WIDTH * sizeof (float);
// set up timing
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
// execute kernels
hipLaunchKernelGGL(( zero_vector_float), dim3(numBlocksn), dim3(threadsPerBlockn), 0, 0, out, n);
hipLaunchKernelGGL(( MatMulKernelT), dim3(dimGrid), dim3(dimBlock), sharedMem, 0, out, in, A, m, n);
hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
return time;
}
float matVecMulTransposed(float * out, float * in, float * A, float * AT, const int m, const int n)
{
// set up threading and blocking variables
hipDeviceProp_t dp;
hipGetDeviceProperties(&dp,0);
unsigned int max_threads_per_block = dp.maxThreadsPerBlock;
int threads_perblockn = min(n, max_threads_per_block);
dim3 threadsPerBlockn(threads_perblockn);
int num_blocksn = (int)ceil((float)n/(float)threads_perblockn);
dim3 numBlocksn(num_blocksn);
int blockCols = (int) ceil(n / (double) BLOCK_HEIGHT);
int blockRows = (int) ceil(m / (double) BLOCK_WIDTH);
dim3 dimBlock(BLOCK_HEIGHT);
dim3 dimGridt(blockRows, blockCols);
int sharedMem = 3 * sizeof (int) + BLOCK_WIDTH * sizeof (float);
dim3 blocks((int)ceil (m / (float)BLOCK_DIM), (int) ceil(n / (float)BLOCK_DIM));
dim3 threads(BLOCK_DIM, BLOCK_DIM);
// set up timing
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
// execute kernels
hipLaunchKernelGGL(( zero_vector_float), dim3(numBlocksn), dim3(threadsPerBlockn), 0, 0, out, n);
hipLaunchKernelGGL(( transpose), dim3(blocks), dim3(threads), 0, 0, AT, A, m, n);
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGridt), dim3(dimBlock), sharedMem, 0, out, in, AT, n, m);
hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
return time;
}
float matVecNaive (float * out, float * in, float * A, const int m, const int n) {
// set up threading and blocking variables
hipDeviceProp_t dp;
hipGetDeviceProperties(&dp,0);
unsigned int max_threads_per_block = dp.maxThreadsPerBlock;
int threads_perblockm = min(m, max_threads_per_block);
dim3 threadsPerBlockm(threads_perblockm);
int num_blocksm = (int)ceil((float)m/(float)threads_perblockm);
dim3 numBlocksm(num_blocksm);
// set up timing
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
// execute kernel
hipLaunchKernelGGL(( gen_matvec) , dim3(numBlocksm), dim3(threadsPerBlockm) , 0, 0, (float*)A, (float*)in, (float*)out, m, n);
hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
return time;
}
float matVecNaiveTrans (float * out, float * in, float * A, const int m, const int n) {
// set up threading and blocking variables
hipDeviceProp_t dp;
hipGetDeviceProperties(&dp,0);
unsigned int max_threads_per_block = dp.maxThreadsPerBlock;
int threads_perblockn = min(n, max_threads_per_block);
dim3 threadsPerBlockn(threads_perblockn);
int num_blocksn = (int)ceil((float)n/(float)threads_perblockn);
dim3 numBlocksn(num_blocksn);
// set up timing
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
// execute kernel
hipLaunchKernelGGL(( gen_matvecT) , dim3(numBlocksn), dim3(threadsPerBlockn) , 0, 0, (float*)A, (float*)out, (float*)in, m, n);
hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
return time;
}
| ef4818c772d05031e38688782854083b60a703d9.cu | #include <curand.h>
#include "mult_kernels.h"
#include "zero_kernels.h"
#include <math.h>
#include <stdio.h>
#include "transpose_kernel.h"
#include "gen_gpu.h"
/***************MatMulKernel*****************/
__global__ void MatMulKernel(TYPE *out, TYPE *in, TYPE *a, const int matrixHeight, const int matrixWidth) {
// get variables for loop
// copy section of b into shared mem
// go through the threads vertically and sum them into a variable
// atomic add these variables to the corresponding c index
// looping is happening horizontally on the matrix
// BLOCK_WIDTH is again horizontal
// BLOCK_HEIGHT is going vertical
// n / BLOCK_WIDTH blocks horizontally
// m / BLOCK_HEIGHT block vertically
// get variables for loop
// variable for loop length: blockEltHeight
__shared__ int blockElt;
__shared__ int blockxInd;
__shared__ int blockyInd;
if (threadIdx.x == 0) {
if ((blockIdx.x + 1) * BLOCK_WIDTH <= matrixWidth)
blockElt = BLOCK_WIDTH;
else blockElt = matrixWidth % BLOCK_WIDTH;
blockxInd = blockIdx.x * BLOCK_WIDTH;
blockyInd = blockIdx.y * BLOCK_HEIGHT;
}
__syncthreads();
// copy section of b into shared mem
// use the first BLOCK_WIDTH of thread
__shared__ TYPE b[BLOCK_WIDTH];
if (threadIdx.x < blockElt)
b[threadIdx.x] = in[blockxInd + threadIdx.x];
__syncthreads();
// summing variable
TYPE cSum = (TYPE) 0;
int threadyInd = blockyInd + threadIdx.x;
// make sure we are inside the matrix verticallly
if (threadyInd < matrixHeight) {
// go through the threads vertically and sum them into a variable
for (int i=0; i<blockElt; i++)
// A col index : blockIdx.x * BLOCK_WIDTH + i : blockxInd + i
// A row index : blockIdx.y * BLOCK_HEIGHT + threadIdx.x : blockyInd + threadIdx.x : threadyInd
// B index : b[i]
// cSum = B index * ( A col index * matrixHeight + A row index)
cSum += b[i] * a[(blockxInd + i) * (matrixHeight) + (threadyInd)];
//printf("csum = %f\n", cSum);
// atomic add these variables to the corresponding c index
atomicAdd(out + threadyInd, cSum);
}
}
__global__ void MatMulKernelT(TYPE *out, TYPE *in, TYPE *a, const int matrixHeight, const int matrixWidth) {
// get variables for loop
// copy section of b into shared mem
// go through the threads vertically and sum them into a variable
// atomic add these variables to the corresponding c index
// looping is happening vertically on the matrix
// BLOCK_WIDTH is going vertical
// BLOCK_HEIGHT is going horizontal
// m / BLOCK_WIDTH blocks vertically
// n / BLOCK_HEIGHT block horizontally
// get variables for loop
// variable for loop length: blockElt
__shared__ int blockElt;
__shared__ int blockxInd;
__shared__ int blockyInd;
if (threadIdx.x == 0) {
if ((blockIdx.y + 1) * BLOCK_WIDTH <= matrixHeight)
blockElt = BLOCK_WIDTH;
else blockElt = matrixHeight % BLOCK_WIDTH;
blockxInd = blockIdx.x * BLOCK_HEIGHT;
blockyInd = blockIdx.y * BLOCK_WIDTH;
}
__syncthreads();
// copy section of b into shared mem
// use the first BLOCK_WIDTH of thread
__shared__ TYPE b[BLOCK_WIDTH];
if (threadIdx.x < blockElt)
b[threadIdx.x] = in[blockyInd + threadIdx.x];
__syncthreads();
// summing variable
TYPE cSum = (TYPE) 0;
int threadxInd = blockxInd + threadIdx.x;
// make sure we are inside the array horizontally
if (threadxInd < matrixWidth) {
// go through the threads vertically and sum them into a variable
for (int i=0; i<blockElt; i++)
// A col index : blockIdx.x * BLOCK_HEIGHT + threadIdx.x : blockxInd + threadIdx.x : threadxInd
// A row index : blockIdx.y * BLOCK_WIDTH + i : blockyInd + i
// B index : b[i]
// cSum = B index * ( A col index * matrixHeight + A row index)
cSum += b[i] * a[(threadxInd) * (matrixHeight) + (blockyInd + i)];
// atomic add these variables to the corresponding c index
atomicAdd(out + threadxInd , cSum);
//printf("el[%d%d;%d] csum = %f tot = %f\n", blockIdx.x, blockIdx.y, threadIdx.x, cSum, *(out + blockIdx.x * BLOCK_HEIGHT + threadIdx.x));
}
}
/***********createRandomMatrix***************/
void createRandomMatrix(TYPE *A, int size, int seed) {
float *d_A;
float *h_A = (float *) malloc (size * sizeof(float));
curandGenerator_t gen;
size_t size_d_A = size * sizeof(TYPE);
cudaMalloc((void **) &d_A, size_d_A);
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(gen, seed);
curandGenerateUniform(gen, d_A, size);
cudaMemcpy(h_A, d_A, size_d_A, cudaMemcpyDeviceToHost);
// for (int j = 0; j < 10; j++)
// printf("h_A[%d] = %l=f\n", j, 10* h_A[j]);
for (int j = 0; j < size; j++)
A[j] = h_A[j] / sqrt (size);
curandDestroyGenerator(gen);
cudaFree(d_A);
free(h_A);
}
float matVecMul (float * out, float * in, float * A, const int m, const int n)
{
// set up threading and blocking variables
cudaDeviceProp dp;
cudaGetDeviceProperties(&dp,0);
unsigned int max_threads_per_block = dp.maxThreadsPerBlock;
int threads_perblockm = min(m, max_threads_per_block);
dim3 threadsPerBlockm(threads_perblockm);
int num_blocksm = (int)ceil((float)m/(float)threads_perblockm);
dim3 numBlocksm(num_blocksm);
int blockCols = (int) ceil(n / (double) BLOCK_WIDTH);
int blockRows = (int) ceil(m / (double) BLOCK_HEIGHT);
dim3 dimBlock(BLOCK_HEIGHT);
dim3 dimGrid(blockCols, blockRows);
int sharedMem = 3 * sizeof (int) + BLOCK_WIDTH * sizeof (float);
// set up timing
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
// execute kernels
zero_vector_float<<<numBlocksm, threadsPerBlockm>>>(out, m);
MatMulKernel<<<dimGrid, dimBlock, sharedMem>>>(out, in, A, m, n);
cudaThreadSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return time;
}
float matVecMulT (float * out, float * in, float * A, const int m, const int n)
{
// set up threading and blocking variables
cudaDeviceProp dp;
cudaGetDeviceProperties(&dp,0);
unsigned int max_threads_per_block = dp.maxThreadsPerBlock;
int threads_perblockn = min(n, max_threads_per_block);
dim3 threadsPerBlockn(threads_perblockn);
int num_blocksn = (int)ceil((float)n/(float)threads_perblockn);
dim3 numBlocksn(num_blocksn);
int blockCols = (int) ceil(n / (double) BLOCK_HEIGHT);
int blockRows = (int) ceil(m / (double) BLOCK_WIDTH);
dim3 dimBlock(BLOCK_HEIGHT);
dim3 dimGrid(blockCols, blockRows);
int sharedMem = 3 * sizeof (int) + BLOCK_WIDTH * sizeof (float);
// set up timing
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
// execute kernels
zero_vector_float<<<numBlocksn, threadsPerBlockn>>>(out, n);
MatMulKernelT<<<dimGrid, dimBlock, sharedMem>>>(out, in, A, m, n);
cudaThreadSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return time;
}
float matVecMulTransposed(float * out, float * in, float * A, float * AT, const int m, const int n)
{
// set up threading and blocking variables
cudaDeviceProp dp;
cudaGetDeviceProperties(&dp,0);
unsigned int max_threads_per_block = dp.maxThreadsPerBlock;
int threads_perblockn = min(n, max_threads_per_block);
dim3 threadsPerBlockn(threads_perblockn);
int num_blocksn = (int)ceil((float)n/(float)threads_perblockn);
dim3 numBlocksn(num_blocksn);
int blockCols = (int) ceil(n / (double) BLOCK_HEIGHT);
int blockRows = (int) ceil(m / (double) BLOCK_WIDTH);
dim3 dimBlock(BLOCK_HEIGHT);
dim3 dimGridt(blockRows, blockCols);
int sharedMem = 3 * sizeof (int) + BLOCK_WIDTH * sizeof (float);
dim3 blocks((int)ceil (m / (float)BLOCK_DIM), (int) ceil(n / (float)BLOCK_DIM));
dim3 threads(BLOCK_DIM, BLOCK_DIM);
// set up timing
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
// execute kernels
zero_vector_float<<<numBlocksn, threadsPerBlockn>>>(out, n);
transpose<<<blocks, threads>>>(AT, A, m, n);
MatMulKernel<<<dimGridt, dimBlock, sharedMem>>>(out, in, AT, n, m);
cudaThreadSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return time;
}
float matVecNaive (float * out, float * in, float * A, const int m, const int n) {
// set up threading and blocking variables
cudaDeviceProp dp;
cudaGetDeviceProperties(&dp,0);
unsigned int max_threads_per_block = dp.maxThreadsPerBlock;
int threads_perblockm = min(m, max_threads_per_block);
dim3 threadsPerBlockm(threads_perblockm);
int num_blocksm = (int)ceil((float)m/(float)threads_perblockm);
dim3 numBlocksm(num_blocksm);
// set up timing
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
// execute kernel
gen_matvec <<< numBlocksm, threadsPerBlockm >>>((float*)A, (float*)in, (float*)out, m, n);
cudaThreadSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return time;
}
float matVecNaiveTrans (float * out, float * in, float * A, const int m, const int n) {
// set up threading and blocking variables
cudaDeviceProp dp;
cudaGetDeviceProperties(&dp,0);
unsigned int max_threads_per_block = dp.maxThreadsPerBlock;
int threads_perblockn = min(n, max_threads_per_block);
dim3 threadsPerBlockn(threads_perblockn);
int num_blocksn = (int)ceil((float)n/(float)threads_perblockn);
dim3 numBlocksn(num_blocksn);
// set up timing
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
// execute kernel
gen_matvecT <<< numBlocksn, threadsPerBlockn >>>((float*)A, (float*)out, (float*)in, m, n);
cudaThreadSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return time;
}
|
01e7b3334ba00ad67e7228e4dc6a088d5bdeb480.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: mphoward
#include "NeighborListGPUTree_hip.cuh"
#include "TextureTools.h"
#include "hipcub/hipcub.hpp"
#define MORTON_CODE_BITS 30 //!< Length of the Morton code in bits (k = 10 bits per direction)
#define MORTON_CODE_N_BINS 1024 //!< Number of bins (2^10) per direction to generate 30 bit Morton codes
#define MORTON_TYPE_MASK_64 0x000000003fffffffu //!< 64 bit mask to turn morton code-type back to morton code
/*! \file NeighborListGPUTree.cu
\brief Defines GPU kernel code for neighbor list tree traversal on the GPU
*/
//! Texture for reading particle positions
scalar4_tex_t pdata_pos_tex;
//! Texture for reading leaf data
scalar4_tex_t leaf_xyzf_tex;
//! Texture for the diameter / body
scalar2_tex_t leaf_db_tex;
//! Texture for reading node upper and lower bounds
scalar4_tex_t aabb_node_bounds_tex;
//! Texture for the head list
texture<unsigned int, 1, hipReadModeElementType> head_list_tex;
//!< Expands a 10-bit integer into 30 bits by inserting 2 zeros after each bit.
/*!
* \param v unsigned integer with 10 bits set
* \returns The integer expanded with two zeros interleaved between bits
* http://devblogs.nvidia.com/parallelforall/thinking-parallel-part-iii-tree-construction-gpu/
*/
__device__ inline unsigned int expandBits(unsigned int v)
{
v = (v * 0x00010001u) & 0xFF0000FFu;
v = (v * 0x00000101u) & 0x0F00F00Fu;
v = (v * 0x00000011u) & 0xC30C30C3u;
v = (v * 0x00000005u) & 0x49249249u;
return v;
}
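// Worked example: expandBits(0b1011) == 0b1000001001 (decimal 521), i.e. input bit k moves to
// output bit 3k. The Morton code below interleaves the expanded x, y, z bins as x*4 + y*2 + z,
// so x occupies bit positions 3k+2, y positions 3k+1, and z positions 3k.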
//! Assigns the Morton code-type key for each particle on this processor
/*!
* \param d_morton_types Morton code-type keys per particle
* \param d_map_tree_pid List to be overwritten with particle ids in ascending order
* \param d_morton_conditions Flag if a local particle (not a ghost) is detected out of bounds
* \param d_pos Particle positions
* \param N Number of local particles
* \param nghosts Number of ghost particles
* \param box Local simulation box
* \param ghost_width Anticipated size of the ghost layer for nonbonded interactions
*
* \b Implementation
* A sorting key is generated for each particle by determining the 30 bit Morton code for each particle, and then
* concatenating onto the type. Both the Morton code and the type are 32 bit integers, so the concatenation is stored
* compactly in a 64 bit integer morton_type = (type << 30) + morton code. In this way, a lexicographic sort will
* sort first by type, then by morton code. The corresponding particle id (thread index) is stashed into d_map_tree_pid
* to track particles after sorting.
*/
__global__ void gpu_nlist_morton_types_kernel(uint64_t *d_morton_types,
unsigned int *d_map_tree_pid,
int *d_morton_conditions,
const Scalar4 *d_pos,
const unsigned int N,
const unsigned int nghosts,
const BoxDim box,
const Scalar3 ghost_width)
{
// compute the particle index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per particle
if (idx >= N+nghosts)
return;
// acquire particle data
Scalar4 postype = d_pos[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
const unsigned int type = __scalar_as_int(postype.w);
// get position in simulation box
uchar3 periodic = box.getPeriodic();
Scalar3 f = box.makeFraction(pos,ghost_width);
/* check if the particle is inside the unit cell + ghost layer in all dimensions
* this tolerance is small enough that when we multiply by the morton code bin size, we are still in range
* we silently ignore ghosts outside of this width, and instead deal with that special case below
* where extra ghosts are communicated (e.g. for bonded interactions)
*/
if (((f.x < Scalar(-0.00001) || f.x >= Scalar(1.00001)) ||
(f.y < Scalar(-0.00001) || f.y >= Scalar(1.00001)) ||
(f.z < Scalar(-0.00001) || f.z >= Scalar(1.00001))) && idx < N)
{
*d_morton_conditions = idx;
return;
}
// find the bin each particle belongs in
int ib = (int)(f.x * MORTON_CODE_N_BINS);
int jb = (int)(f.y * MORTON_CODE_N_BINS);
int kb = (int)(f.z * MORTON_CODE_N_BINS);
if (!periodic.x) // ghosts exist and may be past layer width
{
// handle special cases where random ghosts are beyond the expected layer
// by just rounding to the nearest edge
if (ib < 0)
{
ib = 0;
}
else if (ib >= MORTON_CODE_N_BINS)
{
ib = MORTON_CODE_N_BINS - 1;
}
}
else if (ib == MORTON_CODE_N_BINS) // some particles lie exactly on the edge, floor them to zero
{
ib = 0;
}
// do as for x in y
if (!periodic.y)
{
if (jb < 0)
{
jb = 0;
}
else if (jb >= MORTON_CODE_N_BINS)
{
jb = MORTON_CODE_N_BINS - 1;
}
}
else if (jb == MORTON_CODE_N_BINS)
{
jb = 0;
}
// do as for y in z
if (!periodic.z)
{
if (kb < 0)
{
kb = 0;
}
else if (kb >= MORTON_CODE_N_BINS)
{
kb = MORTON_CODE_N_BINS - 1;
}
}
else if (kb == MORTON_CODE_N_BINS)
{
kb = 0;
}
// inline call to some bit swizzling arithmetic
unsigned int ii = expandBits((unsigned int)ib);
unsigned int jj = expandBits((unsigned int)jb);
unsigned int kk = expandBits((unsigned int)kb);
unsigned int morton_code = ii * 4 + jj * 2 + kk;
// save the morton code and corresponding particle index for sorting
// the morton codes hold both the type and the code to sort by both type and position simultaneously
d_morton_types[idx] = (((uint64_t)type) << MORTON_CODE_BITS) + (uint64_t)morton_code;
d_map_tree_pid[idx] = idx;
}
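// For reference, the packed key can be decomposed as:
// type = (unsigned int)(key >> MORTON_CODE_BITS); morton code = (unsigned int)(key & MORTON_TYPE_MASK_64);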
/*!
* \param d_morton_types Morton code-type keys per particle
* \param d_map_tree_pid List to be overwritten with particle ids in ascending order
* \param d_morton_conditions Flag if a local particle (not a ghost) is detected out of bounds
* \param d_pos Particle positions
* \param N Number of local particles
* \param nghosts Number of ghost particles
* \param box Local simulation box
* \param ghost_width Anticipated size of the ghost layer for nonbonded interactions
* \param block_size Requested thread block size of kernel launch
*
* \returns hipSuccess on completion
*/
hipError_t gpu_nlist_morton_types(uint64_t *d_morton_types,
unsigned int *d_map_tree_pid,
int *d_morton_conditions,
const Scalar4 *d_pos,
const unsigned int N,
const unsigned int nghosts,
const BoxDim& box,
const Scalar3 ghost_width,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void *)gpu_nlist_morton_types_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
hipLaunchKernelGGL(( gpu_nlist_morton_types_kernel), dim3((N+nghosts)/run_block_size + 1), dim3(run_block_size), 0, 0, d_morton_types,
d_map_tree_pid,
d_morton_conditions,
d_pos,
N,
nghosts,
box,
ghost_width);
return hipSuccess;
}
/*!
* \param d_morton_types Morton code-type keys per particle
* \param d_morton_types_alt Auxiliary array of equal size to d_morton_types for double buffered sorting
* \param d_map_tree_pid List of particle ids
* \param d_map_tree_pid_alt Auxiliary array of equal size to d_map_tree_pid for double buffered sorting
* \param d_tmp_storage Temporary storage in device memory
* \param tmp_storage_bytes Number of bytes allocated for temporary storage
* \param swap_morton Flag to switch real data from auxiliary array to primary array after sorting
* \param swap_map Flag to switch real data from auxiliary array to primary array after sorting
* \param Ntot Total number of keys to sort
* \param n_type_bits Number of bits to check for particle types
*
* \returns hipSuccess on completion
*
* \b Implementation
* The CUB library is used for device-wide radix sorting. Radix sorting is O(kN) where k is the number of bits to check
* in an unsigned integer key, and N is the number of keys. We restrict the number of bits checked in the max 64 bit
* keys by only checking up to the MORTON_CODE_BITS + n_type_bits most significant bit. CUB DeviceRadixSort performs
* its own tuning at run time.
*
* Because CUB requires temporary storage, this function must be called twice. First, when \a d_tmp_storage is NULL,
* the number of bytes required for temporary storage is saved in \a tmp_storage_bytes. This memory must then be
* allocated in \a d_tmp_storage. On the second call, the radix sort is performed. Because the radix sort may put the
* active (sorted) buffer in either slot of the DoubleBuffer, a boolean flag is set in \a swap_morton and \a swap_map
* for whether these data arrays should be swapped.
*/
hipError_t gpu_nlist_morton_sort(uint64_t *d_morton_types,
uint64_t *d_morton_types_alt,
unsigned int *d_map_tree_pid,
unsigned int *d_map_tree_pid_alt,
void *d_tmp_storage,
size_t &tmp_storage_bytes,
bool &swap_morton,
bool &swap_map,
const unsigned int Ntot,
const unsigned int n_type_bits)
{
// initialize memory as "double buffered"
// use the hipcub:: wrappers so the double buffers resolve on both CUDA and ROCm backends
hipcub::DoubleBuffer<uint64_t> d_keys(d_morton_types, d_morton_types_alt);
hipcub::DoubleBuffer<unsigned int> d_vals(d_map_tree_pid, d_map_tree_pid_alt);
// on the first pass, this just sizes the temporary storage
// on the second pass, it actually does the radix sort
hipcub::DeviceRadixSort::SortPairs(d_tmp_storage,
tmp_storage_bytes,
d_keys,
d_vals,
Ntot,
0,
MORTON_CODE_BITS+n_type_bits);
// we've only done something to the buffers on the second time when temporary storage is allocated
if (d_tmp_storage != NULL)
{
// mark that the gpu arrays should be flipped if the final result is not in the right array
swap_morton = (d_keys.selector == 1);
swap_map = (d_vals.selector == 1);
}
return hipSuccess;
}
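/* Minimal host-side sketch of the required two-pass call pattern (illustrative only; the
 * caller's buffer management may differ):
 *
 *   size_t tmp_bytes = 0;
 *   bool swap_morton = false, swap_map = false;
 *   // first call with d_tmp_storage == NULL only sizes the temporary storage
 *   gpu_nlist_morton_sort(d_morton_types, d_morton_types_alt, d_map_tree_pid, d_map_tree_pid_alt,
 *                         NULL, tmp_bytes, swap_morton, swap_map, Ntot, n_type_bits);
 *   void *d_tmp = NULL;
 *   hipMalloc(&d_tmp, tmp_bytes);
 *   // second call performs the radix sort
 *   gpu_nlist_morton_sort(d_morton_types, d_morton_types_alt, d_map_tree_pid, d_map_tree_pid_alt,
 *                         d_tmp, tmp_bytes, swap_morton, swap_map, Ntot, n_type_bits);
 *   // if swap_morton / swap_map come back true, the sorted data live in the *_alt arrays
 */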
//! Kernel to merge adjacent codes into leaf nodes
/*!
* \param d_tree_aabbs Flat array holding all AABBs for the tree
* \param d_morton_codes_red The Morton codes corresponding to the merged leafs
* \param d_tree_parent_sib Parent and sibling indexes for all nodes
* \param d_morton_types Morton-code type keys for all particles
* \param d_pos Particle positions
* \param d_num_per_type Number of particles per type
* \param ntypes Number of particle types
* \param d_map_tree_pid Sorted particle order (maps local index to ParticleData index)
* \param d_leaf_offset Amount to subtract from the expected leaf starting index to make an array with no holes by type
* \param d_type_head Index to first type and leaf ordered particles by type
* \param Ntot Total number of keys to sort
* \param nleafs Number of leaf nodes
*
* \b Implementation
* One thread per leaf is called, and is responsible for merging NLIST_GPU_PARTICLES_PER_LEAF into an AABB. Each thread
* first determines what type of leaf particle it is operating on by calculating and iterating on the number of leafs
* of each type. Then, the starting index is determined by subtracting d_leaf_offset[type] from the starting index that
* would be set in a nleaf x NLIST_GPU_PARTICLES_PER_LEAF array. The reason for this complexity is that the leaf particle
* array is not permitted to have any "holes" in it for faster traversal. The AABB is merged from the particle
* positions, and a Morton code is assigned to this AABB for determining tree hierarchy based on the Morton code of
* the first particle in the leaf. Although this does not necessarily generate the best ordering along the Z order curve
* for the newly merged leafs, it does guarantee that the leaf Morton codes are still in lexicographic ordering.
*
* AABBs are stored as two Scalar4s in a flat array. The first three coordinates of each Scalar4 correspond to the upper
* and lower bounds of the AABB. The last value of the upper AABB will hold a "rope" for traversing the tree (see
* gpu_nlist_bubble_aabbs_kernel), while the last value of the lower AABB holds the number of particles for a leaf node,
* or the left child for an internal node. This is determined by setting a bit to mark this value as a rope or as child.
*/
__global__ void gpu_nlist_merge_particles_kernel(Scalar4 *d_tree_aabbs,
uint32_t *d_morton_codes_red,
uint2 *d_tree_parent_sib,
const uint64_t *d_morton_types,
const Scalar4 *d_pos,
const unsigned int *d_num_per_type,
const unsigned int ntypes,
const unsigned int *d_map_tree_pid,
const unsigned int *d_leaf_offset,
const unsigned int *d_type_head,
const unsigned int Ntot,
const unsigned int nleafs)
{
// leaf index
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per leaf
if (idx >= nleafs)
return;
// get what type of leaf I am
unsigned int total_bins = 0;
int leaf_type = -1;
unsigned int max_idx = Ntot;
for (unsigned int cur_type=0; leaf_type == -1 && cur_type < ntypes; ++cur_type)
{
total_bins += (d_num_per_type[cur_type] + NLIST_GPU_PARTICLES_PER_LEAF - 1)/NLIST_GPU_PARTICLES_PER_LEAF;
if (idx < total_bins)
{
leaf_type = cur_type;
for (unsigned int next_type=cur_type+1; next_type < ntypes; ++next_type)
{
if (d_type_head[next_type])
{
max_idx = d_type_head[next_type] - 1;
break; // quit out of this inner loop once a match is found
}
}
break; // quit the outer loop
}
}
// get the starting particle index assuming naive leaf structure, and then subtract offset to eliminate "holes"
unsigned int start_idx = idx*NLIST_GPU_PARTICLES_PER_LEAF - d_leaf_offset[leaf_type];
unsigned int end_idx = (max_idx - start_idx > NLIST_GPU_PARTICLES_PER_LEAF) ? start_idx + NLIST_GPU_PARTICLES_PER_LEAF : max_idx;
// upper also holds the skip value, but we have no idea what this is right now
Scalar4 upper = d_pos[ d_map_tree_pid[start_idx] ];
upper.w = 0.0f;
// lower holds the particle number, we have one already
Scalar4 lower = upper;
unsigned int npart = 1;
for (unsigned int cur_p=start_idx+1; cur_p < end_idx; ++cur_p)
{
Scalar4 cur_pos = d_pos[ d_map_tree_pid[cur_p] ];
// merge the boxes together
if (cur_pos.x < lower.x) lower.x = cur_pos.x;
if (cur_pos.x > upper.x) upper.x = cur_pos.x;
if (cur_pos.y < lower.y) lower.y = cur_pos.y;
if (cur_pos.y > upper.y) upper.y = cur_pos.y;
if (cur_pos.z < lower.z) lower.z = cur_pos.z;
if (cur_pos.z > upper.z) upper.z = cur_pos.z;
++npart;
}
d_tree_aabbs[2*idx] = upper;
d_tree_aabbs[2*idx + 1] = make_scalar4(lower.x, lower.y, lower.z, __int_as_scalar(npart << 1));
// take logical AND with the 30 bit mask for the morton codes to extract just the morton code
// no sense swinging around 64 bit integers anymore
d_morton_codes_red[idx] = (unsigned int)(d_morton_types[start_idx] & MORTON_TYPE_MASK_64);
// fill the parent/sib relationships as if everything is a single leaf at first, to be overridden by hierarchy gen
// when this is not the case
d_tree_parent_sib[idx] = make_uint2(idx, idx << 1);
}
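/*!
 * \b Example
 * To make the packing above concrete, a consumer (such as the traversal kernel later in this file) can decode the
 * two Scalar4 slots of a node as in this sketch; the variable names are illustrative only.
 * \code
 * Scalar4 upper = d_tree_aabbs[2*node];     // xyz = upper bound, w = rope (filled in by the bubble kernel)
 * Scalar4 lower = d_tree_aabbs[2*node + 1]; // xyz = lower bound, w = packed particle count or left child
 * int rope = __scalar_as_int(upper.w);      // -1 signals the end of traversal
 * unsigned int packed = __scalar_as_int(lower.w);
 * if (packed & 1)
 *     {
 *     unsigned int left_child = packed >> 1; // internal node: lowest bit set
 *     }
 * else
 *     {
 *     unsigned int nparticles = packed >> 1; // leaf node: lowest bit clear
 *     }
 * \endcode
 */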
/*!
* \param d_tree_aabbs Flat array holding all AABBs for the tree
* \param d_morton_codes_red The Morton codes corresponding to the merged leafs
* \param d_tree_parent_sib Parent and sibling indexes for all nodes
* \param d_morton_types Morton-code type keys for all particles
* \param d_pos Particle positions
* \param d_num_per_type Number of particles per type
* \param ntypes Number of particle types
* \param d_map_tree_pid Sorted particle order (maps local index to ParticleData index)
* \param d_leaf_offset Amount to subtract from the expected leaf starting index to make an array with no holes by type
* \param d_type_head Index to first type and leaf ordered particles by type
* \param Ntot Total number of keys to sort
* \param nleafs Number of leaf nodes
*
* \returns hipSuccess on completion
*/
hipError_t gpu_nlist_merge_particles(Scalar4 *d_tree_aabbs,
uint32_t *d_morton_codes_red,
uint2 *d_tree_parent_sib,
const uint64_t *d_morton_types,
const Scalar4 *d_pos,
const unsigned int *d_num_per_type,
const unsigned int ntypes,
const unsigned int *d_map_tree_pid,
const unsigned int *d_leaf_offset,
const unsigned int *d_type_head,
const unsigned int Ntot,
const unsigned int nleafs,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void *)gpu_nlist_merge_particles_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
    hipLaunchKernelGGL(( gpu_nlist_merge_particles_kernel), dim3(nleafs/run_block_size + 1), dim3(run_block_size), 0, 0, d_tree_aabbs,
d_morton_codes_red,
d_tree_parent_sib,
d_morton_types,
d_pos,
d_num_per_type,
ntypes,
d_map_tree_pid,
d_leaf_offset,
d_type_head,
Ntot,
nleafs);
return hipSuccess;
}
//! Computes the longest common prefix between Morton codes
/*!
* \param d_morton_codes Array of Morton codes
* \param i First Morton code index
* \param j Second Morton code index
* \param min_idx The smallest index considered "in range" (inclusive)
* \param max_idx The last index considered "in range" (inclusive)
*
* \returns number of bits shared between the Morton codes of i and j
*
* delta(i,j) is defined as the largest number of bits shared between Morton codes i and j. When the Morton codes are
 * sorted, this implies delta(i',j') >= delta(i,j) for any i',j' in [i,j]. If j lies outside
 * the range of Morton codes corresponding to this tree, then delta always returns -1. If the Morton codes for i and j
 * are identical, then the common prefix of the offset indexes (i-min_idx) and (j-min_idx) is used as a tie breaker.
*/
__device__ inline int delta(const uint32_t *d_morton_codes,
unsigned int i,
unsigned int j,
int min_idx,
int max_idx)
{
if (j > max_idx || j < min_idx)
{
return -1;
}
uint32_t first_code = d_morton_codes[i];
uint32_t last_code = d_morton_codes[j];
// if codes match, then use index as tie breaker
// the number of shared bits is equal to the 32 bits in the integer, plus the number of bits shared between the
// indexes (offset from the start of the node range to make things simpler)
if (first_code == last_code)
{
return (32 + __clz((i-min_idx) ^ (j-min_idx)));
}
else
{
return __clz(first_code ^ last_code);
}
}
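/*!
 * \b Example
 * With illustrative values first_code = 0x12345678 and last_code = 0x12345000, the XOR is 0x00000678, whose highest
 * set bit is bit 10, so __clz returns 21 and delta() reports 21 shared leading bits. If the two codes were identical,
 * delta() instead returns 32 plus the number of leading zeros of (i-min_idx) ^ (j-min_idx), so the offset indexes act
 * as the tie breaker.
 */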
//! Determines the range of Morton codes that a node covers
/*!
* \param d_morton_codes Array of Morton codes
* \param min_idx The smallest Morton code index considered "in range" (inclusive)
* \param max_idx The last Morton code index considered "in range" (inclusive)
* \param idx Current node (Morton code) index
*
* \returns the minimum and maximum leafs covered by this node
* \note This is a literal implementation of the Karras pseudocode, with no optimizations or refinement.
* Tero Karras, "Maximizing parallelism in the construction of BVHs, octrees, and k-d trees",
* High Performance Graphics (2012).
*/
__device__ inline uint2 determineRange(const uint32_t *d_morton_codes,
const int min_idx,
const int max_idx,
const int idx)
{
int forward_prefix = delta(d_morton_codes, idx, idx+1, min_idx, max_idx);
int backward_prefix = delta(d_morton_codes, idx, idx-1, min_idx, max_idx);
// get direction of the range based on sign
int d = ((forward_prefix - backward_prefix) > 0) ? 1 : -1;
// get minimum prefix
int min_prefix = delta(d_morton_codes, idx, idx-d, min_idx, max_idx);
// get maximum prefix by binary search
int lmax = 2;
while( delta(d_morton_codes, idx, idx + d*lmax, min_idx, max_idx) > min_prefix)
{
lmax = lmax << 1;
}
unsigned int len = 0;
unsigned int step = lmax;
do
{
step = step >> 1;
unsigned int new_len = len + step;
if (delta(d_morton_codes, idx, idx + d*new_len, min_idx, max_idx) > min_prefix)
len = new_len;
}
while (step > 1);
// order range based on direction
uint2 range;
if (d > 0)
{
range.x = idx;
range.y = idx + len;
}
else
{
range.x = idx - len;
range.y = idx;
}
return range;
}
//! Finds the split position in Morton codes covered by a range
/*!
* \param d_morton_codes Array of Morton codes
* \param first First leaf node in the range
* \param last Last leaf node in the range
*
* \returns the leaf index corresponding to the split in Morton codes
* See determineRange for original source of algorithm.
*/
__device__ inline unsigned int findSplit(const uint32_t *d_morton_codes,
const unsigned int first,
const unsigned int last)
{
uint32_t first_code = d_morton_codes[first];
uint32_t last_code = d_morton_codes[last];
// if codes match, then just split evenly
if (first_code == last_code)
return (first + last) >> 1;
// get the length of the common prefix
int common_prefix = __clz(first_code ^ last_code);
// assume split starts at first, and begin binary search
unsigned int split = first;
unsigned int step = last - first;
do
{
// exponential decrease (is factor of 2 best?)
step = (step + 1) >> 1;
unsigned int new_split = split + step;
// if proposed split lies within range
if (new_split < last)
{
unsigned int split_code = d_morton_codes[new_split];
int split_prefix = __clz(first_code ^ split_code);
// if new split shares a longer number of bits, accept it
if (split_prefix > common_prefix)
{
split = new_split;
}
}
}
while (step > 1);
return split;
}
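/*!
 * \b Example
 * With illustrative codes {0b00001, 0b00010, 0b00100, 0b00111} covering first = 0 and last = 3, the common prefix of
 * the first and last codes is 29 bits. The binary search keeps the highest index whose code shares a strictly longer
 * prefix with the first code, which is index 1 (30 shared bits), so the range splits into {0b00001, 0b00010} and
 * {0b00100, 0b00111}.
 */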
//! Kernel to generate the parent-child-sibling relationships between nodes
/*!
* \param d_tree_parent_sib Parent and sibling for each node in the tree
* \param d_morton_codes Morton codes for each leaf node
* \param d_num_per_type Number of particles per type
* \param ntypes Number of types
* \param nleafs Number of leafs
*
* \b Implementation
* One thread is called per internal node in a single kernel launch. Each thread first determines its "local" index
* as an internal node within a tree based on the number of leafs per tree. The range of leafs covered by the internal
* node is determined, and then its split position is identified. The split identifies the children of the node as
* another internal node or as a leaf node.
*
* The parent and sibling of each child node is saved. The sibling id is bit shifted so as to use a single bit to encode
* the sibling as a right child or left child (after shifting, we set the bit to 1 if the sibling is a right child).
* If the child is a root node, it also saves information for itself (since no other node ever identifies a root as a
* child node).
*/
__global__ void gpu_nlist_gen_hierarchy_kernel(uint2 *d_tree_parent_sib,
const uint32_t *d_morton_codes,
const unsigned int *d_num_per_type,
const unsigned int ntypes,
const unsigned int nleafs,
const unsigned int ninternal)
{
// compute the internal node index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per internal node
if (idx >= ninternal)
return;
// get what type of leaf I am
unsigned int min_idx = 0; // the "0" of the leaf node array
unsigned int max_idx = 0; // the "N-1" of the leaf node array
unsigned int node_idx = idx;
unsigned int origin = 0;
unsigned int end = 0;
unsigned int cur_type=0;
unsigned int active_types=0;
for (cur_type=0; cur_type < ntypes; ++cur_type)
{
// current min index is the previous max index
min_idx = max_idx;
// max index adds the number of internal nodes in this type (nleaf - 1)
const unsigned int cur_nleaf = (d_num_per_type[cur_type] + NLIST_GPU_PARTICLES_PER_LEAF - 1)/NLIST_GPU_PARTICLES_PER_LEAF;
if (cur_nleaf > 0)
{
max_idx += cur_nleaf-1;
++active_types;
}
// we break the loop if we are in range
if (idx < max_idx)
{
// decrement by 1 to get this back into the number we really need
--active_types;
// now, we repurpose the min and max index to now correspond to the *leaf* index.
// the min index is the minimum *leaf* index
origin = min_idx + active_types;
end = max_idx + active_types;
node_idx += active_types;
break;
}
}
// enact the magical split determining
uint2 range = determineRange(d_morton_codes, origin, end, node_idx);
unsigned int first = range.x;
unsigned int last = range.y;
unsigned int split = findSplit(d_morton_codes, first, last);
uint2 children;
    // set the children, shifting ahead by nleafs - active_types to account for leaf shifting
// this factor comes out from resetting 0 = N_leaf,i each time, and then remapping this to
// an internal node
children.x = (split == first) ? split : (nleafs - active_types + split);
children.y = ((split + 1) == last) ? (split + 1) : nleafs - active_types + split + 1;
uint2 parent_sib;
parent_sib.x = nleafs + idx;
// encode the sibling as the right child
parent_sib.y = children.y << 1;
parent_sib.y |= 1;
d_tree_parent_sib[children.x] = parent_sib;
// encode the sibling as the left child
parent_sib.y = children.x << 1;
d_tree_parent_sib[children.y] = parent_sib;
// root is always number "zero", but only it can set its parent / sibling
// we mark both of these as the root for traversing, since only the root node
// will be its own sibling
if (node_idx == origin)
{
parent_sib.x = nleafs + idx;
parent_sib.y = (nleafs + idx) << 1;
d_tree_parent_sib[nleafs + idx] = parent_sib;
}
}
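/*!
 * \b Example
 * A sketch of how the packed parent/sibling word written above is unpacked (names are illustrative):
 * \code
 * uint2 ps = d_tree_parent_sib[node];
 * unsigned int parent = ps.x;
 * unsigned int sibling = ps.y >> 1;
 * bool sibling_is_right_child = ps.y & 1; // if set, the current node is a left child
 * \endcode
 * The root of each per-type tree is the only node recorded as its own sibling, which is what later kernels use to
 * detect that the top of the tree has been reached.
 */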
/*!
* \param d_tree_parent_sib Parent and sibling for each node in the tree
* \param d_morton_codes Morton codes for each leaf node
* \param d_num_per_type Number of particles per type
* \param ntypes Number of types
* \param nleafs Number of leafs
* \param block_size Requested thread block size
*
* \returns hipSuccess on completion
*/
hipError_t gpu_nlist_gen_hierarchy(uint2 *d_tree_parent_sib,
const uint32_t *d_morton_codes,
const unsigned int *d_num_per_type,
const unsigned int ntypes,
const unsigned int nleafs,
const unsigned int ninternal,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void *)gpu_nlist_gen_hierarchy_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
// one thread per internal node
hipLaunchKernelGGL(( gpu_nlist_gen_hierarchy_kernel), dim3(ninternal/run_block_size + 1), dim3(run_block_size), 0, 0, d_tree_parent_sib,
d_morton_codes,
d_num_per_type,
ntypes,
nleafs,
ninternal);
return hipSuccess;
}
//! Kernel to bubble up enclosing AABBs to internal nodes from leaf nodes
/*!
* \param d_node_locks Atomic flags identifying when node has been visited
* \param d_tree_aabbs AABB array for all tree nodes
* \param d_tree_parent_sib Parent and sibling indexes of each node
* \param ntypes Number of particle types
* \param nleafs Number of leaf nodes
*
* \b Implementation
* One thread is called per leaf node. The second thread to reach an internal node processes its two children,
* which guarantees that no node AABB is prematurely processed. The arrival order at a node is controlled by an atomic
* thread lock in global memory. This locking could be accelerated by using shared memory whenever a node is being
* processed by threads in the same block.
*
* When processing the node, the thread also walks up the tree to find the "rope" that tells a traverser
 * how to navigate the tree. If a query AABB intersects the current node, then the traverser always moves to the left
* child of the current node. If the AABB does not intersect, it moves along the "rope" to the next portion of the tree.
* The "rope" is calculated by walking back up the tree to find the earliest ancestor that is a left child of its
* parent. The rope then goes to that ancestor's sibling. If the root node is reached, then the rope is set to -1 to
* indicate traversal should be aborted.
*
* This kernel also encodes the left child of a node into the AABB for internal nodes. The thread processing the node
* checks if it arrived from a left child or right child of the node it is processing, and sets the left child of that
 * parent accordingly. A child index is stored bit shifted with the lowest bit set to 1, distinguishing it from a leaf particle count.
*/
__global__ void gpu_nlist_bubble_aabbs_kernel(unsigned int *d_node_locks,
Scalar4 *d_tree_aabbs,
const uint2 *d_tree_parent_sib,
const unsigned int ntypes,
const unsigned int nleafs)
{
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= nleafs)
return;
// okay, first we start from the leaf and set my bounding box
Scalar4 cur_upper = d_tree_aabbs[2*idx];
Scalar4 cur_lower = d_tree_aabbs[2*idx+1];
// zero the counters for internal nodes
cur_upper.w = 0.0f;
cur_lower.w = 0.0f;
unsigned int cur_node = idx;
unsigned int lock_key = 0;
do
{
uint2 cur_parent_sib = d_tree_parent_sib[cur_node];
unsigned int cur_parent = cur_parent_sib.x;
// if the current sibling is a right child, then the current node is a left child
bool cur_is_left = (cur_parent_sib.y & 1);
unsigned int cur_sibling = cur_parent_sib.y >> 1;
// first we compute the skip for this node always
// back track up the tree until you find a left child
// we have a check in place so that we don't stall on the root node
uint2 backtrack = cur_parent_sib;
while (!(backtrack.y & 1) && backtrack.x != (backtrack.y >> 1))
{
backtrack = d_tree_parent_sib[backtrack.x];
}
// then, the skip is to the sibling of that node, or else to quit
if (backtrack.y & 1)
{
d_tree_aabbs[2*cur_node].w = __int_as_scalar(backtrack.y >> 1);
}
else
{
d_tree_aabbs[2*cur_node].w = __int_as_scalar(-1.0);
}
// then, we do an atomicAdd on the lock to see if we need to process the parent AABBs
// check to make sure the parent is bigger than nleafs, or else the node lock always fails
// so that we terminate the thread
lock_key = (cur_parent >= nleafs) ? atomicAdd(d_node_locks + cur_parent - nleafs, 1) : 0;
// process the node
if (lock_key == 1)
{
// compute the max upper bound
Scalar4 sib_upper = d_tree_aabbs[2*cur_sibling];
if (sib_upper.x > cur_upper.x) cur_upper.x = sib_upper.x;
if (sib_upper.y > cur_upper.y) cur_upper.y = sib_upper.y;
if (sib_upper.z > cur_upper.z) cur_upper.z = sib_upper.z;
d_tree_aabbs[2*cur_parent] = cur_upper;
// compute the min lower bound
Scalar4 sib_lower = d_tree_aabbs[2*cur_sibling+1];
if (sib_lower.x < cur_lower.x) cur_lower.x = sib_lower.x;
if (sib_lower.y < cur_lower.y) cur_lower.y = sib_lower.y;
if (sib_lower.z < cur_lower.z) cur_lower.z = sib_lower.z;
// this must always be some internal node, so stash the left child of this node here
unsigned int left_child_masked = ((cur_is_left ? cur_node : cur_sibling) << 1) | 1;
cur_lower.w = __int_as_scalar( left_child_masked );
d_tree_aabbs[2*cur_parent+1] = cur_lower;
// bump the current node one level
cur_node = cur_parent;
}
}
while (lock_key == 1);
}
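/*!
 * \b Example
 * A sketch of how a traverser consumes the ropes stored above (the real loop is in gpu_nlist_traverse_tree_kernel
 * below; the variable names here are illustrative):
 * \code
 * int cur_node = root;
 * while (cur_node > -1)
 *     {
 *     // ... test the query AABB against node cur_node, descend or process the leaf ...
 *     // on a miss, or after processing a leaf, follow the rope; -1 means the tree is exhausted
 *     cur_node = __scalar_as_int(d_tree_aabbs[2*cur_node].w);
 *     }
 * \endcode
 */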
/*!
* \param d_node_locks Atomic flags identifying when node has been visited
* \param d_tree_aabbs AABB array for all tree nodes
* \param d_tree_parent_sib Parent and sibling indexes of each node
* \param ntypes Number of particle types
* \param nleafs Number of leaf nodes
* \param block_size Requested thread block size
*
* \returns hipSuccess on completion
*/
hipError_t gpu_nlist_bubble_aabbs(unsigned int *d_node_locks,
Scalar4 *d_tree_aabbs,
const uint2 *d_tree_parent_sib,
const unsigned int ntypes,
const unsigned int nleafs,
const unsigned int ninternal,
const unsigned int block_size)
{
hipMemset(d_node_locks, 0, sizeof(unsigned int)*ninternal);
hipLaunchKernelGGL(( gpu_nlist_bubble_aabbs_kernel), dim3(nleafs/block_size + 1), dim3(block_size), 0, 0, d_node_locks,
d_tree_aabbs,
d_tree_parent_sib,
ntypes,
nleafs);
return hipSuccess;
}
//! Kernel to rearrange particle data into leaf order for faster traversal
/*!
* \param d_leaf_xyzf Particle xyz coordinates + particle id in leaf order
* \param d_leaf_db Particle diameter and body id in leaf order
* \param d_pos Particle positions
* \param d_diameter Particle diameters
* \param d_body Particle body ids
* \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
* \param Ntot Number of particles owned by this rank
*
* \b Implementation
* One thread per particle is called. Writes are coalesced by writing in leaf order, and reading in a scattered way.
*/
__global__ void gpu_nlist_move_particles_kernel(Scalar4 *d_leaf_xyzf,
Scalar2 *d_leaf_db,
const Scalar4 *d_pos,
const Scalar *d_diameter,
const unsigned int *d_body,
const unsigned int *d_map_tree_pid,
const unsigned int Ntot)
{
// get thread index
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per particle
if (idx >= Ntot)
return;
// read and write particle data
unsigned int p_idx = d_map_tree_pid[idx];
Scalar4 pos_i = d_pos[p_idx];
d_leaf_xyzf[idx] = make_scalar4(pos_i.x, pos_i.y, pos_i.z, __int_as_scalar(p_idx));
Scalar2 db = make_scalar2(d_diameter[p_idx], __int_as_scalar(d_body[p_idx]));
d_leaf_db[idx] = db;
}
/*!
* \param d_leaf_xyzf Particle xyz coordinates + particle id in leaf order
* \param d_leaf_db Particle diameter and body id in leaf order
* \param d_pos Particle positions
* \param d_diameter Particle diameters
* \param d_body Particle body ids
* \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
* \param Ntot Number of particles owned by this rank
* \param block_size Requested thread block size
*
* \returns hipSuccess on completion
*/
hipError_t gpu_nlist_move_particles(Scalar4 *d_leaf_xyzf,
Scalar2 *d_leaf_db,
const Scalar4 *d_pos,
const Scalar *d_diameter,
const unsigned int *d_body,
const unsigned int *d_map_tree_pid,
const unsigned int Ntot,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void *)gpu_nlist_move_particles_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
hipLaunchKernelGGL(( gpu_nlist_move_particles_kernel), dim3(Ntot/run_block_size + 1), dim3(run_block_size), 0, 0, d_leaf_xyzf,
d_leaf_db,
d_pos,
d_diameter,
d_body,
d_map_tree_pid,
Ntot);
return hipSuccess;
}
//! Kernel for traversing tree to generate neighbor list
/*!
* \param d_nlist Neighbor list for writing
* \param d_n_neigh Number of neighbors per particle
* \param d_last_updated_pos Records current particle positions
* \param d_conditions Store overflow condition by type
* \param d_Nmax Maximum number of neighbors allocated by type
* \param d_head_list Indexes for writing into neighbor list
* \param N Number of particles
* \param nghosts Number of ghost particles
* \param d_map_tree_pid Map leaf index to local particle index
* \param d_leaf_offset Offset for reading leaf particles by type
* \param d_tree_roots Index for tree root by type
* \param d_tree_aabbs Tree AABBs
* \param nleafs Total number of leafs
* \param d_leaf_xyzf Leaf position-id array
* \param d_leaf_db Leaf diameter-body array
* \param d_pos Particle positions
* \param d_image_list Translation vectors to check for traversal
* \param nimages Number of translation vectors to check
* \param d_r_cut Cutoff radius by type r_cut(i,j)
* \param r_buff Buffer around cutoff radius
* \param max_diam Maximum diameter attained by a particle for diameter shifting
* \param ntypes Number of particle types
*
* \b Implementation
* One thread is launched per particle, but the threads operate on particles in leaf order rather than ParticleData
* order in order to minimize divergence within a warp (particles in the same leaf should intersect similar parts of the
* tree). Each thread iterates on the particle types (trees) and queries on all translation vectors using a stackless
* search. When the query AABB intersects a node AABB, the node AABB is checked to be an internal node or a leaf node.
* If an internal node, then the traversal advances to that node's left child. If a leaf node, the leaf particles are
* tested directly to be included in the neighbor list. The node then advances along that leaf node's rope. If the AABB
* is not intersected, the traversal advances along the rope. This process proceeds until a rope signals that the
* traversal is complete.
*/
template<unsigned char flags>
__global__ void gpu_nlist_traverse_tree_kernel(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const unsigned int N,
const unsigned int nghosts,
const unsigned int *d_map_tree_pid,
const unsigned int *d_leaf_offset,
const unsigned int *d_tree_roots,
const Scalar4 *d_tree_aabbs,
const unsigned int nleafs,
const Scalar4 *d_leaf_xyzf,
const Scalar2 *d_leaf_db,
const Scalar4 *d_pos,
const Scalar3 *d_image_list,
const unsigned int nimages,
const Scalar *d_r_cut,
const Scalar r_buff,
const Scalar max_diam,
const unsigned int ntypes)
{
bool filter_body = flags & 1;
bool diameter_shift = flags & 2;
// cache the r_listsq parameters into shared memory
const Index2D typpair_idx(ntypes);
const unsigned int num_typ_parameters = typpair_idx.getNumElements();
// shared data for per type pair parameters
extern __shared__ unsigned char s_data[];
// pointer for the r_listsq data
Scalar *s_r_list = (Scalar *)(&s_data[0]);
unsigned int *s_Nmax = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters]);
unsigned int *s_leaf_offset = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters + sizeof(unsigned int)*ntypes]);
// load in the per type pair r_list
for (unsigned int cur_offset = 0; cur_offset < num_typ_parameters; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < num_typ_parameters)
{
Scalar r_cut = d_r_cut[cur_offset + threadIdx.x];
// force the r_list(i,j) to a skippable value if r_cut(i,j) is skippable
s_r_list[cur_offset + threadIdx.x] = (r_cut > Scalar(0.0)) ? r_cut+r_buff : Scalar(-1.0);
}
if (cur_offset + threadIdx.x < ntypes)
{
s_Nmax[cur_offset + threadIdx.x] = d_Nmax[cur_offset + threadIdx.x];
s_leaf_offset[cur_offset + threadIdx.x] = d_leaf_offset[cur_offset + threadIdx.x];
}
}
__syncthreads();
// compute the particle index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// quit now if this thread is processing past the end of the leaf list
if (idx >= (N+nghosts))
return;
// read in the current position
unsigned int my_pidx = d_map_tree_pid[idx];
// we only process particles owned by this processor for neighbors
if (my_pidx >= N)
return;
const Scalar4 postype_i = texFetchScalar4(d_pos, pdata_pos_tex, my_pidx);
const Scalar3 pos_i = make_scalar3(postype_i.x, postype_i.y, postype_i.z);
const unsigned int type_i = __scalar_as_int(postype_i.w);
// fetch the diameter and body out of the leaf texture since it's bound anyway
const Scalar2 db_i = texFetchScalar2(d_leaf_db, leaf_db_tex, idx);
const Scalar diam_i = db_i.x;
const unsigned int body_i = __scalar_as_int(db_i.y);
const unsigned int nlist_head_i = texFetchUint(d_head_list, head_list_tex, my_pidx);
unsigned int n_neigh_i = 0;
for (unsigned int cur_pair_type=0; cur_pair_type < ntypes; ++cur_pair_type)
{
// Check primary box
const Scalar r_cut_i = s_r_list[typpair_idx(type_i,cur_pair_type)];
// Skip this tree type if it is not needed
if (r_cut_i <= Scalar(0.0))
continue;
// stash the r_cutsq before any diameter shifting
const Scalar r_cutsq_i = r_cut_i*r_cut_i;
// the rlist to use for the AABB search has to be at least as big as the biggest diameter
Scalar r_list_i = r_cut_i;
if (diameter_shift)
r_list_i += max_diam - Scalar(1.0);
const unsigned int cur_tree_root = d_tree_roots[cur_pair_type];
// skip this type if we don't have it
if (cur_tree_root == NLIST_GPU_INVALID_NODE)
continue;
for (unsigned int cur_image = 0; cur_image < nimages; ++cur_image)
{
const Scalar3 pos_i_image = pos_i + d_image_list[cur_image];
const Scalar3 aabb_upper = make_scalar3(pos_i_image.x + r_list_i,
pos_i_image.y + r_list_i,
pos_i_image.z + r_list_i);
const Scalar3 aabb_lower = make_scalar3(pos_i_image.x - r_list_i,
pos_i_image.y - r_list_i,
pos_i_image.z - r_list_i);
// stackless search
int cur_node_idx = cur_tree_root;
while (cur_node_idx > -1)
{
const Scalar4 upper_rope = texFetchScalar4(d_tree_aabbs, aabb_node_bounds_tex, 2*cur_node_idx);
const Scalar4 lower_np = texFetchScalar4(d_tree_aabbs, aabb_node_bounds_tex, 2*cur_node_idx+1);
if (!(aabb_upper.x < lower_np.x
|| aabb_lower.x > upper_rope.x
|| aabb_upper.y < lower_np.y
|| aabb_lower.y > upper_rope.y
|| aabb_upper.z < lower_np.z
|| aabb_lower.z > upper_rope.z))
{
const unsigned int np_child_masked = __scalar_as_int(lower_np.w);
if(!(np_child_masked & 1))
{
// leaf node
// all leaves must have at least 1 particle, so we can use this to decide
const unsigned int node_head = NLIST_GPU_PARTICLES_PER_LEAF*cur_node_idx - s_leaf_offset[cur_pair_type];
const unsigned int n_part = np_child_masked >> 1;
for (unsigned int cur_p = node_head; cur_p < node_head + n_part; ++cur_p)
{
// neighbor j
const Scalar4 cur_xyzf = texFetchScalar4(d_leaf_xyzf, leaf_xyzf_tex, cur_p);
const Scalar3 pos_j = make_scalar3(cur_xyzf.x, cur_xyzf.y, cur_xyzf.z);
const unsigned int j = __scalar_as_int(cur_xyzf.w);
const Scalar2 cur_db = texFetchScalar2(d_leaf_db, leaf_db_tex, cur_p);
const Scalar diam_j = cur_db.x;
const unsigned int body_j = __scalar_as_int(cur_db.y);
bool excluded = (my_pidx == j);
if (filter_body && body_i != 0xffffffff)
excluded = excluded | (body_i == body_j);
if (!excluded)
{
// now we can trim down the actual particles based on diameter
// compute the shift for the cutoff if not excluded
Scalar sqshift = Scalar(0.0);
if (diameter_shift)
{
const Scalar delta = (diam_i + diam_j) * Scalar(0.5) - Scalar(1.0);
// r^2 < (r_list + delta)^2
// r^2 < r_listsq + delta^2 + 2*r_list*delta
sqshift = (delta + Scalar(2.0) * r_cut_i) * delta;
}
// compute distance and wrap back into box
Scalar3 drij = pos_j - pos_i_image;
Scalar dr2 = dot(drij,drij);
if (dr2 <= (r_cutsq_i + sqshift))
{
if (n_neigh_i < s_Nmax[type_i])
{
d_nlist[nlist_head_i + n_neigh_i] = j;
}
++n_neigh_i;
}
}
}
// leaf nodes always move to their rope
cur_node_idx = __scalar_as_int(upper_rope.w);
}
else
{
// internal node, take left child
cur_node_idx = (np_child_masked >> 1);
}
}
else
{
cur_node_idx = __scalar_as_int(upper_rope.w); // no overlap, rope ahead
}
} // end stackless search
} // end loop over images
} // end loop over pair types
// could try reordering by idx instead of pidx, but that seems to not make much difference in microbenchmarking.
d_n_neigh[my_pidx] = n_neigh_i;
    d_last_updated_pos[my_pidx] = make_scalar4(pos_i.x, pos_i.y, pos_i.z, __int_as_scalar(type_i));
// update the number of neighbors for this type if allocated memory is exceeded
if (n_neigh_i >= s_Nmax[type_i])
atomicMax(&d_conditions[type_i], n_neigh_i);
}
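/*!
 * \b Note
 * The dynamically allocated shared memory used by this kernel is laid out as
 * [ Scalar r_list[ntypes*ntypes] | unsigned int Nmax[ntypes] | unsigned int leaf_offset[ntypes] ],
 * so the host launch below requests sizeof(Scalar)*ntypes*ntypes + 2*sizeof(unsigned int)*ntypes bytes. As an
 * illustration, with 3 types and single precision this is 9*4 + 2*3*4 = 60 bytes.
 */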
/*!
* \param d_nlist Neighbor list for writing
* \param d_n_neigh Number of neighbors per particle
* \param d_last_updated_pos Records current particle positions
* \param d_conditions Store overflow condition by type
* \param d_Nmax Maximum number of neighbors allocated by type
* \param d_head_list Indexes for writing into neighbor list
* \param N Number of particles
* \param nghosts Number of ghost particles
* \param d_map_tree_pid Map leaf index to local particle index
* \param d_leaf_offset Offset for reading leaf particles by type
* \param d_tree_roots Index for tree root by type
* \param d_tree_aabbs Tree AABBs
* \param nleafs Total number of leafs
* \param d_leaf_xyzf Leaf position-id array
* \param d_leaf_db Leaf diameter-body array
* \param d_pos Particle positions
* \param d_image_list Translation vectors to check for traversal
* \param nimages Number of translation vectors to check
* \param d_r_cut Cutoff radius by type r_cut(i,j)
* \param r_buff Buffer around cutoff radius
* \param max_diam Maximum diameter attained by a particle for diameter shifting
* \param ntypes Number of particle types
* \param filter_body True if body filtering is enabled
* \param diameter_shift True if rcut(i,j) should be shifted by the particle diameters
* \param compute_capability Compute capability of the GPU (in 20, 30, 35 format)
* \param block_size Requested thread block size
*
* \returns hipSuccess on completion
 * \returns hipError_t error code on failure to bind a texture
*
* \note Kernel calls are templated on body filtering and diameter shifting for optimization.
* \note One thread is called for all leaf particles. Some of these threads will die because they correspond to ghost
* particles not owned by the rank. Because the leaf particles are sorted, there is no easy way to skip these
* particles, and this inefficiency is assumed to be relatively small.
*/
hipError_t gpu_nlist_traverse_tree(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const unsigned int N,
const unsigned int nghosts,
const unsigned int *d_map_tree_pid,
const unsigned int *d_leaf_offset,
const unsigned int *d_tree_roots,
const Scalar4 *d_tree_aabbs,
const unsigned int nleafs,
const unsigned int ninternal,
const unsigned int nnodes,
const Scalar4 *d_leaf_xyzf,
const Scalar2 *d_leaf_db,
const Scalar4 *d_pos,
const Scalar3 *d_image_list,
const unsigned int nimages,
const Scalar *d_r_cut,
const Scalar r_buff,
const Scalar max_diam,
const unsigned int ntypes,
bool filter_body,
bool diameter_shift,
const unsigned int compute_capability,
const unsigned int block_size)
{
    // shared memory = r_list + Nmax + leaf_offset (see kernel for the layout)
Index2D typpair_idx(ntypes);
unsigned int shared_size = sizeof(Scalar)*typpair_idx.getNumElements() + 2*sizeof(unsigned int)*ntypes;
    // bind the particle data, leaf, tree, and head list textures
if (compute_capability < 35)
{
pdata_pos_tex.normalized = false;
pdata_pos_tex.filterMode = hipFilterModePoint;
hipError_t error = hipBindTexture(0, pdata_pos_tex, d_pos, sizeof(Scalar4)*(N+nghosts));
if (error != hipSuccess)
return error;
leaf_xyzf_tex.normalized = false;
leaf_xyzf_tex.filterMode = hipFilterModePoint;
error = hipBindTexture(0, leaf_xyzf_tex, d_leaf_xyzf, sizeof(Scalar4)*(N+nghosts));
if (error != hipSuccess)
return error;
leaf_db_tex.normalized = false;
leaf_db_tex.filterMode = hipFilterModePoint;
error = hipBindTexture(0, leaf_db_tex, d_leaf_db, sizeof(Scalar2)*(N+nghosts));
if (error != hipSuccess)
return error;
aabb_node_bounds_tex.normalized = false;
aabb_node_bounds_tex.filterMode = hipFilterModePoint;
error = hipBindTexture(0, aabb_node_bounds_tex, d_tree_aabbs, sizeof(Scalar4)*2*nnodes);
if (error != hipSuccess)
return error;
head_list_tex.normalized = false;
head_list_tex.filterMode = hipFilterModePoint;
error = hipBindTexture(0, head_list_tex, d_head_list, sizeof(unsigned int)*N);
if (error != hipSuccess)
return error;
}
if (!filter_body && !diameter_shift)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, gpu_nlist_traverse_tree_kernel<0>);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
int nblocks = (N+nghosts)/run_block_size + 1;
hipLaunchKernelGGL(( gpu_nlist_traverse_tree_kernel<0>), dim3(nblocks), dim3(run_block_size), shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
N,
nghosts,
d_map_tree_pid,
d_leaf_offset,
d_tree_roots,
d_tree_aabbs,
nleafs,
d_leaf_xyzf,
d_leaf_db,
d_pos,
d_image_list,
nimages,
d_r_cut,
r_buff,
max_diam,
ntypes);
}
else if (filter_body && !diameter_shift)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, gpu_nlist_traverse_tree_kernel<1>);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
int nblocks = (N+nghosts)/run_block_size + 1;
hipLaunchKernelGGL(( gpu_nlist_traverse_tree_kernel<1>), dim3(nblocks), dim3(run_block_size), shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
N,
nghosts,
d_map_tree_pid,
d_leaf_offset,
d_tree_roots,
d_tree_aabbs,
nleafs,
d_leaf_xyzf,
d_leaf_db,
d_pos,
d_image_list,
nimages,
d_r_cut,
r_buff,
max_diam,
ntypes);
}
else if (!filter_body && diameter_shift)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, gpu_nlist_traverse_tree_kernel<2>);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
int nblocks = (N+nghosts)/run_block_size + 1;
hipLaunchKernelGGL(( gpu_nlist_traverse_tree_kernel<2>), dim3(nblocks), dim3(run_block_size), shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
N,
nghosts,
d_map_tree_pid,
d_leaf_offset,
d_tree_roots,
d_tree_aabbs,
nleafs,
d_leaf_xyzf,
d_leaf_db,
d_pos,
d_image_list,
nimages,
d_r_cut,
r_buff,
max_diam,
ntypes);
}
else if (filter_body && diameter_shift)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, gpu_nlist_traverse_tree_kernel<3>);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
int nblocks = (N+nghosts)/run_block_size + 1;
hipLaunchKernelGGL(( gpu_nlist_traverse_tree_kernel<3>), dim3(nblocks), dim3(run_block_size), shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
N,
nghosts,
d_map_tree_pid,
d_leaf_offset,
d_tree_roots,
d_tree_aabbs,
nleafs,
d_leaf_xyzf,
d_leaf_db,
d_pos,
d_image_list,
nimages,
d_r_cut,
r_buff,
max_diam,
ntypes);
}
// unbind the textures
if (compute_capability < 35)
{
hipError_t error = hipUnbindTexture(pdata_pos_tex);
if (error != hipSuccess)
return error;
error = hipUnbindTexture(leaf_xyzf_tex);
if (error != hipSuccess)
return error;
error = hipUnbindTexture(leaf_db_tex);
if (error != hipSuccess)
return error;
error = hipUnbindTexture(aabb_node_bounds_tex);
if (error != hipSuccess)
return error;
error = hipUnbindTexture(head_list_tex);
if (error != hipSuccess)
return error;
}
return hipSuccess;
}
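/*!
 * \b Note
 * The four kernel instantiations dispatched above correspond to the template parameter
 * flags = (diameter_shift << 1) | filter_body, i.e. <0> plain traversal, <1> body filtering only,
 * <2> diameter shifting only, and <3> both options enabled.
 */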
//! Kernel to find divisions between particle types in sorted order
/*!
* \param d_type_head Index to first type in leaf ordered particles by type
* \param d_pos Particle positions
* \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
* \param N Total number of particles on rank (including ghosts)
*
* The starting index for each type of particles is the first particle where the left neighbor is not of the same type.
*/
__global__ void gpu_nlist_get_divisions_kernel(unsigned int *d_type_head,
const Scalar4 *d_pos,
const unsigned int *d_map_tree_pid,
const unsigned int N)
{
// compute the particle index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per particle
if (idx >= N)
return;
const unsigned int cur_pidx = d_map_tree_pid[idx];
// get type of the current particle
const Scalar4 cur_postype = d_pos[cur_pidx];
const unsigned int cur_type = __scalar_as_int(cur_postype.w);
// all particles except for the first one should look left
if (idx > 0)
{
const unsigned int left_pidx = d_map_tree_pid[idx - 1];
// get type of the particle to my left
const Scalar4 left_postype = d_pos[left_pidx];
const unsigned int left_type = __scalar_as_int(left_postype.w);
// if the left has a different type, then this is a type boundary, and the type starts at the current thread index
if (left_type != cur_type)
{
d_type_head[cur_type] = idx + 1; // offset the index +1 so that we can use 0 to mean "none of this found"
}
}
    else // the first particle is always a type head; store index 0 as 1 because of the offset
{
d_type_head[cur_type] = 1;
}
}
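/*!
 * \b Example
 * Because of the +1 offset, consumers read the head list as in this sketch (names are illustrative):
 * \code
 * unsigned int head = d_type_head[type];
 * if (head)
 *     {
 *     unsigned int first_leaf_index = head - 1; // first particle of this type in leaf order
 *     }
 * else
 *     {
 *     // no particles of this type are present on this rank
 *     }
 * \endcode
 * gpu_nlist_merge_particles_kernel uses the same convention to bound the particle range of each leaf type.
 */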
/*!
* \param d_type_head Index to first type in leaf ordered particles by type
 * \param d_pos Particle positions
* \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
* \param N Total number of particles on rank (including ghosts)
* \param ntypes Number of types
* \param block_size Requested thread block size
*
* \returns hipSuccess on completion
*/
hipError_t gpu_nlist_init_count(unsigned int *d_type_head,
const Scalar4 *d_pos,
const unsigned int *d_map_tree_pid,
const unsigned int N,
const unsigned int ntypes,
const unsigned int block_size)
{
// apply the scan
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void *)gpu_nlist_get_divisions_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
// zero out the head list
hipMemset(d_type_head, 0, sizeof(unsigned int)*ntypes);
// get the head list divisions
hipLaunchKernelGGL(( gpu_nlist_get_divisions_kernel), dim3(N/run_block_size + 1), dim3(run_block_size), 0, 0, d_type_head, d_pos, d_map_tree_pid, N);
return hipSuccess;
}
#undef MORTON_CODE_BITS
#undef MORTON_TYPE_MASK_64
#undef MORTON_CODE_N_BINS
| 01e7b3334ba00ad67e7228e4dc6a088d5bdeb480.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: mphoward
#include "NeighborListGPUTree.cuh"
#include "TextureTools.h"
#include "cub/cub.cuh"
#define MORTON_CODE_BITS 30 //!< Length of the Morton code in bits (k = 10 bits per direction)
#define MORTON_CODE_N_BINS 1024 //!< Number of bins (2^10) per direction to generate 30 bit Morton codes
#define MORTON_TYPE_MASK_64 0x000000003fffffffu //!< 64 bit mask to turn morton code-type back to morton code
/*! \file NeighborListGPUTree.cu
\brief Defines GPU kernel code for neighbor list tree traversal on the GPU
*/
//! Texture for reading particle positions
scalar4_tex_t pdata_pos_tex;
//! Texture for reading leaf data
scalar4_tex_t leaf_xyzf_tex;
//! Texture for the diameter / body
scalar2_tex_t leaf_db_tex;
//! Texture for reading node upper and lower bounds
scalar4_tex_t aabb_node_bounds_tex;
//! Texture for the head list
texture<unsigned int, 1, cudaReadModeElementType> head_list_tex;
//!< Expands a 10-bit integer into 30 bits by inserting 2 zeros after each bit.
/*!
* \param v unsigned integer with 10 bits set
* \returns The integer expanded with two zeros interleaved between bits
* http://devblogs.nvidia.com/parallelforall/thinking-parallel-part-iii-tree-construction-gpu/
*/
__device__ inline unsigned int expandBits(unsigned int v)
{
v = (v * 0x00010001u) & 0xFF0000FFu;
v = (v * 0x00000101u) & 0x0F00F00Fu;
v = (v * 0x00000011u) & 0xC30C30C3u;
v = (v * 0x00000005u) & 0x49249249u;
return v;
}
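/*!
 * \b Example
 * expandBits(5) = expandBits(0b101) inserts two zero bits after every input bit, giving 0b1000001 = 65; in general
 * input bit k moves to output bit 3k. Interleaving three such expansions as ii*4 + jj*2 + kk (as done below) then
 * yields the 30 bit Morton code with the x, y, and z bits interleaved.
 */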
//! Assigns the Morton code-type key for each particle on this processor
/*!
* \param d_morton_types Morton code-type keys per particle
* \param d_map_tree_pid List to be overwritten with particle ids in ascending order
* \param d_morton_conditions Flag if a local particle (not a ghost) is detected out of bounds
* \param d_pos Particle positions
* \param N Number of local particles
* \param nghosts Number of ghost particles
* \param box Local simulation box
* \param ghost_width Anticipated size of the ghost layer for nonbonded interactions
*
* \b Implementation
* A sorting key is generated for each particle by determining the 30 bit Morton code for each particle, and then
* concatenating onto the type. Both the Morton code and the type are 32 bit integers, so the concatenation is stored
* compactly in a 64 bit integer morton_type = (type << 30) + morton code. In this way, a lexicographic sort will
* sort first by type, then by morton code. The corresponding particle id (thread index) is stashed into d_map_tree_pid
* to track particles after sorting.
*/
__global__ void gpu_nlist_morton_types_kernel(uint64_t *d_morton_types,
unsigned int *d_map_tree_pid,
int *d_morton_conditions,
const Scalar4 *d_pos,
const unsigned int N,
const unsigned int nghosts,
const BoxDim box,
const Scalar3 ghost_width)
{
// compute the particle index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per particle
if (idx >= N+nghosts)
return;
// acquire particle data
Scalar4 postype = d_pos[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
const unsigned int type = __scalar_as_int(postype.w);
// get position in simulation box
uchar3 periodic = box.getPeriodic();
Scalar3 f = box.makeFraction(pos,ghost_width);
/* check if the particle is inside the unit cell + ghost layer in all dimensions
* this tolerance is small enough that when we multiply by the morton code bin size, we are still in range
* we silently ignore ghosts outside of this width, and instead deal with that special case below
* where extra ghosts are communicated (e.g. for bonded interactions)
*/
    if (((f.x < Scalar(-0.00001) || f.x >= Scalar(1.00001)) ||
         (f.y < Scalar(-0.00001) || f.y >= Scalar(1.00001)) ||
         (f.z < Scalar(-0.00001) || f.z >= Scalar(1.00001))) && idx < N)
{
*d_morton_conditions = idx;
return;
}
// find the bin each particle belongs in
int ib = (int)(f.x * MORTON_CODE_N_BINS);
int jb = (int)(f.y * MORTON_CODE_N_BINS);
int kb = (int)(f.z * MORTON_CODE_N_BINS);
if (!periodic.x) // ghosts exist and may be past layer width
{
// handle special cases where random ghosts are beyond the expected layer
// by just rounding to the nearest edge
if (ib < 0)
{
ib = 0;
}
else if (ib >= MORTON_CODE_N_BINS)
{
ib = MORTON_CODE_N_BINS - 1;
}
}
else if (ib == MORTON_CODE_N_BINS) // some particles lie exactly on the edge, floor them to zero
{
ib = 0;
}
// do as for x in y
if (!periodic.y)
{
if (jb < 0)
{
jb = 0;
}
else if (jb >= MORTON_CODE_N_BINS)
{
jb = MORTON_CODE_N_BINS - 1;
}
}
else if (jb == MORTON_CODE_N_BINS)
{
jb = 0;
}
// do as for y in z
if (!periodic.z)
{
if (kb < 0)
{
kb = 0;
}
else if (kb >= MORTON_CODE_N_BINS)
{
kb = MORTON_CODE_N_BINS - 1;
}
}
else if (kb == MORTON_CODE_N_BINS)
{
kb = 0;
}
// inline call to some bit swizzling arithmetic
unsigned int ii = expandBits((unsigned int)ib);
unsigned int jj = expandBits((unsigned int)jb);
unsigned int kk = expandBits((unsigned int)kb);
unsigned int morton_code = ii * 4 + jj * 2 + kk;
// save the morton code and corresponding particle index for sorting
// the morton codes hold both the type and the code to sort by both type and position simultaneously
d_morton_types[idx] = (((uint64_t)type) << MORTON_CODE_BITS) + (uint64_t)morton_code;
d_map_tree_pid[idx] = idx;
}
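/*!
 * \b Example
 * The 64 bit key packs the type above the 30 Morton code bits, so it can be unpacked as in this sketch:
 * \code
 * uint64_t key = d_morton_types[idx];
 * unsigned int type = (unsigned int)(key >> MORTON_CODE_BITS);
 * unsigned int morton_code = (unsigned int)(key & MORTON_TYPE_MASK_64);
 * \endcode
 * A lexicographic sort of these keys therefore orders particles first by type and then along the Z-order curve
 * within each type.
 */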
/*!
* \param d_morton_types Morton code-type keys per particle
* \param d_map_tree_pid List to be overwritten with particle ids in ascending order
* \param d_morton_conditions Flag if a local particle (not a ghost) is detected out of bounds
* \param d_pos Particle positions
* \param N Number of local particles
* \param nghosts Number of ghost particles
* \param box Local simulation box
* \param ghost_width Anticipated size of the ghost layer for nonbonded interactions
* \param block_size Requested thread block size of kernel launch
*
* \returns cudaSuccess on completion
*/
cudaError_t gpu_nlist_morton_types(uint64_t *d_morton_types,
unsigned int *d_map_tree_pid,
int *d_morton_conditions,
const Scalar4 *d_pos,
const unsigned int N,
const unsigned int nghosts,
const BoxDim& box,
const Scalar3 ghost_width,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void *)gpu_nlist_morton_types_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
gpu_nlist_morton_types_kernel<<<(N+nghosts)/run_block_size + 1, run_block_size>>>(d_morton_types,
d_map_tree_pid,
d_morton_conditions,
d_pos,
N,
nghosts,
box,
ghost_width);
return cudaSuccess;
}
/*!
* \param d_morton_types Morton code-type keys per particle
* \param d_morton_types_alt Auxiliary array of equal size to d_morton_types for double buffered sorting
* \param d_map_tree_pid List of particle ids
* \param d_map_tree_pid_alt Auxiliary array of equal size to d_map_tree_pid for double buffered sorting
* \param d_tmp_storage Temporary storage in device memory
* \param tmp_storage_bytes Number of bytes allocated for temporary storage
* \param swap_morton Flag to switch real data from auxiliary array to primary array after sorting
* \param swap_map Flag to switch real data from auxiliary array to primary array after sorting
* \param Ntot Total number of keys to sort
* \param n_type_bits Number of bits to check for particle types
*
* \returns cudaSuccess on completion
*
* \b Implementation
* The CUB library is used for device-wide radix sorting. Radix sorting is O(kN) where k is the number of bits to check
 * in an unsigned integer key, and N is the number of keys. We restrict the number of bits checked in the 64 bit
 * keys by sorting only on the lowest MORTON_CODE_BITS + n_type_bits bits, which is all that the type + Morton code
 * keys occupy. CUB DeviceRadixSort performs
* its own tuning at run time.
*
* Because CUB requires temporary storage, this function must be called twice. First, when \a d_tmp_storage is NULL,
* the number of bytes required for temporary storage is saved in \a tmp_storage_bytes. This memory must then be
* allocated in \a d_tmp_storage. On the second call, the radix sort is performed. Because the radix sort may put the
* active (sorted) buffer in either slot of the DoubleBuffer, a boolean flag is set in \a swap_morton and \a swap_map
* for whether these data arrays should be swapped.
*/
cudaError_t gpu_nlist_morton_sort(uint64_t *d_morton_types,
uint64_t *d_morton_types_alt,
unsigned int *d_map_tree_pid,
unsigned int *d_map_tree_pid_alt,
void *d_tmp_storage,
size_t &tmp_storage_bytes,
bool &swap_morton,
bool &swap_map,
const unsigned int Ntot,
const unsigned int n_type_bits)
{
// initialize memory as "double buffered"
cub::DoubleBuffer<uint64_t> d_keys(d_morton_types, d_morton_types_alt);
cub::DoubleBuffer<unsigned int> d_vals(d_map_tree_pid, d_map_tree_pid_alt);
// on the first pass, this just sizes the temporary storage
// on the second pass, it actually does the radix sort
cub::DeviceRadixSort::SortPairs(d_tmp_storage,
tmp_storage_bytes,
d_keys,
d_vals,
Ntot,
0,
MORTON_CODE_BITS+n_type_bits);
// we've only done something to the buffers on the second time when temporary storage is allocated
if (d_tmp_storage != NULL)
{
// mark that the gpu arrays should be flipped if the final result is not in the right array
swap_morton = (d_keys.selector == 1);
swap_map = (d_vals.selector == 1);
}
return cudaSuccess;
}
//! Kernel to merge adjacent codes into leaf nodes
/*!
* \param d_tree_aabbs Flat array holding all AABBs for the tree
* \param d_morton_codes_red The Morton codes corresponding to the merged leafs
* \param d_tree_parent_sib Parent and sibling indexes for all nodes
* \param d_morton_types Morton-code type keys for all particles
* \param d_pos Particle positions
* \param d_num_per_type Number of particles per type
* \param ntypes Number of particle types
* \param d_map_tree_pid Sorted particle order (maps local index to ParticleData index)
* \param d_leaf_offset Amount to subtract from the expected leaf starting index to make an array with no holes by type
* \param d_type_head Index to first type and leaf ordered particles by type
* \param Ntot Total number of keys to sort
* \param nleafs Number of leaf nodes
*
* \b Implementation
* One thread per leaf is called, and is responsible for merging NLIST_GPU_PARTICLES_PER_LEAF into an AABB. Each thread
* first determines what type of leaf particle it is operating on by calculating and iterating on the number of leafs
* of each type. Then, the starting index is determined by subtracting d_leaf_offset[type] from the starting index that
* would be set in a nleaf x NLIST_GPU_PARTICLES_PER_LEAF array. The reason for this complexity is that the leaf particle
* array is not permitted to have any "holes" in it for faster traversal. The AABB is merged from the particle
* positions, and a Morton code is assigned to this AABB for determining tree hierarchy based on the Morton code of
* the first particle in the leaf. Although this does not necessarily generate the best ordering along the Z order curve
* for the newly merged leafs, it does guarantee that the leaf Morton codes are still in lexicographic ordering.
*
* AABBs are stored as two Scalar4s in a flat array. The first three coordinates of each Scalar4 correspond to the upper
* and lower bounds of the AABB. The last value of the upper AABB will hold a "rope" for traversing the tree (see
* gpu_nlist_bubble_aabbs_kernel), while the last value of the lower AABB holds the number of particles for a leaf node,
* or the left child for an internal node. This is determined by setting a bit to mark this value as a rope or as child.
*/
__global__ void gpu_nlist_merge_particles_kernel(Scalar4 *d_tree_aabbs,
uint32_t *d_morton_codes_red,
uint2 *d_tree_parent_sib,
const uint64_t *d_morton_types,
const Scalar4 *d_pos,
const unsigned int *d_num_per_type,
const unsigned int ntypes,
const unsigned int *d_map_tree_pid,
const unsigned int *d_leaf_offset,
const unsigned int *d_type_head,
const unsigned int Ntot,
const unsigned int nleafs)
{
// leaf index
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per leaf
if (idx >= nleafs)
return;
// get what type of leaf I am
unsigned int total_bins = 0;
int leaf_type = -1;
unsigned int max_idx = Ntot;
for (unsigned int cur_type=0; leaf_type == -1 && cur_type < ntypes; ++cur_type)
{
total_bins += (d_num_per_type[cur_type] + NLIST_GPU_PARTICLES_PER_LEAF - 1)/NLIST_GPU_PARTICLES_PER_LEAF;
if (idx < total_bins)
{
leaf_type = cur_type;
for (unsigned int next_type=cur_type+1; next_type < ntypes; ++next_type)
{
if (d_type_head[next_type])
{
max_idx = d_type_head[next_type] - 1;
break; // quit out of this inner loop once a match is found
}
}
break; // quit the outer loop
}
}
// get the starting particle index assuming naive leaf structure, and then subtract offset to eliminate "holes"
unsigned int start_idx = idx*NLIST_GPU_PARTICLES_PER_LEAF - d_leaf_offset[leaf_type];
unsigned int end_idx = (max_idx - start_idx > NLIST_GPU_PARTICLES_PER_LEAF) ? start_idx + NLIST_GPU_PARTICLES_PER_LEAF : max_idx;
// upper also holds the skip value, but we have no idea what this is right now
Scalar4 upper = d_pos[ d_map_tree_pid[start_idx] ];
upper.w = 0.0f;
// lower holds the particle number, we have one already
Scalar4 lower = upper;
unsigned int npart = 1;
for (unsigned int cur_p=start_idx+1; cur_p < end_idx; ++cur_p)
{
Scalar4 cur_pos = d_pos[ d_map_tree_pid[cur_p] ];
// merge the boxes together
if (cur_pos.x < lower.x) lower.x = cur_pos.x;
if (cur_pos.x > upper.x) upper.x = cur_pos.x;
if (cur_pos.y < lower.y) lower.y = cur_pos.y;
if (cur_pos.y > upper.y) upper.y = cur_pos.y;
if (cur_pos.z < lower.z) lower.z = cur_pos.z;
if (cur_pos.z > upper.z) upper.z = cur_pos.z;
++npart;
}
d_tree_aabbs[2*idx] = upper;
d_tree_aabbs[2*idx + 1] = make_scalar4(lower.x, lower.y, lower.z, __int_as_scalar(npart << 1));
// take bitwise AND with the 30 bit mask for the morton codes to extract just the morton code
// no sense swinging around 64 bit integers anymore
d_morton_codes_red[idx] = (unsigned int)(d_morton_types[start_idx] & MORTON_TYPE_MASK_64);
// fill the parent/sib relationships as if everything is a single leaf at first, to be overridden by hierarchy gen
// when this is not the case
d_tree_parent_sib[idx] = make_uint2(idx, idx << 1);
}
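// Illustrative note (not part of the original source): a worked example of the packing convention
// used above. A leaf holding npart = 3 particles stores __int_as_scalar(3 << 1) = __int_as_scalar(6)
// in lower.w, an even integer whose low bit is 0. When gpu_nlist_bubble_aabbs_kernel later fills an
// internal node whose left child is node 10, it stores __int_as_scalar((10 << 1) | 1) =
// __int_as_scalar(21), an odd integer. A traverser can therefore test the low bit of
// __scalar_as_int(lower.w): 0 means "leaf, value >> 1 is the particle count", 1 means "internal
// node, value >> 1 is the left child index". upper.w is reserved for the traversal rope.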
/*!
* \param d_tree_aabbs Flat array holding all AABBs for the tree
* \param d_morton_codes_red The Morton codes corresponding to the merged leafs
* \param d_tree_parent_sib Parent and sibling indexes for all nodes
* \param d_morton_types Morton-code type keys for all particles
* \param d_pos Particle positions
* \param d_num_per_type Number of particles per type
* \param ntypes Number of particle types
* \param d_map_tree_pid Sorted particle order (maps local index to ParticleData index)
* \param d_leaf_offset Amount to subtract from the expected leaf starting index to make an array with no holes by type
* \param d_type_head Index to first type in leaf ordered particles by type
* \param Ntot Total number of particles
* \param nleafs Number of leaf nodes
* \param block_size Requested thread block size
*
* \returns cudaSuccess on completion
*/
cudaError_t gpu_nlist_merge_particles(Scalar4 *d_tree_aabbs,
uint32_t *d_morton_codes_red,
uint2 *d_tree_parent_sib,
const uint64_t *d_morton_types,
const Scalar4 *d_pos,
const unsigned int *d_num_per_type,
const unsigned int ntypes,
const unsigned int *d_map_tree_pid,
const unsigned int *d_leaf_offset,
const unsigned int *d_type_head,
const unsigned int Ntot,
const unsigned int nleafs,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void *)gpu_nlist_merge_particles_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
gpu_nlist_merge_particles_kernel<<<nleafs/run_block_size + 1, run_block_size>>>(d_tree_aabbs,
d_morton_codes_red,
d_tree_parent_sib,
d_morton_types,
d_pos,
d_num_per_type,
ntypes,
d_map_tree_pid,
d_leaf_offset,
d_type_head,
Ntot,
nleafs);
return cudaSuccess;
}
//! Computes the longest common prefix between Morton codes
/*!
* \param d_morton_codes Array of Morton codes
* \param i First Morton code index
* \param j Second Morton code index
* \param min_idx The smallest index considered "in range" (inclusive)
* \param max_idx The last index considered "in range" (inclusive)
*
* \returns number of bits shared between the Morton codes of i and j
*
* delta(i,j) is defined as the largest number of bits shared between Morton codes i and j. When the Morton codes are
* sorted, this implies delta(i',j') >= delta(i,j) for any i',j' in [i,j]. If j lies outside
* the range of Morton codes corresponding to this tree, then delta is always -1. If the Morton codes for i and j
* are identical, then the longest common prefix of the (offset) indexes i and j is used as a tie breaker.
*/
__device__ inline int delta(const uint32_t *d_morton_codes,
unsigned int i,
unsigned int j,
int min_idx,
int max_idx)
{
if (j > max_idx || j < min_idx)
{
return -1;
}
uint32_t first_code = d_morton_codes[i];
uint32_t last_code = d_morton_codes[j];
// if codes match, then use index as tie breaker
// the number of shared bits is equal to the 32 bits in the integer, plus the number of bits shared between the
// indexes (offset from the start of the node range to make things simpler)
if (first_code == last_code)
{
return (32 + __clz((i-min_idx) ^ (j-min_idx)));
}
else
{
return __clz(first_code ^ last_code);
}
}
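// Illustrative note (not part of the original source): two worked examples of delta().
// (1) Distinct codes: first_code = 0xF0 and last_code = 0xF3 give __clz(0xF0 ^ 0xF3) = __clz(0x3) = 30
//     shared bits.
// (2) Identical codes with i = 2, j = 5 and min_idx = 0 fall through to the tie breaker:
//     32 + __clz(2 ^ 5) = 32 + __clz(7) = 32 + 29 = 61, so ties are resolved by how early the
//     offset indexes diverge.
// An out-of-range j (j < min_idx or j > max_idx) returns -1 before any memory is read.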
//! Determines the range of Morton codes that a node covers
/*!
* \param d_morton_codes Array of Morton codes
* \param min_idx The smallest Morton code index considered "in range" (inclusive)
* \param max_idx The last Morton code index considered "in range" (inclusive)
* \param idx Current node (Morton code) index
*
* \returns the minimum and maximum leafs covered by this node
* \note This is a literal implementation of the Karras pseudocode, with no optimizations or refinement.
* Tero Karras, "Maximizing parallelism in the construction of BVHs, octrees, and k-d trees",
* High Performance Graphics (2012).
*/
__device__ inline uint2 determineRange(const uint32_t *d_morton_codes,
const int min_idx,
const int max_idx,
const int idx)
{
int forward_prefix = delta(d_morton_codes, idx, idx+1, min_idx, max_idx);
int backward_prefix = delta(d_morton_codes, idx, idx-1, min_idx, max_idx);
// get direction of the range based on sign
int d = ((forward_prefix - backward_prefix) > 0) ? 1 : -1;
// get minimum prefix
int min_prefix = delta(d_morton_codes, idx, idx-d, min_idx, max_idx);
// get maximum prefix by binary search
int lmax = 2;
while( delta(d_morton_codes, idx, idx + d*lmax, min_idx, max_idx) > min_prefix)
{
lmax = lmax << 1;
}
unsigned int len = 0;
unsigned int step = lmax;
do
{
step = step >> 1;
unsigned int new_len = len + step;
if (delta(d_morton_codes, idx, idx + d*new_len, min_idx, max_idx) > min_prefix)
len = new_len;
}
while (step > 1);
// order range based on direction
uint2 range;
if (d > 0)
{
range.x = idx;
range.y = idx + len;
}
else
{
range.x = idx - len;
range.y = idx;
}
return range;
}
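// Illustrative note (not part of the original source): a worked example on the 8 sorted codes used
// in the Karras paper, 00001 00010 00100 00101 10011 11000 11001 11110 (leaf indexes 0..7,
// min_idx = 0, max_idx = 7). For internal node idx = 3, delta(3,4) = 27 and delta(3,2) = 31, so the
// range grows in the d = -1 direction with min_prefix = 27. The power-of-two expansion stops at
// lmax = 4 because delta(3,-1) is out of range and returns -1, and the binary search then settles
// on len = 3, so this node covers the leaf range [0,3].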
//! Finds the split position in Morton codes covered by a range
/*!
* \param d_morton_codes Array of Morton codes
* \param first First leaf node in the range
* \param last Last leaf node in the range
*
* \returns the leaf index corresponding to the split in Morton codes
* See determineRange for original source of algorithm.
*/
__device__ inline unsigned int findSplit(const uint32_t *d_morton_codes,
const unsigned int first,
const unsigned int last)
{
uint32_t first_code = d_morton_codes[first];
uint32_t last_code = d_morton_codes[last];
// if codes match, then just split evenly
if (first_code == last_code)
return (first + last) >> 1;
// get the length of the common prefix
int common_prefix = __clz(first_code ^ last_code);
// assume split starts at first, and begin binary search
unsigned int split = first;
unsigned int step = last - first;
do
{
// exponential decrease (is factor of 2 best?)
step = (step + 1) >> 1;
unsigned int new_split = split + step;
// if proposed split lies within range
if (new_split < last)
{
unsigned int split_code = d_morton_codes[new_split];
int split_prefix = __clz(first_code ^ split_code);
// if new split shares a longer number of bits, accept it
if (split_prefix > common_prefix)
{
split = new_split;
}
}
}
while (step > 1);
return split;
}
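// Illustrative note (not part of the original source): continuing the example above, the node
// covering leaves [0,3] has first_code = 00001 and last_code = 00101, so common_prefix = 29. The
// search first proposes new_split = 2 (code 00100 shares only 29 bits with first_code, rejected)
// and then new_split = 1 (code 00010 shares 30 bits, accepted), returning split = 1: the left
// child covers leaves [0,1] and the right child covers leaves [2,3].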
//! Kernel to generate the parent-child-sibling relationships between nodes
/*!
* \param d_tree_parent_sib Parent and sibling for each node in the tree
* \param d_morton_codes Morton codes for each leaf node
* \param d_num_per_type Number of particles per type
* \param ntypes Number of types
* \param nleafs Number of leafs
* \param ninternal Number of internal nodes
*
* \b Implementation
* One thread is called per internal node in a single kernel launch. Each thread first determines its "local" index
* as an internal node within a tree based on the number of leafs per tree. The range of leafs covered by the internal
* node is determined, and then its split position is identified. The split identifies the children of the node as
* another internal node or as a leaf node.
*
* The parent and sibling of each child node is saved. The sibling id is bit shifted so as to use a single bit to encode
* the sibling as a right child or left child (after shifting, we set the bit to 1 if the sibling is a right child).
* If the child is a root node, it also saves information for itself (since no other node ever identifies a root as a
* child node).
*/
__global__ void gpu_nlist_gen_hierarchy_kernel(uint2 *d_tree_parent_sib,
const uint32_t *d_morton_codes,
const unsigned int *d_num_per_type,
const unsigned int ntypes,
const unsigned int nleafs,
const unsigned int ninternal)
{
// compute the internal node index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per internal node
if (idx >= ninternal)
return;
// get what type of leaf I am
unsigned int min_idx = 0; // the "0" of the leaf node array
unsigned int max_idx = 0; // the "N-1" of the leaf node array
unsigned int node_idx = idx;
unsigned int origin = 0;
unsigned int end = 0;
unsigned int cur_type=0;
unsigned int active_types=0;
for (cur_type=0; cur_type < ntypes; ++cur_type)
{
// current min index is the previous max index
min_idx = max_idx;
// max index adds the number of internal nodes in this type (nleaf - 1)
const unsigned int cur_nleaf = (d_num_per_type[cur_type] + NLIST_GPU_PARTICLES_PER_LEAF - 1)/NLIST_GPU_PARTICLES_PER_LEAF;
if (cur_nleaf > 0)
{
max_idx += cur_nleaf-1;
++active_types;
}
// we break the loop if we are in range
if (idx < max_idx)
{
// decrement by 1 to get this back into the number we really need
--active_types;
// now, we repurpose the min and max index to now correspond to the *leaf* index.
// the min index is the minimum *leaf* index
origin = min_idx + active_types;
end = max_idx + active_types;
node_idx += active_types;
break;
}
}
// enact the magical split determining
uint2 range = determineRange(d_morton_codes, origin, end, node_idx);
unsigned int first = range.x;
unsigned int last = range.y;
unsigned int split = findSplit(d_morton_codes, first, last);
uint2 children;
// set the children, shifting ahead by nleafs - active_types to account for leaf shifting
// this factor comes out from resetting 0 = N_leaf,i each time, and then remapping this to
// an internal node
children.x = (split == first) ? split : (nleafs - active_types + split);
children.y = ((split + 1) == last) ? (split + 1) : nleafs - active_types + split + 1;
uint2 parent_sib;
parent_sib.x = nleafs + idx;
// encode the sibling as the right child
parent_sib.y = children.y << 1;
parent_sib.y |= 1;
d_tree_parent_sib[children.x] = parent_sib;
// encode the sibling as the left child
parent_sib.y = children.x << 1;
d_tree_parent_sib[children.y] = parent_sib;
// root is always number "zero", but only it can set its parent / sibling
// we mark both of these as the root for traversing, since only the root node
// will be its own sibling
if (node_idx == origin)
{
parent_sib.x = nleafs + idx;
parent_sib.y = (nleafs + idx) << 1;
d_tree_parent_sib[nleafs + idx] = parent_sib;
}
}
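// Illustrative note (not part of the original source): continuing the single-type example above
// (nleafs = 8, active_types = 0), the internal node with idx = 3 covers leaves [0,3] and splits at
// split = 1, so children.x = 8 + 1 = 9 and children.y = 8 + 2 = 10 (both internal nodes, stored
// after the 8 leaves). The kernel then writes d_tree_parent_sib[9] = (11, (10 << 1) | 1) and
// d_tree_parent_sib[10] = (11, 9 << 1): both children report global node 11 = nleafs + idx as
// their parent, and the low bit of .y records whether the stored sibling is a right child (1) or
// a left child (0).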
/*!
* \param d_tree_parent_sib Parent and sibling for each node in the tree
* \param d_morton_codes Morton codes for each leaf node
* \param d_num_per_type Number of particles per type
* \param ntypes Number of types
* \param nleafs Number of leafs
* \param ninternal Number of internal nodes
* \param block_size Requested thread block size
*
* \returns cudaSuccess on completion
*/
cudaError_t gpu_nlist_gen_hierarchy(uint2 *d_tree_parent_sib,
const uint32_t *d_morton_codes,
const unsigned int *d_num_per_type,
const unsigned int ntypes,
const unsigned int nleafs,
const unsigned int ninternal,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void *)gpu_nlist_gen_hierarchy_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
// one thread per internal node
gpu_nlist_gen_hierarchy_kernel<<<ninternal/run_block_size + 1, run_block_size>>>(d_tree_parent_sib,
d_morton_codes,
d_num_per_type,
ntypes,
nleafs,
ninternal);
return cudaSuccess;
}
//! Kernel to bubble up enclosing AABBs to internal nodes from leaf nodes
/*!
* \param d_node_locks Atomic flags identifying when node has been visited
* \param d_tree_aabbs AABB array for all tree nodes
* \param d_tree_parent_sib Parent and sibling indexes of each node
* \param ntypes Number of particle types
* \param nleafs Number of leaf nodes
*
* \b Implementation
* One thread is called per leaf node. The second thread to reach an internal node processes its two children,
* which guarantees that no node AABB is prematurely processed. The arrival order at a node is controlled by an atomic
* thread lock in global memory. This locking could be accelerated by using shared memory whenever a node is being
* processed by threads in the same block.
*
* When processing the node, the thread also walks up the tree to find the "rope" that tells a traverser
* how to navigate the tree. If a query AABB intersects the current node, then the traverser always moves to the left
* child of the current node. If the AABB does not intersect, it moves along the "rope" to the next portion of the tree.
* The "rope" is calculated by walking back up the tree to find the earliest ancestor that is a left child of its
* parent. The rope then goes to that ancestor's sibling. If the root node is reached, then the rope is set to -1 to
* indicate traversal should be aborted.
*
* This kernel also encodes the left child of a node into the AABB for internal nodes. The thread processing the node
* checks if it arrived from a left child or right child of the node it is processing, and sets the left child of that
* parent accordingly. A child is indicated by bit shifting, and setting the first bit to 1.
*/
__global__ void gpu_nlist_bubble_aabbs_kernel(unsigned int *d_node_locks,
Scalar4 *d_tree_aabbs,
const uint2 *d_tree_parent_sib,
const unsigned int ntypes,
const unsigned int nleafs)
{
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= nleafs)
return;
// okay, first we start from the leaf and set my bounding box
Scalar4 cur_upper = d_tree_aabbs[2*idx];
Scalar4 cur_lower = d_tree_aabbs[2*idx+1];
// zero the counters for internal nodes
cur_upper.w = 0.0f;
cur_lower.w = 0.0f;
unsigned int cur_node = idx;
unsigned int lock_key = 0;
do
{
uint2 cur_parent_sib = d_tree_parent_sib[cur_node];
unsigned int cur_parent = cur_parent_sib.x;
// if the current sibling is a right child, then the current node is a left child
bool cur_is_left = (cur_parent_sib.y & 1);
unsigned int cur_sibling = cur_parent_sib.y >> 1;
// first we compute the skip for this node always
// back track up the tree until you find a left child
// we have a check in place so that we don't stall on the root node
uint2 backtrack = cur_parent_sib;
while (!(backtrack.y & 1) && backtrack.x != (backtrack.y >> 1))
{
backtrack = d_tree_parent_sib[backtrack.x];
}
// then, the skip is to the sibling of that node, or else to quit
if (backtrack.y & 1)
{
d_tree_aabbs[2*cur_node].w = __int_as_scalar(backtrack.y >> 1);
}
else
{
d_tree_aabbs[2*cur_node].w = __int_as_scalar(-1.0);
}
// then, we do an atomicAdd on the lock to see if we need to process the parent AABBs
// only internal nodes (index >= nleafs) have a lock; otherwise force lock_key to 0
// so that we terminate the thread
lock_key = (cur_parent >= nleafs) ? atomicAdd(d_node_locks + cur_parent - nleafs, 1) : 0;
// process the node
if (lock_key == 1)
{
// compute the max upper bound
Scalar4 sib_upper = d_tree_aabbs[2*cur_sibling];
if (sib_upper.x > cur_upper.x) cur_upper.x = sib_upper.x;
if (sib_upper.y > cur_upper.y) cur_upper.y = sib_upper.y;
if (sib_upper.z > cur_upper.z) cur_upper.z = sib_upper.z;
d_tree_aabbs[2*cur_parent] = cur_upper;
// compute the min lower bound
Scalar4 sib_lower = d_tree_aabbs[2*cur_sibling+1];
if (sib_lower.x < cur_lower.x) cur_lower.x = sib_lower.x;
if (sib_lower.y < cur_lower.y) cur_lower.y = sib_lower.y;
if (sib_lower.z < cur_lower.z) cur_lower.z = sib_lower.z;
// this must always be some internal node, so stash the left child of this node here
unsigned int left_child_masked = ((cur_is_left ? cur_node : cur_sibling) << 1) | 1;
cur_lower.w = __int_as_scalar( left_child_masked );
d_tree_aabbs[2*cur_parent+1] = cur_lower;
// bump the current node one level
cur_node = cur_parent;
}
}
while (lock_key == 1);
}
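// Illustrative note (not part of the original source): the lock protocol above assumes
// d_node_locks was zeroed before launch (gpu_nlist_bubble_aabbs below does this with cudaMemset).
// The first thread to reach an internal node gets lock_key = 0 back from atomicAdd and terminates;
// the second gets lock_key = 1 and merges the two child AABBs, both of which are final by then.
// Each internal node is therefore written exactly once, and exactly one thread walks all the way
// up to the root.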
/*!
* \param d_node_locks Atomic flags identifying when node has been visited
* \param d_tree_aabbs AABB array for all tree nodes
* \param d_tree_parent_sib Parent and sibling indexes of each node
* \param ntypes Number of particle types
* \param nleafs Number of leaf nodes
* \param ninternal Number of internal nodes
* \param block_size Requested thread block size
*
* \returns cudaSuccess on completion
*/
cudaError_t gpu_nlist_bubble_aabbs(unsigned int *d_node_locks,
Scalar4 *d_tree_aabbs,
const uint2 *d_tree_parent_sib,
const unsigned int ntypes,
const unsigned int nleafs,
const unsigned int ninternal,
const unsigned int block_size)
{
cudaMemset(d_node_locks, 0, sizeof(unsigned int)*ninternal);
gpu_nlist_bubble_aabbs_kernel<<<nleafs/block_size + 1, block_size>>>(d_node_locks,
d_tree_aabbs,
d_tree_parent_sib,
ntypes,
nleafs);
return cudaSuccess;
}
//! Kernel to rearrange particle data into leaf order for faster traversal
/*!
* \param d_leaf_xyzf Particle xyz coordinates + particle id in leaf order
* \param d_leaf_db Particle diameter and body id in leaf order
* \param d_pos Particle positions
* \param d_diameter Particle diameters
* \param d_body Particle body ids
* \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
* \param Ntot Number of particles owned by this rank
*
* \b Implementation
* One thread per particle is called. Writes are coalesced by writing in leaf order, and reading in a scattered way.
*/
__global__ void gpu_nlist_move_particles_kernel(Scalar4 *d_leaf_xyzf,
Scalar2 *d_leaf_db,
const Scalar4 *d_pos,
const Scalar *d_diameter,
const unsigned int *d_body,
const unsigned int *d_map_tree_pid,
const unsigned int Ntot)
{
// get thread index
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per particle
if (idx >= Ntot)
return;
// read and write particle data
unsigned int p_idx = d_map_tree_pid[idx];
Scalar4 pos_i = d_pos[p_idx];
d_leaf_xyzf[idx] = make_scalar4(pos_i.x, pos_i.y, pos_i.z, __int_as_scalar(p_idx));
Scalar2 db = make_scalar2(d_diameter[p_idx], __int_as_scalar(d_body[p_idx]));
d_leaf_db[idx] = db;
}
/*!
* \param d_leaf_xyzf Particle xyz coordinates + particle id in leaf order
* \param d_leaf_db Particle diameter and body id in leaf order
* \param d_pos Particle positions
* \param d_diameter Particle diameters
* \param d_body Particle body ids
* \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
* \param Ntot Number of particles owned by this rank
* \param block_size Requested thread block size
*
* \returns cudaSuccess on completion
*/
cudaError_t gpu_nlist_move_particles(Scalar4 *d_leaf_xyzf,
Scalar2 *d_leaf_db,
const Scalar4 *d_pos,
const Scalar *d_diameter,
const unsigned int *d_body,
const unsigned int *d_map_tree_pid,
const unsigned int Ntot,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void *)gpu_nlist_move_particles_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
gpu_nlist_move_particles_kernel<<<Ntot/run_block_size + 1, run_block_size>>>(d_leaf_xyzf,
d_leaf_db,
d_pos,
d_diameter,
d_body,
d_map_tree_pid,
Ntot);
return cudaSuccess;
}
//! Kernel for traversing tree to generate neighbor list
/*!
* \param d_nlist Neighbor list for writing
* \param d_n_neigh Number of neighbors per particle
* \param d_last_updated_pos Records current particle positions
* \param d_conditions Store overflow condition by type
* \param d_Nmax Maximum number of neighbors allocated by type
* \param d_head_list Indexes for writing into neighbor list
* \param N Number of particles
* \param nghosts Number of ghost particles
* \param d_map_tree_pid Map leaf index to local particle index
* \param d_leaf_offset Offset for reading leaf particles by type
* \param d_tree_roots Index for tree root by type
* \param d_tree_aabbs Tree AABBs
* \param nleafs Total number of leafs
* \param d_leaf_xyzf Leaf position-id array
* \param d_leaf_db Leaf diameter-body array
* \param d_pos Particle positions
* \param d_image_list Translation vectors to check for traversal
* \param nimages Number of translation vectors to check
* \param d_r_cut Cutoff radius by type r_cut(i,j)
* \param r_buff Buffer around cutoff radius
* \param max_diam Maximum diameter attained by a particle for diameter shifting
* \param ntypes Number of particle types
*
* \b Implementation
* One thread is launched per particle, but the threads operate on particles in leaf order rather than ParticleData
* order in order to minimize divergence within a warp (particles in the same leaf should intersect similar parts of the
* tree). Each thread iterates on the particle types (trees) and queries on all translation vectors using a stackless
* search. When the query AABB intersects a node AABB, the node AABB is checked to be an internal node or a leaf node.
* If an internal node, then the traversal advances to that node's left child. If a leaf node, the leaf particles are
* tested directly to be included in the neighbor list. The node then advances along that leaf node's rope. If the AABB
* is not intersected, the traversal advances along the rope. This process proceeds until a rope signals that the
* traversal is complete.
*/
template<unsigned char flags>
__global__ void gpu_nlist_traverse_tree_kernel(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const unsigned int N,
const unsigned int nghosts,
const unsigned int *d_map_tree_pid,
const unsigned int *d_leaf_offset,
const unsigned int *d_tree_roots,
const Scalar4 *d_tree_aabbs,
const unsigned int nleafs,
const Scalar4 *d_leaf_xyzf,
const Scalar2 *d_leaf_db,
const Scalar4 *d_pos,
const Scalar3 *d_image_list,
const unsigned int nimages,
const Scalar *d_r_cut,
const Scalar r_buff,
const Scalar max_diam,
const unsigned int ntypes)
{
bool filter_body = flags & 1;
bool diameter_shift = flags & 2;
// cache the r_listsq parameters into shared memory
const Index2D typpair_idx(ntypes);
const unsigned int num_typ_parameters = typpair_idx.getNumElements();
// shared data for per type pair parameters
extern __shared__ unsigned char s_data[];
// pointer for the r_listsq data
Scalar *s_r_list = (Scalar *)(&s_data[0]);
unsigned int *s_Nmax = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters]);
unsigned int *s_leaf_offset = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters + sizeof(unsigned int)*ntypes]);
// load in the per type pair r_list
for (unsigned int cur_offset = 0; cur_offset < num_typ_parameters; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < num_typ_parameters)
{
Scalar r_cut = d_r_cut[cur_offset + threadIdx.x];
// force the r_list(i,j) to a skippable value if r_cut(i,j) is skippable
s_r_list[cur_offset + threadIdx.x] = (r_cut > Scalar(0.0)) ? r_cut+r_buff : Scalar(-1.0);
}
if (cur_offset + threadIdx.x < ntypes)
{
s_Nmax[cur_offset + threadIdx.x] = d_Nmax[cur_offset + threadIdx.x];
s_leaf_offset[cur_offset + threadIdx.x] = d_leaf_offset[cur_offset + threadIdx.x];
}
}
__syncthreads();
// compute the particle index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// quit now if this thread is processing past the end of the leaf list
if (idx >= (N+nghosts))
return;
// read in the current position
unsigned int my_pidx = d_map_tree_pid[idx];
// we only process particles owned by this processor for neighbors
if (my_pidx >= N)
return;
const Scalar4 postype_i = texFetchScalar4(d_pos, pdata_pos_tex, my_pidx);
const Scalar3 pos_i = make_scalar3(postype_i.x, postype_i.y, postype_i.z);
const unsigned int type_i = __scalar_as_int(postype_i.w);
// fetch the diameter and body out of the leaf texture since it's bound anyway
const Scalar2 db_i = texFetchScalar2(d_leaf_db, leaf_db_tex, idx);
const Scalar diam_i = db_i.x;
const unsigned int body_i = __scalar_as_int(db_i.y);
const unsigned int nlist_head_i = texFetchUint(d_head_list, head_list_tex, my_pidx);
unsigned int n_neigh_i = 0;
for (unsigned int cur_pair_type=0; cur_pair_type < ntypes; ++cur_pair_type)
{
// Check primary box
const Scalar r_cut_i = s_r_list[typpair_idx(type_i,cur_pair_type)];
// Skip this tree type if it is not needed
if (r_cut_i <= Scalar(0.0))
continue;
// stash the r_cutsq before any diameter shifting
const Scalar r_cutsq_i = r_cut_i*r_cut_i;
// the rlist to use for the AABB search has to be at least as big as the biggest diameter
Scalar r_list_i = r_cut_i;
if (diameter_shift)
r_list_i += max_diam - Scalar(1.0);
const unsigned int cur_tree_root = d_tree_roots[cur_pair_type];
// skip this type if we don't have it
if (cur_tree_root == NLIST_GPU_INVALID_NODE)
continue;
for (unsigned int cur_image = 0; cur_image < nimages; ++cur_image)
{
const Scalar3 pos_i_image = pos_i + d_image_list[cur_image];
const Scalar3 aabb_upper = make_scalar3(pos_i_image.x + r_list_i,
pos_i_image.y + r_list_i,
pos_i_image.z + r_list_i);
const Scalar3 aabb_lower = make_scalar3(pos_i_image.x - r_list_i,
pos_i_image.y - r_list_i,
pos_i_image.z - r_list_i);
// stackless search
int cur_node_idx = cur_tree_root;
while (cur_node_idx > -1)
{
const Scalar4 upper_rope = texFetchScalar4(d_tree_aabbs, aabb_node_bounds_tex, 2*cur_node_idx);
const Scalar4 lower_np = texFetchScalar4(d_tree_aabbs, aabb_node_bounds_tex, 2*cur_node_idx+1);
if (!(aabb_upper.x < lower_np.x
|| aabb_lower.x > upper_rope.x
|| aabb_upper.y < lower_np.y
|| aabb_lower.y > upper_rope.y
|| aabb_upper.z < lower_np.z
|| aabb_lower.z > upper_rope.z))
{
const unsigned int np_child_masked = __scalar_as_int(lower_np.w);
if(!(np_child_masked & 1))
{
// leaf node
// all leaves must have at least 1 particle, so we can use this to decide
const unsigned int node_head = NLIST_GPU_PARTICLES_PER_LEAF*cur_node_idx - s_leaf_offset[cur_pair_type];
const unsigned int n_part = np_child_masked >> 1;
for (unsigned int cur_p = node_head; cur_p < node_head + n_part; ++cur_p)
{
// neighbor j
const Scalar4 cur_xyzf = texFetchScalar4(d_leaf_xyzf, leaf_xyzf_tex, cur_p);
const Scalar3 pos_j = make_scalar3(cur_xyzf.x, cur_xyzf.y, cur_xyzf.z);
const unsigned int j = __scalar_as_int(cur_xyzf.w);
const Scalar2 cur_db = texFetchScalar2(d_leaf_db, leaf_db_tex, cur_p);
const Scalar diam_j = cur_db.x;
const unsigned int body_j = __scalar_as_int(cur_db.y);
bool excluded = (my_pidx == j);
if (filter_body && body_i != 0xffffffff)
excluded = excluded | (body_i == body_j);
if (!excluded)
{
// now we can trim down the actual particles based on diameter
// compute the shift for the cutoff if not excluded
Scalar sqshift = Scalar(0.0);
if (diameter_shift)
{
const Scalar delta = (diam_i + diam_j) * Scalar(0.5) - Scalar(1.0);
// r^2 < (r_list + delta)^2
// r^2 < r_listsq + delta^2 + 2*r_list*delta
sqshift = (delta + Scalar(2.0) * r_cut_i) * delta;
}
// compute distance and wrap back into box
Scalar3 drij = pos_j - pos_i_image;
Scalar dr2 = dot(drij,drij);
if (dr2 <= (r_cutsq_i + sqshift))
{
if (n_neigh_i < s_Nmax[type_i])
{
d_nlist[nlist_head_i + n_neigh_i] = j;
}
++n_neigh_i;
}
}
}
// leaf nodes always move to their rope
cur_node_idx = __scalar_as_int(upper_rope.w);
}
else
{
// internal node, take left child
cur_node_idx = (np_child_masked >> 1);
}
}
else
{
cur_node_idx = __scalar_as_int(upper_rope.w); // no overlap, rope ahead
}
} // end stackless search
} // end loop over images
} // end loop over pair types
// could try reordering by idx instead of pidx, but that seems to not make much difference in microbenchmarking.
d_n_neigh[my_pidx] = n_neigh_i;
d_last_updated_pos[my_pidx] = make_scalar4(pos_i.x, pos_i.y, pos_i.z, __int_as_scalar(type_i));
// update the number of neighbors for this type if allocated memory is exceeded
if (n_neigh_i >= s_Nmax[type_i])
atomicMax(&d_conditions[type_i], n_neigh_i);
}
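// Illustrative note (not part of the original source): on overflow the kernel above keeps counting
// but stops writing, so d_nlist is never written past the allocation while d_conditions[type_i]
// ends up holding the largest neighbor count seen for that type. A caller could compare
// d_conditions against d_Nmax after the launch, enlarge the per-type allocation to the recorded
// maximum, and rebuild the list; how that resize is performed is host-side policy and is not part
// of this kernel.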
/*!
* \param d_nlist Neighbor list for writing
* \param d_n_neigh Number of neighbors per particle
* \param d_last_updated_pos Records current particle positions
* \param d_conditions Store overflow condition by type
* \param d_Nmax Maximum number of neighbors allocated by type
* \param d_head_list Indexes for writing into neighbor list
* \param N Number of particles
* \param nghosts Number of ghost particles
* \param d_map_tree_pid Map leaf index to local particle index
* \param d_leaf_offset Offset for reading leaf particles by type
* \param d_tree_roots Index for tree root by type
* \param d_tree_aabbs Tree AABBs
* \param nleafs Total number of leafs
* \param d_leaf_xyzf Leaf position-id array
* \param d_leaf_db Leaf diameter-body array
* \param d_pos Particle positions
* \param d_image_list Translation vectors to check for traversal
* \param nimages Number of translation vectors to check
* \param d_r_cut Cutoff radius by type r_cut(i,j)
* \param r_buff Buffer around cutoff radius
* \param max_diam Maximum diameter attained by a particle for diameter shifting
* \param ntypes Number of particle types
* \param filter_body True if body filtering is enabled
* \param diameter_shift True if rcut(i,j) should be shifted by the particle diameters
* \param compute_capability Compute capability of the GPU (in 20, 30, 35 format)
* \param block_size Requested thread block size
*
* \returns cudaSuccess on completion
* \returns cudaError on failure to bind a texture
*
* \note Kernel calls are templated on body filtering and diameter shifting for optimization.
* \note One thread is called for all leaf particles. Some of these threads will die because they correspond to ghost
* particles not owned by the rank. Because the leaf particles are sorted, there is no easy way to skip these
* particles, and this inefficiency is assumed to be relatively small.
*/
cudaError_t gpu_nlist_traverse_tree(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const unsigned int N,
const unsigned int nghosts,
const unsigned int *d_map_tree_pid,
const unsigned int *d_leaf_offset,
const unsigned int *d_tree_roots,
const Scalar4 *d_tree_aabbs,
const unsigned int nleafs,
const unsigned int ninternal,
const unsigned int nnodes,
const Scalar4 *d_leaf_xyzf,
const Scalar2 *d_leaf_db,
const Scalar4 *d_pos,
const Scalar3 *d_image_list,
const unsigned int nimages,
const Scalar *d_r_cut,
const Scalar r_buff,
const Scalar max_diam,
const unsigned int ntypes,
bool filter_body,
bool diameter_shift,
const unsigned int compute_capability,
const unsigned int block_size)
{
// shared memory = r_list + Nmax
Index2D typpair_idx(ntypes);
unsigned int shared_size = sizeof(Scalar)*typpair_idx.getNumElements() + 2*sizeof(unsigned int)*ntypes;
// bind the neighborlist texture
if (compute_capability < 35)
{
pdata_pos_tex.normalized = false;
pdata_pos_tex.filterMode = cudaFilterModePoint;
cudaError_t error = cudaBindTexture(0, pdata_pos_tex, d_pos, sizeof(Scalar4)*(N+nghosts));
if (error != cudaSuccess)
return error;
leaf_xyzf_tex.normalized = false;
leaf_xyzf_tex.filterMode = cudaFilterModePoint;
error = cudaBindTexture(0, leaf_xyzf_tex, d_leaf_xyzf, sizeof(Scalar4)*(N+nghosts));
if (error != cudaSuccess)
return error;
leaf_db_tex.normalized = false;
leaf_db_tex.filterMode = cudaFilterModePoint;
error = cudaBindTexture(0, leaf_db_tex, d_leaf_db, sizeof(Scalar2)*(N+nghosts));
if (error != cudaSuccess)
return error;
aabb_node_bounds_tex.normalized = false;
aabb_node_bounds_tex.filterMode = cudaFilterModePoint;
error = cudaBindTexture(0, aabb_node_bounds_tex, d_tree_aabbs, sizeof(Scalar4)*2*nnodes);
if (error != cudaSuccess)
return error;
head_list_tex.normalized = false;
head_list_tex.filterMode = cudaFilterModePoint;
error = cudaBindTexture(0, head_list_tex, d_head_list, sizeof(unsigned int)*N);
if (error != cudaSuccess)
return error;
}
if (!filter_body && !diameter_shift)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, gpu_nlist_traverse_tree_kernel<0>);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
int nblocks = (N+nghosts)/run_block_size + 1;
gpu_nlist_traverse_tree_kernel<0><<<nblocks, run_block_size, shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
N,
nghosts,
d_map_tree_pid,
d_leaf_offset,
d_tree_roots,
d_tree_aabbs,
nleafs,
d_leaf_xyzf,
d_leaf_db,
d_pos,
d_image_list,
nimages,
d_r_cut,
r_buff,
max_diam,
ntypes);
}
else if (filter_body && !diameter_shift)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, gpu_nlist_traverse_tree_kernel<1>);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
int nblocks = (N+nghosts)/run_block_size + 1;
gpu_nlist_traverse_tree_kernel<1><<<nblocks, run_block_size, shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
N,
nghosts,
d_map_tree_pid,
d_leaf_offset,
d_tree_roots,
d_tree_aabbs,
nleafs,
d_leaf_xyzf,
d_leaf_db,
d_pos,
d_image_list,
nimages,
d_r_cut,
r_buff,
max_diam,
ntypes);
}
else if (!filter_body && diameter_shift)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, gpu_nlist_traverse_tree_kernel<2>);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
int nblocks = (N+nghosts)/run_block_size + 1;
gpu_nlist_traverse_tree_kernel<2><<<nblocks, run_block_size, shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
N,
nghosts,
d_map_tree_pid,
d_leaf_offset,
d_tree_roots,
d_tree_aabbs,
nleafs,
d_leaf_xyzf,
d_leaf_db,
d_pos,
d_image_list,
nimages,
d_r_cut,
r_buff,
max_diam,
ntypes);
}
else if (filter_body && diameter_shift)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, gpu_nlist_traverse_tree_kernel<3>);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
int nblocks = (N+nghosts)/run_block_size + 1;
gpu_nlist_traverse_tree_kernel<3><<<nblocks, run_block_size, shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
N,
nghosts,
d_map_tree_pid,
d_leaf_offset,
d_tree_roots,
d_tree_aabbs,
nleafs,
d_leaf_xyzf,
d_leaf_db,
d_pos,
d_image_list,
nimages,
d_r_cut,
r_buff,
max_diam,
ntypes);
}
// unbind the textures
if (compute_capability < 35)
{
cudaError_t error = cudaUnbindTexture(pdata_pos_tex);
if (error != cudaSuccess)
return error;
error = cudaUnbindTexture(leaf_xyzf_tex);
if (error != cudaSuccess)
return error;
error = cudaUnbindTexture(leaf_db_tex);
if (error != cudaSuccess)
return error;
error = cudaUnbindTexture(aabb_node_bounds_tex);
if (error != cudaSuccess)
return error;
error = cudaUnbindTexture(head_list_tex);
if (error != cudaSuccess)
return error;
}
return cudaSuccess;
}
//! Kernel to find divisions between particle types in sorted order
/*!
* \param d_type_head Index to first type in leaf ordered particles by type
* \param d_pos Particle positions
* \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
* \param N Total number of particles on rank (including ghosts)
*
* The starting index for each type of particles is the first particle where the left neighbor is not of the same type.
*/
__global__ void gpu_nlist_get_divisions_kernel(unsigned int *d_type_head,
const Scalar4 *d_pos,
const unsigned int *d_map_tree_pid,
const unsigned int N)
{
// compute the particle index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per particle
if (idx >= N)
return;
const unsigned int cur_pidx = d_map_tree_pid[idx];
// get type of the current particle
const Scalar4 cur_postype = d_pos[cur_pidx];
const unsigned int cur_type = __scalar_as_int(cur_postype.w);
// all particles except for the first one should look left
if (idx > 0)
{
const unsigned int left_pidx = d_map_tree_pid[idx - 1];
// get type of the particle to my left
const Scalar4 left_postype = d_pos[left_pidx];
const unsigned int left_type = __scalar_as_int(left_postype.w);
// if the left has a different type, then this is a type boundary, and the type starts at the current thread index
if (left_type != cur_type)
{
d_type_head[cur_type] = idx + 1; // offset the index +1 so that we can use 0 to mean "none of this found"
}
}
else // the first particle in leaf order is always the head of its type (stored with the same +1 offset)
{
d_type_head[cur_type] = 1;
}
}
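// Illustrative note (not part of the original source): a worked example of the +1 convention. If
// the leaf-ordered particle types are {0, 0, 1, 1, 1, 3}, this kernel writes d_type_head[0] = 1,
// d_type_head[1] = 3 and d_type_head[3] = 6, while d_type_head[2] keeps the zero written by the
// cudaMemset in gpu_nlist_init_count, which readers interpret as "no particles of this type"
// (compare the d_type_head[next_type] check in gpu_nlist_merge_particles_kernel).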
/*!
* \param d_type_head Index to first type in leaf ordered particles by type
* \param d_pos Particle positions
* \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
* \param N Total number of particles on rank (including ghosts)
* \param ntypes Number of types
* \param block_size Requested thread block size
*
* \returns cudaSuccess on completion
*/
cudaError_t gpu_nlist_init_count(unsigned int *d_type_head,
const Scalar4 *d_pos,
const unsigned int *d_map_tree_pid,
const unsigned int N,
const unsigned int ntypes,
const unsigned int block_size)
{
// determine the kernel launch configuration
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void *)gpu_nlist_get_divisions_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
// zero out the head list
cudaMemset(d_type_head, 0, sizeof(unsigned int)*ntypes);
// get the head list divisions
gpu_nlist_get_divisions_kernel<<<N/run_block_size + 1, run_block_size>>>(d_type_head, d_pos, d_map_tree_pid, N);
return cudaSuccess;
}
#undef MORTON_CODE_BITS
#undef MORTON_TYPE_MASK_64
#undef MORTON_CODE_N_BINS
|
7c038288a16b7895c35cb446b6766389a31d0f56.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2010.
Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory.
LLNL-CODE-461231
All rights reserved.
This file is part of LULESH, Version 1.0.
Please also read this link -- http://www.opensource.org/licenses/index.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC,
THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Additional BSD Notice
1. This notice is required to be provided under our contract with the U.S.
Department of Energy (DOE). This work was produced at Lawrence Livermore
National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE.
2. Neither the United States Government nor Lawrence Livermore National
Security, LLC nor any of their employees, makes any warranty, express
or implied, or assumes any liability or responsibility for the accuracy,
completeness, or usefulness of any information, apparatus, product, or
process disclosed, or represents that its use would not infringe
privately-owned rights.
3. Also, reference herein to any specific commercial products, process, or
services by trade name, trademark, manufacturer or otherwise does not
necessarily constitute or imply its endorsement, recommendation, or
favoring by the United States Government or Lawrence Livermore National
Security, LLC. The views and opinions of authors expressed herein do not
necessarily state or reflect those of the United States Government or
Lawrence Livermore National Security, LLC, and shall not be used for
advertising or product endorsement purposes.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <sstream>
#include <util.h>
#include <sm_utils.inl>
#include <hip/hip_runtime.h>
#include <allocator.h>
#include "hip/hip_runtime_api.h"
#ifdef USE_MPI
#include <mpi.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#include "lulesh.h"
//YKT
//extern "C" void trap(void);
/****************************************************/
/* Allow flexibility for arithmetic representations */
/****************************************************/
__device__ inline real4 SQRT(real4 arg) { return sqrtf(arg) ; }
__device__ inline real8 SQRT(real8 arg) { return sqrt(arg) ; }
__device__ inline real4 CBRT(real4 arg) { return cbrtf(arg) ; }
__device__ inline real8 CBRT(real8 arg) { return cbrt(arg) ; }
__device__ __host__ inline real4 FABS(real4 arg) { return fabsf(arg) ; }
__device__ __host__ inline real8 FABS(real8 arg) { return fabs(arg) ; }
__device__ inline real4 FMAX(real4 arg1,real4 arg2) { return fmaxf(arg1,arg2) ; }
__device__ inline real8 FMAX(real8 arg1,real8 arg2) { return fmax(arg1,arg2) ; }
#define MAX(a, b) ( ((a) > (b)) ? (a) : (b))
/* Stuff needed for boundary conditions */
/* 2 BCs on each of 6 hexahedral faces (12 bits) */
#define XI_M 0x00007
#define XI_M_SYMM 0x00001
#define XI_M_FREE 0x00002
#define XI_M_COMM 0x00004
#define XI_P 0x00038
#define XI_P_SYMM 0x00008
#define XI_P_FREE 0x00010
#define XI_P_COMM 0x00020
#define ETA_M 0x001c0
#define ETA_M_SYMM 0x00040
#define ETA_M_FREE 0x00080
#define ETA_M_COMM 0x00100
#define ETA_P 0x00e00
#define ETA_P_SYMM 0x00200
#define ETA_P_FREE 0x00400
#define ETA_P_COMM 0x00800
#define ZETA_M 0x07000
#define ZETA_M_SYMM 0x01000
#define ZETA_M_FREE 0x02000
#define ZETA_M_COMM 0x04000
#define ZETA_P 0x38000
#define ZETA_P_SYMM 0x08000
#define ZETA_P_FREE 0x10000
#define ZETA_P_COMM 0x20000
#define VOLUDER(a0,a1,a2,a3,a4,a5,b0,b1,b2,b3,b4,b5,dvdc) \
{ \
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ; \
\
dvdc= \
((a1) + (a2)) * ((b0) + (b1)) - ((a0) + (a1)) * ((b1) + (b2)) + \
((a0) + (a4)) * ((b3) + (b4)) - ((a3) + (a4)) * ((b0) + (b4)) - \
((a2) + (a5)) * ((b3) + (b5)) + ((a3) + (a5)) * ((b2) + (b5)); \
dvdc *= twelfth; \
}
/*
__device__
static
__forceinline__
void SumOverNodes(Real_t& val, volatile Real_t* smem, int cta_elem, int node) {
int tid = (cta_elem << 3) + node;
smem[tid] = val;
if (node < 4)
{
smem[tid] += smem[tid+4];
smem[tid] += smem[tid+2];
smem[tid] += smem[tid+1];
}
val = smem[(cta_elem << 3)];
}
*/
__device__
static
__forceinline__
void SumOverNodesShfl(Real_t& val) {
val += utils::shfl_xor( val, 4, 8);
val += utils::shfl_xor( val, 2, 8);
val += utils::shfl_xor( val, 1, 8);
}
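// Illustrative note (not part of the original source): the three XOR shuffles above form a
// butterfly reduction over each aligned group of 8 lanes (one element per group, one node per
// lane). Starting from lane values v0..v7, the width-8 exchanges with masks 4, 2 and 1 leave every
// lane in the group holding v0 + v1 + ... + v7, which is presumably why the shared-memory version
// of SumOverNodes above is kept only as a comment on SM 3.0+ hardware.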
__host__ __device__
static
__forceinline__
Real_t CalcElemVolume( const Real_t x0, const Real_t x1,
const Real_t x2, const Real_t x3,
const Real_t x4, const Real_t x5,
const Real_t x6, const Real_t x7,
const Real_t y0, const Real_t y1,
const Real_t y2, const Real_t y3,
const Real_t y4, const Real_t y5,
const Real_t y6, const Real_t y7,
const Real_t z0, const Real_t z1,
const Real_t z2, const Real_t z3,
const Real_t z4, const Real_t z5,
const Real_t z6, const Real_t z7 )
{
Real_t twelveth = Real_t(1.0)/Real_t(12.0);
Real_t dx61 = x6 - x1;
Real_t dy61 = y6 - y1;
Real_t dz61 = z6 - z1;
Real_t dx70 = x7 - x0;
Real_t dy70 = y7 - y0;
Real_t dz70 = z7 - z0;
Real_t dx63 = x6 - x3;
Real_t dy63 = y6 - y3;
Real_t dz63 = z6 - z3;
Real_t dx20 = x2 - x0;
Real_t dy20 = y2 - y0;
Real_t dz20 = z2 - z0;
Real_t dx50 = x5 - x0;
Real_t dy50 = y5 - y0;
Real_t dz50 = z5 - z0;
Real_t dx64 = x6 - x4;
Real_t dy64 = y6 - y4;
Real_t dz64 = z6 - z4;
Real_t dx31 = x3 - x1;
Real_t dy31 = y3 - y1;
Real_t dz31 = z3 - z1;
Real_t dx72 = x7 - x2;
Real_t dy72 = y7 - y2;
Real_t dz72 = z7 - z2;
Real_t dx43 = x4 - x3;
Real_t dy43 = y4 - y3;
Real_t dz43 = z4 - z3;
Real_t dx57 = x5 - x7;
Real_t dy57 = y5 - y7;
Real_t dz57 = z5 - z7;
Real_t dx14 = x1 - x4;
Real_t dy14 = y1 - y4;
Real_t dz14 = z1 - z4;
Real_t dx25 = x2 - x5;
Real_t dy25 = y2 - y5;
Real_t dz25 = z2 - z5;
#define TRIPLE_PRODUCT(x1, y1, z1, x2, y2, z2, x3, y3, z3) \
((x1)*((y2)*(z3) - (z2)*(y3)) + (x2)*((z1)*(y3) - (y1)*(z3)) + (x3)*((y1)*(z2) - (z1)*(y2)))
// 11 + 3*14
Real_t volume =
TRIPLE_PRODUCT(dx31 + dx72, dx63, dx20,
dy31 + dy72, dy63, dy20,
dz31 + dz72, dz63, dz20) +
TRIPLE_PRODUCT(dx43 + dx57, dx64, dx70,
dy43 + dy57, dy64, dy70,
dz43 + dz57, dz64, dz70) +
TRIPLE_PRODUCT(dx14 + dx25, dx61, dx50,
dy14 + dy25, dy61, dy50,
dz14 + dz25, dz61, dz50);
#undef TRIPLE_PRODUCT
volume *= twelveth;
return volume ;
}
__host__ __device__
static
__forceinline__
Real_t CalcElemVolume( const Real_t x[8], const Real_t y[8], const Real_t z[8] )
{
return CalcElemVolume( x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7],
y[0], y[1], y[2], y[3], y[4], y[5], y[6], y[7],
z[0], z[1], z[2], z[3], z[4], z[5], z[6], z[7]);
}
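// Illustrative note (not part of the original source): a quick sanity check of the volume formula
// above, assuming the node ordering produced by Domain::BuildMesh (nodes 0-3 on the lower z face
// counter-clockwise, nodes 4-7 directly above them). The block is disabled so it does not affect
// the build; CheckUnitCubeVolume is a hypothetical helper name.
#if 0
void CheckUnitCubeVolume()
{
   // unit cube corners in the ordering described above
   const Real_t x[8] = {0.,1.,1.,0., 0.,1.,1.,0.};
   const Real_t y[8] = {0.,0.,1.,1., 0.,0.,1.,1.};
   const Real_t z[8] = {0.,0.,0.,0., 1.,1.,1.,1.};
   // the triple-product formula sums to 12/12 = 1.0 for this cube
   printf("unit cube volume = %g\n", double(CalcElemVolume(x, y, z)));
}
#endif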
void cuda_init(int rank)
{
Int_t deviceCount, dev;
hipDeviceProp_t cuda_deviceProp;
cudaSafeCall( hipGetDeviceCount(&deviceCount) );
if (deviceCount == 0) {
fprintf(stderr, "cuda_init(): no devices supporting CUDA.\n");
exit(1);
}
dev = rank % deviceCount;
if ((dev < 0) || (dev > deviceCount-1)) {
fprintf(stderr, "cuda_init(): requested device (%d) out of range [%d,%d]\n",
dev, 0, deviceCount-1);
exit(1);
}
cudaSafeCall( hipSetDevice(dev) );
struct hipDeviceProp_t props;
hipGetDeviceProperties(&props, dev);
char hostname[256];
gethostname(hostname, sizeof(hostname));
//printf("Host %s using GPU %i: %s\n", hostname, dev, props.name);
cudaSafeCall( hipGetDeviceProperties(&cuda_deviceProp, dev) );
if (cuda_deviceProp.major < 3) {
fprintf(stderr, "cuda_init(): This implementation of Lulesh requires device SM 3.0+.\n", dev);
exit(1);
}
#if CUDART_VERSION < 5000
fprintf(stderr,"cuda_init(): This implementation of Lulesh uses texture objects, which is requires Cuda 5.0+.\n");
exit(1);
#endif
}
void AllocateNodalPersistent(Domain* domain, size_t domNodes)
{
domain->x.resize(domNodes) ; /* coordinates */
domain->y.resize(domNodes) ;
domain->z.resize(domNodes) ;
domain->xd.resize(domNodes) ; /* velocities */
domain->yd.resize(domNodes) ;
domain->zd.resize(domNodes) ;
domain->xdd.resize(domNodes) ; /* accelerations */
domain->ydd.resize(domNodes) ;
domain->zdd.resize(domNodes) ;
domain->fx.resize(domNodes) ; /* forces */
domain->fy.resize(domNodes) ;
domain->fz.resize(domNodes) ;
domain->nodalMass.resize(domNodes) ; /* mass */
}
void AllocateElemPersistent(Domain* domain, size_t domElems, size_t padded_domElems)
{
domain->matElemlist.resize(domElems) ; /* material indexset */
domain->nodelist.resize(8*padded_domElems) ; /* elemToNode connectivity */
domain->lxim.resize(domElems) ; /* elem connectivity through face */
domain->lxip.resize(domElems) ;
domain->letam.resize(domElems) ;
domain->letap.resize(domElems) ;
domain->lzetam.resize(domElems) ;
domain->lzetap.resize(domElems) ;
domain->elemBC.resize(domElems) ; /* elem face symm/free-surf flag */
domain->e.resize(domElems) ; /* energy */
domain->p.resize(domElems) ; /* pressure */
domain->q.resize(domElems) ; /* q */
domain->ql.resize(domElems) ; /* linear term for q */
domain->qq.resize(domElems) ; /* quadratic term for q */
domain->v.resize(domElems) ; /* relative volume */
domain->volo.resize(domElems) ; /* reference volume */
domain->delv.resize(domElems) ; /* m_vnew - m_v */
domain->vdov.resize(domElems) ; /* volume derivative over volume */
domain->arealg.resize(domElems) ; /* elem characteristic length */
domain->ss.resize(domElems) ; /* "sound speed" */
domain->elemMass.resize(domElems) ; /* mass */
}
void AllocateSymmX(Domain* domain, size_t size)
{
domain->symmX.resize(size) ;
}
void AllocateSymmY(Domain* domain, size_t size)
{
domain->symmY.resize(size) ;
}
void AllocateSymmZ(Domain* domain, size_t size)
{
domain->symmZ.resize(size) ;
}
void InitializeFields(Domain* domain)
{
/* Basic Field Initialization */
thrust::fill(domain->ss.begin(),domain->ss.end(),0.);
thrust::fill(domain->e.begin(),domain->e.end(),0.);
thrust::fill(domain->p.begin(),domain->p.end(),0.);
thrust::fill(domain->q.begin(),domain->q.end(),0.);
thrust::fill(domain->v.begin(),domain->v.end(),1.);
thrust::fill(domain->xd.begin(),domain->xd.end(),0.);
thrust::fill(domain->yd.begin(),domain->yd.end(),0.);
thrust::fill(domain->zd.begin(),domain->zd.end(),0.);
thrust::fill(domain->xdd.begin(),domain->xdd.end(),0.);
thrust::fill(domain->ydd.begin(),domain->ydd.end(),0.);
thrust::fill(domain->zdd.begin(),domain->zdd.end(),0.);
thrust::fill(domain->nodalMass.begin(),domain->nodalMass.end(),0.);
}
////////////////////////////////////////////////////////////////////////////////
void
Domain::SetupCommBuffers(Int_t edgeNodes)
{
// allocate a buffer large enough for nodal ghost data
maxEdgeSize = MAX(this->sizeX, MAX(this->sizeY, this->sizeZ))+1 ;
maxPlaneSize = CACHE_ALIGN_REAL(maxEdgeSize*maxEdgeSize) ;
maxEdgeSize = CACHE_ALIGN_REAL(maxEdgeSize) ;
// assume communication to 6 neighbors by default
m_rowMin = (m_rowLoc == 0) ? 0 : 1;
m_rowMax = (m_rowLoc == m_tp-1) ? 0 : 1;
m_colMin = (m_colLoc == 0) ? 0 : 1;
m_colMax = (m_colLoc == m_tp-1) ? 0 : 1;
m_planeMin = (m_planeLoc == 0) ? 0 : 1;
m_planeMax = (m_planeLoc == m_tp-1) ? 0 : 1;
#if USE_MPI
// account for face communication
Index_t comBufSize =
(m_rowMin + m_rowMax + m_colMin + m_colMax + m_planeMin + m_planeMax) *
maxPlaneSize * MAX_FIELDS_PER_MPI_COMM ;
// account for edge communication
comBufSize +=
((m_rowMin & m_colMin) + (m_rowMin & m_planeMin) + (m_colMin & m_planeMin) +
(m_rowMax & m_colMax) + (m_rowMax & m_planeMax) + (m_colMax & m_planeMax) +
(m_rowMax & m_colMin) + (m_rowMin & m_planeMax) + (m_colMin & m_planeMax) +
(m_rowMin & m_colMax) + (m_rowMax & m_planeMin) + (m_colMax & m_planeMin)) *
maxPlaneSize * MAX_FIELDS_PER_MPI_COMM ;
// account for corner communication
// factor of 16 is so each buffer has its own cache line
comBufSize += ((m_rowMin & m_colMin & m_planeMin) +
(m_rowMin & m_colMin & m_planeMax) +
(m_rowMin & m_colMax & m_planeMin) +
(m_rowMin & m_colMax & m_planeMax) +
(m_rowMax & m_colMin & m_planeMin) +
(m_rowMax & m_colMin & m_planeMax) +
(m_rowMax & m_colMax & m_planeMin) +
(m_rowMax & m_colMax & m_planeMax)) * CACHE_COHERENCE_PAD_REAL ;
//hfwen: Why can we no longer make these calls when comBufSize = 0? Is it only on Volta or CUDA 9?
if (comBufSize > 0)
{
this->commDataSend = new Real_t[comBufSize] ;
this->commDataRecv = new Real_t[comBufSize] ;
// pin buffers
hipHostRegister(this->commDataSend, comBufSize*sizeof(Real_t), 0);
hipHostRegister(this->commDataRecv, comBufSize*sizeof(Real_t), 0);
// prevent floating point exceptions
memset(this->commDataSend, 0, comBufSize*sizeof(Real_t)) ;
memset(this->commDataRecv, 0, comBufSize*sizeof(Real_t)) ;
// allocate shadow GPU buffers
hipMalloc(&this->d_commDataSend, comBufSize*sizeof(Real_t));
hipMalloc(&this->d_commDataRecv, comBufSize*sizeof(Real_t));
// prevent floating point exceptions
hipMemset(this->d_commDataSend, 0, comBufSize*sizeof(Real_t));
hipMemset(this->d_commDataRecv, 0, comBufSize*sizeof(Real_t));
}
#endif
}
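// Illustrative note (not part of the original source): for a fully interior rank all six
// min/max flags above are 1, so comBufSize evaluates to
// (6 + 12) * maxPlaneSize * MAX_FIELDS_PER_MPI_COMM + 8 * CACHE_COHERENCE_PAD_REAL
// (6 faces plus 12 edges, each padded to a full plane here, plus 8 corner pads). Boundary ranks
// drop the terms whose flags are 0, and a single-rank run (comBufSize == 0) skips the allocation
// entirely.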
void SetupConnectivityBC(Domain *domain, int edgeElems)
{
int domElems = domain->numElem;
Vector_h<Index_t> lxim_h(domElems);
Vector_h<Index_t> lxip_h(domElems);
Vector_h<Index_t> letam_h(domElems);
Vector_h<Index_t> letap_h(domElems);
Vector_h<Index_t> lzetam_h(domElems);
Vector_h<Index_t> lzetap_h(domElems);
/* set up elemement connectivity information */
lxim_h[0] = 0 ;
for (Index_t i=1; i<domElems; ++i) {
lxim_h[i] = i-1 ;
lxip_h[i-1] = i ;
}
lxip_h[domElems-1] = domElems-1 ;
for (Index_t i=0; i<edgeElems; ++i) {
letam_h[i] = i ;
letap_h[domElems-edgeElems+i] = domElems-edgeElems+i ;
}
for (Index_t i=edgeElems; i<domElems; ++i) {
letam_h[i] = i-edgeElems ;
letap_h[i-edgeElems] = i ;
}
for (Index_t i=0; i<edgeElems*edgeElems; ++i) {
lzetam_h[i] = i ;
lzetap_h[domElems-edgeElems*edgeElems+i] = domElems-edgeElems*edgeElems+i ;
}
for (Index_t i=edgeElems*edgeElems; i<domElems; ++i) {
lzetam_h[i] = i - edgeElems*edgeElems ;
lzetap_h[i-edgeElems*edgeElems] = i ;
}
/* set up boundary condition information */
Vector_h<Index_t> elemBC_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
elemBC_h[i] = 0 ; /* clear BCs by default */
}
Index_t ghostIdx[6] ; // offsets to ghost locations
for (Index_t i=0; i<6; ++i) {
ghostIdx[i] = INT_MIN ;
}
Int_t pidx = domElems ;
if (domain->m_planeMin != 0) {
ghostIdx[0] = pidx ;
pidx += domain->sizeX*domain->sizeY ;
}
if (domain->m_planeMax != 0) {
ghostIdx[1] = pidx ;
pidx += domain->sizeX*domain->sizeY ;
}
if (domain->m_rowMin != 0) {
ghostIdx[2] = pidx ;
pidx += domain->sizeX*domain->sizeZ ;
}
if (domain->m_rowMax != 0) {
ghostIdx[3] = pidx ;
pidx += domain->sizeX*domain->sizeZ ;
}
if (domain->m_colMin != 0) {
ghostIdx[4] = pidx ;
pidx += domain->sizeY*domain->sizeZ ;
}
if (domain->m_colMax != 0) {
ghostIdx[5] = pidx ;
}
/* symmetry plane or free surface BCs */
for (Index_t i=0; i<edgeElems; ++i) {
Index_t planeInc = i*edgeElems*edgeElems ;
Index_t rowInc = i*edgeElems ;
for (Index_t j=0; j<edgeElems; ++j) {
if (domain->m_planeLoc == 0) {
elemBC_h[rowInc+j] |= ZETA_M_SYMM ;
}
else {
elemBC_h[rowInc+j] |= ZETA_M_COMM ;
lzetam_h[rowInc+j] = ghostIdx[0] + rowInc + j ;
}
if (domain->m_planeLoc == domain->m_tp-1) {
elemBC_h[rowInc+j+domElems-edgeElems*edgeElems] |=
ZETA_P_FREE;
}
else {
elemBC_h[rowInc+j+domElems-edgeElems*edgeElems] |=
ZETA_P_COMM ;
lzetap_h[rowInc+j+domElems-edgeElems*edgeElems] =
ghostIdx[1] + rowInc + j ;
}
if (domain->m_rowLoc == 0) {
elemBC_h[planeInc+j] |= ETA_M_SYMM ;
}
else {
elemBC_h[planeInc+j] |= ETA_M_COMM ;
letam_h[planeInc+j] = ghostIdx[2] + rowInc + j ;
}
if (domain->m_rowLoc == domain->m_tp-1) {
elemBC_h[planeInc+j+edgeElems*edgeElems-edgeElems] |=
ETA_P_FREE ;
}
else {
elemBC_h[planeInc+j+edgeElems*edgeElems-edgeElems] |=
ETA_P_COMM ;
letap_h[planeInc+j+edgeElems*edgeElems-edgeElems] =
ghostIdx[3] + rowInc + j ;
}
if (domain->m_colLoc == 0) {
elemBC_h[planeInc+j*edgeElems] |= XI_M_SYMM ;
}
else {
elemBC_h[planeInc+j*edgeElems] |= XI_M_COMM ;
lxim_h[planeInc+j*edgeElems] = ghostIdx[4] + rowInc + j ;
}
if (domain->m_colLoc == domain->m_tp-1) {
elemBC_h[planeInc+j*edgeElems+edgeElems-1] |= XI_P_FREE ;
}
else {
elemBC_h[planeInc+j*edgeElems+edgeElems-1] |= XI_P_COMM ;
lxip_h[planeInc+j*edgeElems+edgeElems-1] =
ghostIdx[5] + rowInc + j ;
}
}
}
domain->elemBC = elemBC_h;
domain->lxim = lxim_h;
domain->lxip = lxip_h;
domain->letam = letam_h;
domain->letap = letap_h;
domain->lzetam = lzetam_h;
domain->lzetap = lzetap_h;
}
void Domain::BuildMesh(Int_t nx, Int_t edgeNodes, Int_t edgeElems, Int_t domNodes, Int_t padded_domElems, Vector_h<Real_t> &x_h, Vector_h<Real_t> &y_h, Vector_h<Real_t> &z_h, Vector_h<Int_t> &nodelist_h)
{
Index_t meshEdgeElems = m_tp*nx ;
x_h.resize(domNodes);
y_h.resize(domNodes);
z_h.resize(domNodes);
// initialize nodal coordinates
Index_t nidx = 0 ;
Real_t tz = Real_t(1.125)*Real_t(m_planeLoc*nx)/Real_t(meshEdgeElems) ;
for (Index_t plane=0; plane<edgeNodes; ++plane) {
Real_t ty = Real_t(1.125)*Real_t(m_rowLoc*nx)/Real_t(meshEdgeElems) ;
for (Index_t row=0; row<edgeNodes; ++row) {
Real_t tx = Real_t(1.125)*Real_t(m_colLoc*nx)/Real_t(meshEdgeElems) ;
for (Index_t col=0; col<edgeNodes; ++col) {
x_h[nidx] = tx ;
y_h[nidx] = ty ;
z_h[nidx] = tz ;
++nidx ;
// tx += ds ; // may accumulate roundoff...
tx = Real_t(1.125)*Real_t(m_colLoc*nx+col+1)/Real_t(meshEdgeElems) ;
}
// ty += ds ; // may accumulate roundoff...
ty = Real_t(1.125)*Real_t(m_rowLoc*nx+row+1)/Real_t(meshEdgeElems) ;
}
// tz += ds ; // may accumulate roundoff...
tz = Real_t(1.125)*Real_t(m_planeLoc*nx+plane+1)/Real_t(meshEdgeElems) ;
}
x = x_h;
y = y_h;
z = z_h;
nodelist_h.resize(padded_domElems*8);
  // embed hexahedral elements in nodal point lattice
Index_t zidx = 0 ;
nidx = 0 ;
for (Index_t plane=0; plane<edgeElems; ++plane) {
for (Index_t row=0; row<edgeElems; ++row) {
for (Index_t col=0; col<edgeElems; ++col) {
nodelist_h[0*padded_domElems+zidx] = nidx ;
nodelist_h[1*padded_domElems+zidx] = nidx + 1 ;
nodelist_h[2*padded_domElems+zidx] = nidx + edgeNodes + 1 ;
nodelist_h[3*padded_domElems+zidx] = nidx + edgeNodes ;
nodelist_h[4*padded_domElems+zidx] = nidx + edgeNodes*edgeNodes ;
nodelist_h[5*padded_domElems+zidx] = nidx + edgeNodes*edgeNodes + 1 ;
nodelist_h[6*padded_domElems+zidx] = nidx + edgeNodes*edgeNodes + edgeNodes + 1 ;
nodelist_h[7*padded_domElems+zidx] = nidx + edgeNodes*edgeNodes + edgeNodes ;
++zidx ;
++nidx ;
}
++nidx ;
}
nidx += edgeNodes ;
}
nodelist = nodelist_h;
}
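/* NewDomain: allocates and initializes a Domain, either as a structured
 * nx^3 brick per rank (structured == true) or from an unstructured mesh
 * file given in argv[2]. It also builds the node-to-element maps, sets the
 * material parameters and initial field data, and creates the region
 * index sets. */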
Domain *NewDomain(char* argv[], Int_t numRanks, Index_t colLoc,
Index_t rowLoc, Index_t planeLoc,
Index_t nx, int tp, bool structured, Int_t nr, Int_t balance, Int_t cost)
{
Domain *domain = new Domain ;
domain->max_streams = 32;
domain->streams.resize(domain->max_streams);
for (Int_t i=0;i<domain->max_streams;i++)
hipStreamCreate(&(domain->streams[i]));
hipEventCreateWithFlags(&domain->time_constraint_computed,hipEventDisableTiming);
Index_t domElems;
Index_t domNodes;
Index_t padded_domElems;
Vector_h<Index_t> nodelist_h;
Vector_h<Real_t> x_h;
Vector_h<Real_t> y_h;
Vector_h<Real_t> z_h;
if (structured)
{
domain->m_tp = tp ;
domain->m_numRanks = numRanks ;
domain->m_colLoc = colLoc ;
domain->m_rowLoc = rowLoc ;
domain->m_planeLoc = planeLoc ;
Index_t edgeElems = nx ;
Index_t edgeNodes = edgeElems+1 ;
domain->sizeX = edgeElems ;
domain->sizeY = edgeElems ;
domain->sizeZ = edgeElems ;
domain->numElem = domain->sizeX*domain->sizeY*domain->sizeZ ;
domain->padded_numElem = PAD(domain->numElem,32);
domain->numNode = (domain->sizeX+1)*(domain->sizeY+1)*(domain->sizeZ+1) ;
domain->padded_numNode = PAD(domain->numNode,32);
domElems = domain->numElem ;
domNodes = domain->numNode ;
padded_domElems = domain->padded_numElem ;
AllocateElemPersistent(domain,domElems,padded_domElems);
AllocateNodalPersistent(domain,domNodes);
domain->SetupCommBuffers(edgeNodes);
InitializeFields(domain);
domain->BuildMesh(nx, edgeNodes, edgeElems, domNodes, padded_domElems, x_h, y_h, z_h, nodelist_h);
domain->numSymmX = domain->numSymmY = domain->numSymmZ = 0;
if (domain->m_colLoc == 0)
domain->numSymmX = (edgeElems+1)*(edgeElems+1) ;
if (domain->m_rowLoc == 0)
domain->numSymmY = (edgeElems+1)*(edgeElems+1) ;
if (domain->m_planeLoc == 0)
domain->numSymmZ = (edgeElems+1)*(edgeElems+1) ;
AllocateSymmX(domain,edgeNodes*edgeNodes);
AllocateSymmY(domain,edgeNodes*edgeNodes);
AllocateSymmZ(domain,edgeNodes*edgeNodes);
/* set up symmetry nodesets */
Vector_h<Index_t> symmX_h(domain->symmX.size());
Vector_h<Index_t> symmY_h(domain->symmY.size());
Vector_h<Index_t> symmZ_h(domain->symmZ.size());
Int_t nidx = 0 ;
for (Index_t i=0; i<edgeNodes; ++i) {
Index_t planeInc = i*edgeNodes*edgeNodes ;
Index_t rowInc = i*edgeNodes ;
for (Index_t j=0; j<edgeNodes; ++j) {
if (domain->m_planeLoc == 0) {
symmZ_h[nidx] = rowInc + j ;
}
if (domain->m_rowLoc == 0) {
symmY_h[nidx] = planeInc + j ;
}
if (domain->m_colLoc == 0) {
symmX_h[nidx] = planeInc + j*edgeNodes ;
}
++nidx ;
}
}
if (domain->m_planeLoc == 0)
domain->symmZ = symmZ_h;
if (domain->m_rowLoc == 0)
domain->symmY = symmY_h;
if (domain->m_colLoc == 0)
domain->symmX = symmX_h;
SetupConnectivityBC(domain, edgeElems);
}
else
{
FILE *fp;
int ee, en;
if ((fp = fopen(argv[2], "r")) == 0) {
printf("could not open file %s\n", argv[2]) ;
exit( LFileError ) ;
}
bool fsuccess;
fsuccess = fscanf(fp, "%d %d", &ee, &en) ;
domain->numElem = Index_t(ee);
domain->padded_numElem = PAD(domain->numElem,32);
domain->numNode = Index_t(en);
domain->padded_numNode = PAD(domain->numNode,32);
domElems = domain->numElem ;
domNodes = domain->numNode ;
padded_domElems = domain->padded_numElem ;
AllocateElemPersistent(domain,domElems,padded_domElems);
AllocateNodalPersistent(domain,domNodes);
InitializeFields(domain);
/* initialize nodal coordinates */
x_h.resize(domNodes);
y_h.resize(domNodes);
z_h.resize(domNodes);
for (Index_t i=0; i<domNodes; ++i) {
double px, py, pz ;
fsuccess = fscanf(fp, "%lf %lf %lf", &px, &py, &pz) ;
x_h[i] = Real_t(px) ;
y_h[i] = Real_t(py) ;
z_h[i] = Real_t(pz) ;
}
domain->x = x_h;
domain->y = y_h;
domain->z = z_h;
    /* embed hexahedral elements in nodal point lattice */
nodelist_h.resize(padded_domElems*8);
for (Index_t zidx=0; zidx<domElems; ++zidx) {
for (Index_t ni=0; ni<Index_t(8); ++ni) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
nodelist_h[ni*padded_domElems+zidx] = Index_t(n);
}
}
domain->nodelist = nodelist_h;
/* set up face-based element neighbors */
Vector_h<Index_t> lxim_h(domElems);
Vector_h<Index_t> lxip_h(domElems);
Vector_h<Index_t> letam_h(domElems);
Vector_h<Index_t> letap_h(domElems);
Vector_h<Index_t> lzetam_h(domElems);
Vector_h<Index_t> lzetap_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
int xi_m, xi_p, eta_m, eta_p, zeta_m, zeta_p ;
fsuccess = fscanf(fp, "%d %d %d %d %d %d",
&xi_m, &xi_p, &eta_m, &eta_p, &zeta_m, &zeta_p) ;
lxim_h[i] = Index_t(xi_m) ;
lxip_h[i] = Index_t(xi_p) ;
letam_h[i] = Index_t(eta_m) ;
letap_h[i] = Index_t(eta_p) ;
lzetam_h[i] = Index_t(zeta_m) ;
lzetap_h[i] = Index_t(zeta_p) ;
}
domain->lxim = lxim_h;
domain->lxip = lxip_h;
domain->letam = letam_h;
domain->letap = letap_h;
domain->lzetam = lzetam_h;
domain->lzetap = lzetap_h;
/* set up X symmetry nodeset */
fsuccess = fscanf(fp, "%d", &domain->numSymmX) ;
Vector_h<Index_t> symmX_h(domain->numSymmX);
for (Index_t i=0; i<domain->numSymmX; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmX_h[i] = Index_t(n) ;
}
domain->symmX = symmX_h;
fsuccess = fscanf(fp, "%d", &domain->numSymmY) ;
Vector_h<Index_t> symmY_h(domain->numSymmY);
for (Index_t i=0; i<domain->numSymmY; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmY_h[i] = Index_t(n) ;
}
domain->symmY = symmY_h;
fsuccess = fscanf(fp, "%d", &domain->numSymmZ) ;
Vector_h<Index_t> symmZ_h(domain->numSymmZ);
for (Index_t i=0; i<domain->numSymmZ; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmZ_h[i] = Index_t(n) ;
}
domain->symmZ = symmZ_h;
/* set up free surface nodeset */
Index_t numFreeSurf;
fsuccess = fscanf(fp, "%d", &numFreeSurf) ;
Vector_h<Index_t> freeSurf_h(numFreeSurf);
for (Index_t i=0; i<numFreeSurf; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
freeSurf_h[i] = Index_t(n) ;
}
printf("%c\n",fsuccess);//nothing
fclose(fp);
/* set up boundary condition information */
Vector_h<Index_t> elemBC_h(domElems);
Vector_h<Index_t> surfaceNode_h(domNodes);
for (Index_t i=0; i<domain->numElem; ++i) {
elemBC_h[i] = 0 ;
}
for (Index_t i=0; i<domain->numNode; ++i) {
surfaceNode_h[i] = 0 ;
}
for (Index_t i=0; i<domain->numSymmX; ++i) {
surfaceNode_h[symmX_h[i]] = 1 ;
}
for (Index_t i=0; i<domain->numSymmY; ++i) {
surfaceNode_h[symmY_h[i]] = 1 ;
}
for (Index_t i=0; i<domain->numSymmZ; ++i) {
surfaceNode_h[symmZ_h[i]] = 1 ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
Int_t mask = 0 ;
for (Index_t ni=0; ni<8; ++ni) {
mask |= (surfaceNode_h[nodelist_h[ni*domain->padded_numElem+zidx]] << ni) ;
}
if ((mask & 0x0f) == 0x0f) elemBC_h[zidx] |= ZETA_M_SYMM ;
if ((mask & 0xf0) == 0xf0) elemBC_h[zidx] |= ZETA_P_SYMM ;
if ((mask & 0x33) == 0x33) elemBC_h[zidx] |= ETA_M_SYMM ;
if ((mask & 0xcc) == 0xcc) elemBC_h[zidx] |= ETA_P_SYMM ;
if ((mask & 0x99) == 0x99) elemBC_h[zidx] |= XI_M_SYMM ;
if ((mask & 0x66) == 0x66) elemBC_h[zidx] |= XI_P_SYMM ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
if (elemBC_h[zidx] == (XI_M_SYMM | ETA_M_SYMM | ZETA_M_SYMM)) {
domain->octantCorner = zidx ;
break ;
}
}
for (Index_t i=0; i<domain->numNode; ++i) {
surfaceNode_h[i] = 0 ;
}
for (Index_t i=0; i<numFreeSurf; ++i) {
surfaceNode_h[freeSurf_h[i]] = 1 ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
Int_t mask = 0 ;
for (Index_t ni=0; ni<8; ++ni) {
mask |= (surfaceNode_h[nodelist_h[ni*domain->padded_numElem+zidx]] << ni) ;
}
if ((mask & 0x0f) == 0x0f) elemBC_h[zidx] |= ZETA_M_SYMM ;
if ((mask & 0xf0) == 0xf0) elemBC_h[zidx] |= ZETA_P_SYMM ;
if ((mask & 0x33) == 0x33) elemBC_h[zidx] |= ETA_M_SYMM ;
if ((mask & 0xcc) == 0xcc) elemBC_h[zidx] |= ETA_P_SYMM ;
if ((mask & 0x99) == 0x99) elemBC_h[zidx] |= XI_M_SYMM ;
if ((mask & 0x66) == 0x66) elemBC_h[zidx] |= XI_P_SYMM ;
}
domain->elemBC = elemBC_h;
/* deposit energy */
domain->e[domain->octantCorner] = Real_t(3.948746e+7) ;
}
/* set up node-centered indexing of elements */
Vector_h<Index_t> nodeElemCount_h(domNodes);
for (Index_t i=0; i<domNodes; ++i) {
nodeElemCount_h[i] = 0 ;
}
for (Index_t i=0; i<domElems; ++i) {
for (Index_t j=0; j < 8; ++j) {
++(nodeElemCount_h[nodelist_h[j*padded_domElems+i]]);
}
}
Vector_h<Index_t> nodeElemStart_h(domNodes);
nodeElemStart_h[0] = 0;
for (Index_t i=1; i < domNodes; ++i) {
nodeElemStart_h[i] =
nodeElemStart_h[i-1] + nodeElemCount_h[i-1] ;
}
Vector_h<Index_t> nodeElemCornerList_h(nodeElemStart_h[domNodes-1] +
nodeElemCount_h[domNodes-1] );
for (Index_t i=0; i < domNodes; ++i) {
nodeElemCount_h[i] = 0;
}
for (Index_t j=0; j < 8; ++j) {
for (Index_t i=0; i < domElems; ++i) {
Index_t m = nodelist_h[padded_domElems*j+i];
Index_t k = padded_domElems*j + i ;
Index_t offset = nodeElemStart_h[m] +
nodeElemCount_h[m] ;
nodeElemCornerList_h[offset] = k;
++(nodeElemCount_h[m]) ;
}
}
Index_t clSize = nodeElemStart_h[domNodes-1] +
nodeElemCount_h[domNodes-1] ;
for (Index_t i=0; i < clSize; ++i) {
Index_t clv = nodeElemCornerList_h[i] ;
if ((clv < 0) || (clv > padded_domElems*8)) {
fprintf(stderr,
"AllocateNodeElemIndexes(): nodeElemCornerList entry out of range!\n");
exit(1);
}
}
domain->nodeElemStart = nodeElemStart_h;
domain->nodeElemCount = nodeElemCount_h;
domain->nodeElemCornerList = nodeElemCornerList_h;
/* Create a material IndexSet (entire domain same material for now) */
Vector_h<Index_t> matElemlist_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
matElemlist_h[i] = i ;
}
domain->matElemlist = matElemlist_h;
hipHostMalloc(&domain->dtcourant_h,sizeof(Real_t),0);
hipHostMalloc(&domain->dthydro_h,sizeof(Real_t),0);
hipHostMalloc(&domain->bad_vol_h,sizeof(Index_t),0);
hipHostMalloc(&domain->bad_q_h,sizeof(Index_t),0);
*(domain->bad_vol_h)=-1;
*(domain->bad_q_h)=-1;
*(domain->dthydro_h)=1e20;
*(domain->dtcourant_h)=1e20;
/* initialize material parameters */
domain->time_h = Real_t(0.) ;
domain->dtfixed = Real_t(-1.0e-6) ;
domain->deltatimemultlb = Real_t(1.1) ;
domain->deltatimemultub = Real_t(1.2) ;
domain->stoptime = Real_t(1.0e-2) ;
domain->dtmax = Real_t(1.0e-2) ;
domain->cycle = 0 ;
domain->e_cut = Real_t(1.0e-7) ;
domain->p_cut = Real_t(1.0e-7) ;
domain->q_cut = Real_t(1.0e-7) ;
domain->u_cut = Real_t(1.0e-7) ;
domain->v_cut = Real_t(1.0e-10) ;
domain->hgcoef = Real_t(3.0) ;
domain->ss4o3 = Real_t(4.0)/Real_t(3.0) ;
domain->qstop = Real_t(1.0e+12) ;
domain->monoq_max_slope = Real_t(1.0) ;
domain->monoq_limiter_mult = Real_t(2.0) ;
domain->qlc_monoq = Real_t(0.5) ;
domain->qqc_monoq = Real_t(2.0)/Real_t(3.0) ;
domain->qqc = Real_t(2.0) ;
domain->pmin = Real_t(0.) ;
domain->emin = Real_t(-1.0e+15) ;
domain->dvovmax = Real_t(0.1) ;
domain->eosvmax = Real_t(1.0e+9) ;
domain->eosvmin = Real_t(1.0e-9) ;
domain->refdens = Real_t(1.0) ;
/* initialize field data */
Vector_h<Real_t> nodalMass_h(domNodes);
Vector_h<Real_t> volo_h(domElems);
Vector_h<Real_t> elemMass_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
Real_t x_local[8], y_local[8], z_local[8] ;
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist_h[lnode*padded_domElems+i];
x_local[lnode] = x_h[gnode];
y_local[lnode] = y_h[gnode];
z_local[lnode] = z_h[gnode];
}
// volume calculations
Real_t volume = CalcElemVolume(x_local, y_local, z_local );
volo_h[i] = volume ;
elemMass_h[i] = volume ;
for (Index_t j=0; j<8; ++j) {
Index_t gnode = nodelist_h[j*padded_domElems+i];
nodalMass_h[gnode] += volume / Real_t(8.0) ;
}
}
domain->nodalMass = nodalMass_h;
domain->volo = volo_h;
domain->elemMass= elemMass_h;
/* deposit energy */
domain->octantCorner = 0;
// deposit initial energy
// An energy of 3.948746e+7 is correct for a problem with
// 45 zones along a side - we need to scale it
const Real_t ebase = 3.948746e+7;
Real_t scale = (nx*domain->m_tp)/45.0;
Real_t einit = ebase*scale*scale*scale;
//Real_t einit = ebase;
if (domain->m_rowLoc + domain->m_colLoc + domain->m_planeLoc == 0) {
// Dump into the first zone (which we know is in the corner)
// of the domain that sits at the origin
domain->e[0] = einit;
}
  // set initial deltatime based on an analytic CFL estimate
domain->deltatime_h = (.5*cbrt(domain->volo[0]))/sqrt(2*einit);
domain->cost = cost;
domain->regNumList.resize(domain->numElem) ; // material indexset
domain->regElemlist.resize(domain->numElem) ; // material indexset
domain->regCSR.resize(nr);
domain->regReps.resize(nr);
domain->regSorted.resize(nr);
// Setup region index sets. For now, these are constant sized
// throughout the run, but could be changed every cycle to
// simulate effects of ALE on the lagrange solver
domain->CreateRegionIndexSets(nr, balance);
return domain ;
}
/******************* to support region *********************/
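// sortRegions: sorts the regions in descending order of representation
// count (simple bubble sort), permuting regElemSize along with regReps_h;
// regSorted_h maps each original region index to its sorted position.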
void Domain::sortRegions(Vector_h<Int_t>& regReps_h, Vector_h<Index_t>& regSorted_h)
{
Index_t temp;
Vector_h<Index_t> regIndex;
regIndex.resize(numReg);
for(int i = 0; i < numReg; i++)
regIndex[i] = i;
for(int i = 0; i < numReg-1; i++)
for(int j = 0; j < numReg-i-1; j++)
if(regReps_h[j] < regReps_h[j+1])
{
temp = regReps_h[j];
regReps_h[j] = regReps_h[j+1];
regReps_h[j+1] = temp;
temp = regElemSize[j];
regElemSize[j] = regElemSize[j+1];
regElemSize[j+1] = temp;
temp = regIndex[j];
regIndex[j] = regIndex[j+1];
regIndex[j+1] = temp;
}
for(int i = 0; i < numReg; i++)
regSorted_h[regIndex[i]] = i;
}
// simple function for int pow x^y, y >= 0
static Int_t POW(Int_t x, Int_t y)
{
Int_t res = 1;
for (Int_t i = 0; i < y; i++)
res *= x;
return res;
}
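// CreateRegionIndexSets: randomly partitions the elements into nr regions.
// The chance of picking region i is weighted by (i+1)^balance, and each pick
// assigns a random-sized run of consecutive elements. The result is stored
// as a CSR-style index set (regCSR/regElemlist) ordered by rep count.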
void Domain::CreateRegionIndexSets(Int_t nr, Int_t b)
{
#if USE_MPI
Index_t myRank;
MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
srand(myRank);
#else
srand(0);
Index_t myRank = 0;
#endif
numReg = nr;
balance = b;
regElemSize = new Int_t[numReg];
Index_t nextIndex = 0;
  Vector_h<Int_t> regCSR_h(regCSR.size()); // records the beginning and end of each region
Vector_h<Int_t> regReps_h(regReps.size()); // records the rep number per region
Vector_h<Index_t> regNumList_h(regNumList.size()); // Region number per domain element
Vector_h<Index_t> regElemlist_h(regElemlist.size()); // region indexset
Vector_h<Index_t> regSorted_h(regSorted.size()); // keeps index of sorted regions
  // If we only have one region, just fill it.
  // Fill out regNumList with material numbers, which are always
  // the region index plus one.
if(numReg == 1) {
while (nextIndex < numElem) {
regNumList_h[nextIndex] = 1;
nextIndex++;
}
regElemSize[0] = 0;
}
//If we have more than one region distribute the elements.
else {
Int_t regionNum;
Int_t regionVar;
Int_t lastReg = -1;
Int_t binSize;
Int_t elements;
Index_t runto = 0;
Int_t costDenominator = 0;
Int_t* regBinEnd = new Int_t[numReg];
//Determine the relative weights of all the regions.
for (Index_t i=0 ; i<numReg ; ++i) {
regElemSize[i] = 0;
costDenominator += POW((i+1), balance); //Total cost of all regions
      regBinEnd[i] = costDenominator; //Chance of hitting a given region is (regBinEnd[i] - regBinEnd[i-1])/costDenominator
}
//Until all elements are assigned
while (nextIndex < numElem) {
//pick the region
regionVar = rand() % costDenominator;
Index_t i = 0;
while(regionVar >= regBinEnd[i])
i++;
//rotate the regions based on MPI rank. Rotation is Rank % NumRegions
regionNum = ((i + myRank) % numReg) + 1;
// make sure we don't pick the same region twice in a row
while(regionNum == lastReg) {
regionVar = rand() % costDenominator;
i = 0;
while(regionVar >= regBinEnd[i])
i++;
regionNum = ((i + myRank) % numReg) + 1;
}
//Pick the bin size of the region and determine the number of elements.
binSize = rand() % 1000;
if(binSize < 773) {
elements = rand() % 15 + 1;
}
else if(binSize < 937) {
elements = rand() % 16 + 16;
}
else if(binSize < 970) {
elements = rand() % 32 + 32;
}
else if(binSize < 974) {
elements = rand() % 64 + 64;
}
else if(binSize < 978) {
elements = rand() % 128 + 128;
}
else if(binSize < 981) {
elements = rand() % 256 + 256;
}
else
elements = rand() % 1537 + 512;
runto = elements + nextIndex;
//Store the elements. If we hit the end before we run out of elements then just stop.
while (nextIndex < runto && nextIndex < numElem) {
regNumList_h[nextIndex] = regionNum;
nextIndex++;
}
lastReg = regionNum;
}
}
// Convert regNumList to region index sets
// First, count size of each region
for (Index_t i=0 ; i<numElem ; ++i) {
int r = regNumList_h[i]-1; // region index == regnum-1
regElemSize[r]++;
}
Index_t rep;
  // Second, assign the rep (repetition) count for each region
for (Index_t r=0; r<numReg ; ++r) {
if(r < numReg/2)
rep = 1;
else if(r < (numReg - (numReg+15)/20))
rep = 1 + cost;
else
rep = 10 * (1+ cost);
regReps_h[r] = rep;
}
sortRegions(regReps_h, regSorted_h);
regCSR_h[0] = 0;
  // Third, compute each region's CSR start offset
for (Index_t i=1 ; i<numReg ; ++i) {
regCSR_h[i] = regCSR_h[i-1] + regElemSize[i-1];
}
  // Fourth, fill the region element index sets
for (Index_t i=0 ; i<numElem ; ++i) {
Index_t r = regSorted_h[regNumList_h[i]-1]; // region index == regnum-1
regElemlist_h[regCSR_h[r]] = i;
regCSR_h[r]++;
}
// Copy to device
  regCSR = regCSR_h; // records the beginning and end of each region
regReps = regReps_h; // records the rep number per region
regNumList = regNumList_h; // Region number per domain element
regElemlist = regElemlist_h; // region indexset
regSorted = regSorted_h; // keeps index of sorted regions
} // end of create function
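// TimeIncrement: computes the next timestep from the Courant and hydro
// constraints (reduced across ranks with MPI_Allreduce when USE_MPI is set),
// limits its growth/shrink rate, clamps it to dtmax and to the remaining
// time to stoptime, then advances time_h and the cycle counter.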
static inline
void TimeIncrement(Domain* domain)
{
// To make sure dtcourant and dthydro have been updated on host
hipEventSynchronize(domain->time_constraint_computed);
Real_t targetdt = domain->stoptime - domain->time_h;
if ((domain->dtfixed <= Real_t(0.0)) && (domain->cycle != Int_t(0))) {
Real_t ratio ;
/* This will require a reduction in parallel */
Real_t gnewdt = Real_t(1.0e+20) ;
Real_t newdt;
if ( *(domain->dtcourant_h) < gnewdt) {
gnewdt = *(domain->dtcourant_h) / Real_t(2.0) ;
}
if ( *(domain->dthydro_h) < gnewdt) {
gnewdt = *(domain->dthydro_h) * Real_t(2.0) / Real_t(3.0) ;
}
#if USE_MPI
MPI_Allreduce(&gnewdt, &newdt, 1,
((sizeof(Real_t) == 4) ? MPI_FLOAT : MPI_DOUBLE),
MPI_MIN, MPI_COMM_WORLD) ;
#else
newdt = gnewdt;
#endif
Real_t olddt = domain->deltatime_h;
ratio = newdt / olddt ;
if (ratio >= Real_t(1.0)) {
if (ratio < domain->deltatimemultlb) {
newdt = olddt ;
}
else if (ratio > domain->deltatimemultub) {
newdt = olddt*domain->deltatimemultub ;
}
}
if (newdt > domain->dtmax) {
newdt = domain->dtmax ;
}
domain->deltatime_h = newdt ;
}
/* TRY TO PREVENT VERY SMALL SCALING ON THE NEXT CYCLE */
if ((targetdt > domain->deltatime_h) &&
(targetdt < (Real_t(4.0) * domain->deltatime_h / Real_t(3.0))) ) {
targetdt = Real_t(2.0) * domain->deltatime_h / Real_t(3.0) ;
}
if (targetdt < domain->deltatime_h) {
domain->deltatime_h = targetdt ;
}
domain->time_h += domain->deltatime_h ;
++domain->cycle ;
}
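// CalcElemShapeFunctionDerivatives: evaluates the 3x8 matrix of shape
// function derivatives b[][] from the Jacobian cofactors and returns the
// element volume (Jacobian determinant) in *volume.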
__device__
static
__forceinline__
void CalcElemShapeFunctionDerivatives( const Real_t* const x,
const Real_t* const y,
const Real_t* const z,
Real_t b[][8],
Real_t* const volume )
{
const Real_t x0 = x[0] ; const Real_t x1 = x[1] ;
const Real_t x2 = x[2] ; const Real_t x3 = x[3] ;
const Real_t x4 = x[4] ; const Real_t x5 = x[5] ;
const Real_t x6 = x[6] ; const Real_t x7 = x[7] ;
const Real_t y0 = y[0] ; const Real_t y1 = y[1] ;
const Real_t y2 = y[2] ; const Real_t y3 = y[3] ;
const Real_t y4 = y[4] ; const Real_t y5 = y[5] ;
const Real_t y6 = y[6] ; const Real_t y7 = y[7] ;
const Real_t z0 = z[0] ; const Real_t z1 = z[1] ;
const Real_t z2 = z[2] ; const Real_t z3 = z[3] ;
const Real_t z4 = z[4] ; const Real_t z5 = z[5] ;
const Real_t z6 = z[6] ; const Real_t z7 = z[7] ;
Real_t fjxxi, fjxet, fjxze;
Real_t fjyxi, fjyet, fjyze;
Real_t fjzxi, fjzet, fjzze;
Real_t cjxxi, cjxet, cjxze;
Real_t cjyxi, cjyet, cjyze;
Real_t cjzxi, cjzet, cjzze;
fjxxi = Real_t(.125) * ( (x6-x0) + (x5-x3) - (x7-x1) - (x4-x2) );
fjxet = Real_t(.125) * ( (x6-x0) - (x5-x3) + (x7-x1) - (x4-x2) );
fjxze = Real_t(.125) * ( (x6-x0) + (x5-x3) + (x7-x1) + (x4-x2) );
fjyxi = Real_t(.125) * ( (y6-y0) + (y5-y3) - (y7-y1) - (y4-y2) );
fjyet = Real_t(.125) * ( (y6-y0) - (y5-y3) + (y7-y1) - (y4-y2) );
fjyze = Real_t(.125) * ( (y6-y0) + (y5-y3) + (y7-y1) + (y4-y2) );
fjzxi = Real_t(.125) * ( (z6-z0) + (z5-z3) - (z7-z1) - (z4-z2) );
fjzet = Real_t(.125) * ( (z6-z0) - (z5-z3) + (z7-z1) - (z4-z2) );
fjzze = Real_t(.125) * ( (z6-z0) + (z5-z3) + (z7-z1) + (z4-z2) );
/* compute cofactors */
cjxxi = (fjyet * fjzze) - (fjzet * fjyze);
cjxet = - (fjyxi * fjzze) + (fjzxi * fjyze);
cjxze = (fjyxi * fjzet) - (fjzxi * fjyet);
cjyxi = - (fjxet * fjzze) + (fjzet * fjxze);
cjyet = (fjxxi * fjzze) - (fjzxi * fjxze);
cjyze = - (fjxxi * fjzet) + (fjzxi * fjxet);
cjzxi = (fjxet * fjyze) - (fjyet * fjxze);
cjzet = - (fjxxi * fjyze) + (fjyxi * fjxze);
cjzze = (fjxxi * fjyet) - (fjyxi * fjxet);
/* calculate partials :
this need only be done for l = 0,1,2,3 since , by symmetry ,
(6,7,4,5) = - (0,1,2,3) .
*/
b[0][0] = - cjxxi - cjxet - cjxze;
b[0][1] = cjxxi - cjxet - cjxze;
b[0][2] = cjxxi + cjxet - cjxze;
b[0][3] = - cjxxi + cjxet - cjxze;
b[0][4] = -b[0][2];
b[0][5] = -b[0][3];
b[0][6] = -b[0][0];
b[0][7] = -b[0][1];
/*
b[0][4] = - cjxxi - cjxet + cjxze;
b[0][5] = + cjxxi - cjxet + cjxze;
b[0][6] = + cjxxi + cjxet + cjxze;
b[0][7] = - cjxxi + cjxet + cjxze;
*/
b[1][0] = - cjyxi - cjyet - cjyze;
b[1][1] = cjyxi - cjyet - cjyze;
b[1][2] = cjyxi + cjyet - cjyze;
b[1][3] = - cjyxi + cjyet - cjyze;
b[1][4] = -b[1][2];
b[1][5] = -b[1][3];
b[1][6] = -b[1][0];
b[1][7] = -b[1][1];
b[2][0] = - cjzxi - cjzet - cjzze;
b[2][1] = cjzxi - cjzet - cjzze;
b[2][2] = cjzxi + cjzet - cjzze;
b[2][3] = - cjzxi + cjzet - cjzze;
b[2][4] = -b[2][2];
b[2][5] = -b[2][3];
b[2][6] = -b[2][0];
b[2][7] = -b[2][1];
/* calculate jacobian determinant (volume) */
*volume = Real_t(8.) * ( fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);
}
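// SumElemFaceNormal: computes the area-weighted normal of one quad face
// (a quarter of the cross product of its two bisectors) and accumulates it
// onto the four nodes of that face.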
static
__device__
__forceinline__
void SumElemFaceNormal(Real_t *normalX0, Real_t *normalY0, Real_t *normalZ0,
Real_t *normalX1, Real_t *normalY1, Real_t *normalZ1,
Real_t *normalX2, Real_t *normalY2, Real_t *normalZ2,
Real_t *normalX3, Real_t *normalY3, Real_t *normalZ3,
const Real_t x0, const Real_t y0, const Real_t z0,
const Real_t x1, const Real_t y1, const Real_t z1,
const Real_t x2, const Real_t y2, const Real_t z2,
const Real_t x3, const Real_t y3, const Real_t z3)
{
Real_t bisectX0 = Real_t(0.5) * (x3 + x2 - x1 - x0);
Real_t bisectY0 = Real_t(0.5) * (y3 + y2 - y1 - y0);
Real_t bisectZ0 = Real_t(0.5) * (z3 + z2 - z1 - z0);
Real_t bisectX1 = Real_t(0.5) * (x2 + x1 - x3 - x0);
Real_t bisectY1 = Real_t(0.5) * (y2 + y1 - y3 - y0);
Real_t bisectZ1 = Real_t(0.5) * (z2 + z1 - z3 - z0);
Real_t areaX = Real_t(0.25) * (bisectY0 * bisectZ1 - bisectZ0 * bisectY1);
Real_t areaY = Real_t(0.25) * (bisectZ0 * bisectX1 - bisectX0 * bisectZ1);
Real_t areaZ = Real_t(0.25) * (bisectX0 * bisectY1 - bisectY0 * bisectX1);
*normalX0 += areaX;
*normalX1 += areaX;
*normalX2 += areaX;
*normalX3 += areaX;
*normalY0 += areaY;
*normalY1 += areaY;
*normalY2 += areaY;
*normalY3 += areaY;
*normalZ0 += areaZ;
*normalZ1 += areaZ;
*normalZ2 += areaZ;
*normalZ3 += areaZ;
}
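// Warp-cooperative variant of SumElemFaceNormal: each thread holds one node
// of the element; the face bisectors are formed from signed per-node
// contributions and reduced across the eight node lanes with SumOverNodesShfl.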
static
__device__
__forceinline__
void SumElemFaceNormal_warp_per_4cell(
Real_t *normalX0, Real_t *normalY0, Real_t *normalZ0,
const Real_t x, const Real_t y, const Real_t z,
int node,
int n0, int n1, int n2, int n3)
{
Real_t coef0 = Real_t(0.5);
Real_t coef1 = Real_t(0.5);
if (node == n0 || node == n1 || node==n2 || node==n3)
{
if (node == n0 || node == n1)
coef0 = -coef0;
if (node == n0 || node == n3)
coef1 = -coef1;
}
else
{
coef0 = Real_t(0.);
coef1 = Real_t(0.);
}
Real_t bisectX0 = coef0*x;
Real_t bisectY0 = coef0*y;
Real_t bisectZ0 = coef0*z;
Real_t bisectX1 = coef1*x;
Real_t bisectY1 = coef1*y;
Real_t bisectZ1 = coef1*z;
SumOverNodesShfl(bisectX0);
SumOverNodesShfl(bisectY0);
SumOverNodesShfl(bisectZ0);
SumOverNodesShfl(bisectX1);
SumOverNodesShfl(bisectY1);
SumOverNodesShfl(bisectZ1);
Real_t areaX = Real_t(0.25) * (bisectY0 * bisectZ1 - bisectZ0 * bisectY1);
Real_t areaY = Real_t(0.25) * (bisectZ0 * bisectX1 - bisectX0 * bisectZ1);
Real_t areaZ = Real_t(0.25) * (bisectX0 * bisectY1 - bisectY0 * bisectX1);
if (node == n0 || node == n1 || node==n2 || node==n3)
{
*normalX0 += areaX;
*normalY0 += areaY;
*normalZ0 += areaZ;
}
}
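// CalcElemNodeNormals: accumulates the area-weighted normals of all six
// element faces onto the element's eight nodes (pfx/pfy/pfz).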
__device__
static inline
void CalcElemNodeNormals(Real_t pfx[8],
Real_t pfy[8],
Real_t pfz[8],
const Real_t x[8],
const Real_t y[8],
const Real_t z[8])
{
for (Index_t i = 0 ; i < 8 ; ++i) {
pfx[i] = Real_t(0.0);
pfy[i] = Real_t(0.0);
pfz[i] = Real_t(0.0);
}
/* evaluate face one: nodes 0, 1, 2, 3 */
SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
&pfx[1], &pfy[1], &pfz[1],
&pfx[2], &pfy[2], &pfz[2],
&pfx[3], &pfy[3], &pfz[3],
x[0], y[0], z[0], x[1], y[1], z[1],
x[2], y[2], z[2], x[3], y[3], z[3]);
/* evaluate face two: nodes 0, 4, 5, 1 */
SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
&pfx[4], &pfy[4], &pfz[4],
&pfx[5], &pfy[5], &pfz[5],
&pfx[1], &pfy[1], &pfz[1],
x[0], y[0], z[0], x[4], y[4], z[4],
x[5], y[5], z[5], x[1], y[1], z[1]);
/* evaluate face three: nodes 1, 5, 6, 2 */
SumElemFaceNormal(&pfx[1], &pfy[1], &pfz[1],
&pfx[5], &pfy[5], &pfz[5],
&pfx[6], &pfy[6], &pfz[6],
&pfx[2], &pfy[2], &pfz[2],
x[1], y[1], z[1], x[5], y[5], z[5],
x[6], y[6], z[6], x[2], y[2], z[2]);
/* evaluate face four: nodes 2, 6, 7, 3 */
SumElemFaceNormal(&pfx[2], &pfy[2], &pfz[2],
&pfx[6], &pfy[6], &pfz[6],
&pfx[7], &pfy[7], &pfz[7],
&pfx[3], &pfy[3], &pfz[3],
x[2], y[2], z[2], x[6], y[6], z[6],
x[7], y[7], z[7], x[3], y[3], z[3]);
/* evaluate face five: nodes 3, 7, 4, 0 */
SumElemFaceNormal(&pfx[3], &pfy[3], &pfz[3],
&pfx[7], &pfy[7], &pfz[7],
&pfx[4], &pfy[4], &pfz[4],
&pfx[0], &pfy[0], &pfz[0],
x[3], y[3], z[3], x[7], y[7], z[7],
x[4], y[4], z[4], x[0], y[0], z[0]);
/* evaluate face six: nodes 4, 7, 6, 5 */
SumElemFaceNormal(&pfx[4], &pfy[4], &pfz[4],
&pfx[7], &pfy[7], &pfz[7],
&pfx[6], &pfy[6], &pfz[6],
&pfx[5], &pfy[5], &pfz[5],
x[4], y[4], z[4], x[7], y[7], z[7],
x[6], y[6], z[6], x[5], y[5], z[5]);
}
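// AddNodeForcesFromElems_kernel: gathers per-element corner forces into
// per-node forces. Each thread owns one node and sums the fx/fy/fz_elem
// entries listed in its segment of nodeElemCornerList (CSR layout given by
// nodeElemStart/nodeElemCount).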
__global__
void AddNodeForcesFromElems_kernel( Index_t numNode,
Index_t padded_numNode,
const Int_t* nodeElemCount,
const Int_t* nodeElemStart,
const Index_t* nodeElemCornerList,
const Real_t* fx_elem,
const Real_t* fy_elem,
const Real_t* fz_elem,
Real_t* fx_node,
Real_t* fy_node,
Real_t* fz_node,
const Int_t num_threads)
{
int tid=blockDim.x*blockIdx.x+threadIdx.x;
if (tid < num_threads)
{
Index_t g_i = tid;
Int_t count=nodeElemCount[g_i];
Int_t start=nodeElemStart[g_i];
Real_t fx,fy,fz;
fx=fy=fz=Real_t(0.0);
for (int j=0;j<count;j++)
{
Index_t pos=nodeElemCornerList[start+j]; // Uncoalesced access here
fx += fx_elem[pos];
fy += fy_elem[pos];
fz += fz_elem[pos];
}
fx_node[g_i]=fx;
fy_node[g_i]=fy;
fz_node[g_i]=fz;
}
}
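// VoluDer: partial derivative of the hexahedron volume with respect to one
// node's coordinates, written in terms of six neighboring node coordinates;
// the caller supplies the proper node permutation for each corner.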
static
__device__
__forceinline__
void VoluDer(const Real_t x0, const Real_t x1, const Real_t x2,
const Real_t x3, const Real_t x4, const Real_t x5,
const Real_t y0, const Real_t y1, const Real_t y2,
const Real_t y3, const Real_t y4, const Real_t y5,
const Real_t z0, const Real_t z1, const Real_t z2,
const Real_t z3, const Real_t z4, const Real_t z5,
Real_t* dvdx, Real_t* dvdy, Real_t* dvdz)
{
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ;
*dvdx =
(y1 + y2) * (z0 + z1) - (y0 + y1) * (z1 + z2) +
(y0 + y4) * (z3 + z4) - (y3 + y4) * (z0 + z4) -
(y2 + y5) * (z3 + z5) + (y3 + y5) * (z2 + z5);
*dvdy =
- (x1 + x2) * (z0 + z1) + (x0 + x1) * (z1 + z2) -
(x0 + x4) * (z3 + z4) + (x3 + x4) * (z0 + z4) +
(x2 + x5) * (z3 + z5) - (x3 + x5) * (z2 + z5);
*dvdz =
- (y1 + y2) * (x0 + x1) + (y0 + y1) * (x1 + x2) -
(y0 + y4) * (x3 + x4) + (y3 + y4) * (x0 + x4) +
(y2 + y5) * (x3 + x5) - (y3 + y5) * (x2 + x5);
*dvdx *= twelfth;
*dvdy *= twelfth;
*dvdz *= twelfth;
}
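// CalcElemVolumeDerivative: evaluates dV/dx, dV/dy, dV/dz for all eight
// nodes by calling VoluDer once per corner with the matching permutation.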
static
__device__
__forceinline__
void CalcElemVolumeDerivative(Real_t dvdx[8],
Real_t dvdy[8],
Real_t dvdz[8],
const Real_t x[8],
const Real_t y[8],
const Real_t z[8])
{
VoluDer(x[1], x[2], x[3], x[4], x[5], x[7],
y[1], y[2], y[3], y[4], y[5], y[7],
z[1], z[2], z[3], z[4], z[5], z[7],
&dvdx[0], &dvdy[0], &dvdz[0]);
VoluDer(x[0], x[1], x[2], x[7], x[4], x[6],
y[0], y[1], y[2], y[7], y[4], y[6],
z[0], z[1], z[2], z[7], z[4], z[6],
&dvdx[3], &dvdy[3], &dvdz[3]);
VoluDer(x[3], x[0], x[1], x[6], x[7], x[5],
y[3], y[0], y[1], y[6], y[7], y[5],
z[3], z[0], z[1], z[6], z[7], z[5],
&dvdx[2], &dvdy[2], &dvdz[2]);
VoluDer(x[2], x[3], x[0], x[5], x[6], x[4],
y[2], y[3], y[0], y[5], y[6], y[4],
z[2], z[3], z[0], z[5], z[6], z[4],
&dvdx[1], &dvdy[1], &dvdz[1]);
VoluDer(x[7], x[6], x[5], x[0], x[3], x[1],
y[7], y[6], y[5], y[0], y[3], y[1],
z[7], z[6], z[5], z[0], z[3], z[1],
&dvdx[4], &dvdy[4], &dvdz[4]);
VoluDer(x[4], x[7], x[6], x[1], x[0], x[2],
y[4], y[7], y[6], y[1], y[0], y[2],
z[4], z[7], z[6], z[1], z[0], z[2],
&dvdx[5], &dvdy[5], &dvdz[5]);
VoluDer(x[5], x[4], x[7], x[2], x[1], x[3],
y[5], y[4], y[7], y[2], y[1], y[3],
z[5], z[4], z[7], z[2], z[1], z[3],
&dvdx[6], &dvdy[6], &dvdz[6]);
VoluDer(x[6], x[5], x[4], x[3], x[2], x[0],
y[6], y[5], y[4], y[3], y[2], y[0],
z[6], z[5], z[4], z[3], z[2], z[0],
&dvdx[7], &dvdy[7], &dvdz[7]);
}
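// CalcElemFBHourglassForce: FB (Flanagan-Belytschko) hourglass control.
// For each coordinate direction it projects the nodal velocities onto the
// four hourglass base vectors (h00..h03) and accumulates the scaled result
// into the nodal hourglass forces hgfx/hgfy/hgfz.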
static
__device__
__forceinline__
void CalcElemFBHourglassForce(Real_t *xd, Real_t *yd, Real_t *zd, Real_t *hourgam0,
Real_t *hourgam1, Real_t *hourgam2, Real_t *hourgam3,
Real_t *hourgam4, Real_t *hourgam5, Real_t *hourgam6,
Real_t *hourgam7, Real_t coefficient,
Real_t *hgfx, Real_t *hgfy, Real_t *hgfz )
{
Index_t i00=0;
Index_t i01=1;
Index_t i02=2;
Index_t i03=3;
Real_t h00 =
hourgam0[i00] * xd[0] + hourgam1[i00] * xd[1] +
hourgam2[i00] * xd[2] + hourgam3[i00] * xd[3] +
hourgam4[i00] * xd[4] + hourgam5[i00] * xd[5] +
hourgam6[i00] * xd[6] + hourgam7[i00] * xd[7];
Real_t h01 =
hourgam0[i01] * xd[0] + hourgam1[i01] * xd[1] +
hourgam2[i01] * xd[2] + hourgam3[i01] * xd[3] +
hourgam4[i01] * xd[4] + hourgam5[i01] * xd[5] +
hourgam6[i01] * xd[6] + hourgam7[i01] * xd[7];
Real_t h02 =
hourgam0[i02] * xd[0] + hourgam1[i02] * xd[1]+
hourgam2[i02] * xd[2] + hourgam3[i02] * xd[3]+
hourgam4[i02] * xd[4] + hourgam5[i02] * xd[5]+
hourgam6[i02] * xd[6] + hourgam7[i02] * xd[7];
Real_t h03 =
hourgam0[i03] * xd[0] + hourgam1[i03] * xd[1] +
hourgam2[i03] * xd[2] + hourgam3[i03] * xd[3] +
hourgam4[i03] * xd[4] + hourgam5[i03] * xd[5] +
hourgam6[i03] * xd[6] + hourgam7[i03] * xd[7];
hgfx[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfx[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfx[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfx[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfx[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfx[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfx[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfx[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
h00 =
hourgam0[i00] * yd[0] + hourgam1[i00] * yd[1] +
hourgam2[i00] * yd[2] + hourgam3[i00] * yd[3] +
hourgam4[i00] * yd[4] + hourgam5[i00] * yd[5] +
hourgam6[i00] * yd[6] + hourgam7[i00] * yd[7];
h01 =
hourgam0[i01] * yd[0] + hourgam1[i01] * yd[1] +
hourgam2[i01] * yd[2] + hourgam3[i01] * yd[3] +
hourgam4[i01] * yd[4] + hourgam5[i01] * yd[5] +
hourgam6[i01] * yd[6] + hourgam7[i01] * yd[7];
h02 =
hourgam0[i02] * yd[0] + hourgam1[i02] * yd[1]+
hourgam2[i02] * yd[2] + hourgam3[i02] * yd[3]+
hourgam4[i02] * yd[4] + hourgam5[i02] * yd[5]+
hourgam6[i02] * yd[6] + hourgam7[i02] * yd[7];
h03 =
hourgam0[i03] * yd[0] + hourgam1[i03] * yd[1] +
hourgam2[i03] * yd[2] + hourgam3[i03] * yd[3] +
hourgam4[i03] * yd[4] + hourgam5[i03] * yd[5] +
hourgam6[i03] * yd[6] + hourgam7[i03] * yd[7];
hgfy[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfy[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfy[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfy[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfy[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfy[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfy[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfy[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
h00 =
hourgam0[i00] * zd[0] + hourgam1[i00] * zd[1] +
hourgam2[i00] * zd[2] + hourgam3[i00] * zd[3] +
hourgam4[i00] * zd[4] + hourgam5[i00] * zd[5] +
hourgam6[i00] * zd[6] + hourgam7[i00] * zd[7];
h01 =
hourgam0[i01] * zd[0] + hourgam1[i01] * zd[1] +
hourgam2[i01] * zd[2] + hourgam3[i01] * zd[3] +
hourgam4[i01] * zd[4] + hourgam5[i01] * zd[5] +
hourgam6[i01] * zd[6] + hourgam7[i01] * zd[7];
h02 =
hourgam0[i02] * zd[0] + hourgam1[i02] * zd[1]+
hourgam2[i02] * zd[2] + hourgam3[i02] * zd[3]+
hourgam4[i02] * zd[4] + hourgam5[i02] * zd[5]+
hourgam6[i02] * zd[6] + hourgam7[i02] * zd[7];
h03 =
hourgam0[i03] * zd[0] + hourgam1[i03] * zd[1] +
hourgam2[i03] * zd[2] + hourgam3[i03] * zd[3] +
hourgam4[i03] * zd[4] + hourgam5[i03] * zd[5] +
hourgam6[i03] * zd[6] + hourgam7[i03] * zd[7];
hgfz[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfz[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfz[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfz[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfz[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfz[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfz[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfz[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
}
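// CalcHourglassModes: builds the four hourglass shape vectors per node,
// orthogonalized against the element geometry using the volume derivatives
// (hourgam = gamma - volinv * dvd{x,y,z} dotted with the gamma-weighted
// nodal coordinates).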
__device__
__forceinline__
void CalcHourglassModes(const Real_t xn[8], const Real_t yn[8], const Real_t zn[8],
const Real_t dvdxn[8], const Real_t dvdyn[8], const Real_t dvdzn[8],
Real_t hourgam[8][4], Real_t volinv)
{
Real_t hourmodx, hourmody, hourmodz;
hourmodx = xn[0] + xn[1] - xn[2] - xn[3] - xn[4] - xn[5] + xn[6] + xn[7];
hourmody = yn[0] + yn[1] - yn[2] - yn[3] - yn[4] - yn[5] + yn[6] + yn[7];
hourmodz = zn[0] + zn[1] - zn[2] - zn[3] - zn[4] - zn[5] + zn[6] + zn[7]; // 21
hourgam[0][0] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][0] = 1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][0] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][0] = -1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][0] = -1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][0] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][0] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][0] = 1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz); // 60
hourmodx = xn[0] - xn[1] - xn[2] + xn[3] - xn[4] + xn[5] + xn[6] - xn[7];
hourmody = yn[0] - yn[1] - yn[2] + yn[3] - yn[4] + yn[5] + yn[6] - yn[7];
hourmodz = zn[0] - zn[1] - zn[2] + zn[3] - zn[4] + zn[5] + zn[6] - zn[7];
hourgam[0][1] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][1] = -1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][1] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][1] = 1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][1] = -1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][1] = 1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][1] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][1] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
hourmodx = xn[0] - xn[1] + xn[2] - xn[3] + xn[4] - xn[5] + xn[6] - xn[7];
hourmody = yn[0] - yn[1] + yn[2] - yn[3] + yn[4] - yn[5] + yn[6] - yn[7];
hourmodz = zn[0] - zn[1] + zn[2] - zn[3] + zn[4] - zn[5] + zn[6] - zn[7];
hourgam[0][2] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][2] = -1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][2] = 1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][2] = -1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][2] = 1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][2] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][2] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][2] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
hourmodx = -xn[0] + xn[1] - xn[2] + xn[3] + xn[4] - xn[5] + xn[6] - xn[7];
hourmody = -yn[0] + yn[1] - yn[2] + yn[3] + yn[4] - yn[5] + yn[6] - yn[7];
hourmodz = -zn[0] + zn[1] - zn[2] + zn[3] + zn[4] - zn[5] + zn[6] - zn[7];
hourgam[0][3] = -1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][3] = 1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][3] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][3] = 1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][3] = 1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][3] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][3] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][3] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
}
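// One-thread-per-element kernel: combines the stress (IntegrateStressForElems)
// and FB hourglass force computations. Forces are written per element corner
// in the double-precision build, or accumulated directly into the nodal
// arrays with atomicAdd in the single-precision build.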
template< bool hourg_gt_zero >
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(64,4)
#else
__launch_bounds__(64,8)
#endif
void CalcVolumeForceForElems_kernel(
const Real_t* __restrict__ volo,
const Real_t* __restrict__ v,
const Real_t* __restrict__ p,
const Real_t* __restrict__ q,
Real_t hourg,
Index_t numElem,
Index_t padded_numElem,
const Index_t* __restrict__ nodelist,
const Real_t* __restrict__ ss,
const Real_t* __restrict__ elemMass,
const Real_t* __restrict__ x, const Real_t* __restrict__ y, const Real_t* __restrict__ z,
const Real_t* __restrict__ xd, const Real_t* __restrict__ yd, const Real_t* __restrict__ zd,
//TextureObj<Real_t> x, TextureObj<Real_t> y, TextureObj<Real_t> z,
//TextureObj<Real_t> xd, TextureObj<Real_t> yd, TextureObj<Real_t> zd,
//TextureObj<Real_t>* x, TextureObj<Real_t>* y, TextureObj<Real_t>* z,
//TextureObj<Real_t>* xd, TextureObj<Real_t>* yd, TextureObj<Real_t>* zd,
#ifdef DOUBLE_PRECISION // For floats, use atomicAdd
Real_t* __restrict__ fx_elem,
Real_t* __restrict__ fy_elem,
Real_t* __restrict__ fz_elem,
#else
Real_t* __restrict__ fx_node,
Real_t* __restrict__ fy_node,
Real_t* __restrict__ fz_node,
#endif
Index_t* __restrict__ bad_vol,
const Index_t num_threads)
{
/*************************************************
* FUNCTION: Calculates the volume forces
*************************************************/
  Real_t xn[8],yn[8],zn[8];
  Real_t xdn[8],ydn[8],zdn[8];
  Real_t dvdxn[8],dvdyn[8],dvdzn[8];
  Real_t hgfx[8],hgfy[8],hgfz[8];
Real_t hourgam[8][4];
Real_t coefficient;
int elem=blockDim.x*blockIdx.x+threadIdx.x;
if (elem < num_threads)
{
Real_t volume = v[elem];
Real_t det = volo[elem] * volume;
// Check for bad volume
if (volume < 0.) {
*bad_vol = elem;
}
Real_t ss1 = ss[elem];
Real_t mass1 = elemMass[elem];
Real_t sigxx = -p[elem] - q[elem];
Index_t n[8];
#pragma unroll
for (int i=0;i<8;i++) {
n[i] = nodelist[elem+i*padded_numElem];
}
Real_t volinv = Real_t(1.0) / det;
//#pragma unroll
//for (int i=0;i<8;i++) {
// xn[i] =x[n[i]];
// yn[i] =y[n[i]];
// zn[i] =z[n[i]];
//}
#pragma unroll
for (int i=0;i<8;i++)
xn[i] =x[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
yn[i] =y[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
zn[i] =z[n[i]];
Real_t volume13 = CBRT(det);
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
/*************************************************/
/* compute the volume derivatives */
/*************************************************/
CalcElemVolumeDerivative(dvdxn, dvdyn, dvdzn, xn, yn, zn);
/*************************************************/
/* compute the hourglass modes */
/*************************************************/
CalcHourglassModes(xn,yn,zn,dvdxn,dvdyn,dvdzn,hourgam,volinv);
/*************************************************/
/* CalcStressForElems */
/*************************************************/
Real_t B[3][8];
CalcElemShapeFunctionDerivatives(xn, yn, zn, B, &det);
CalcElemNodeNormals( B[0] , B[1], B[2], xn, yn, zn);
// Check for bad volume
if (det < 0.) {
*bad_vol = elem;
}
#pragma unroll
for (int i=0;i<8;i++)
{
hgfx[i] = -( sigxx*B[0][i] );
hgfy[i] = -( sigxx*B[1][i] );
hgfz[i] = -( sigxx*B[2][i] );
}
if (hourg_gt_zero)
{
/*************************************************/
/* CalcFBHourglassForceForElems */
/*************************************************/
// #pragma unroll
// for (int i=0;i<8;i++) {
// xdn[i] =xd[n[i]];
// ydn[i] =yd[n[i]];
// zdn[i] =zd[n[i]];
// }
#pragma unroll
for (int i=0;i<8;i++)
xdn[i] =xd[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
ydn[i] =yd[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
zdn[i] =zd[n[i]];
CalcElemFBHourglassForce
( &xdn[0],&ydn[0],&zdn[0],
hourgam[0],hourgam[1],hourgam[2],hourgam[3],
hourgam[4],hourgam[5],hourgam[6],hourgam[7],
coefficient,
&hgfx[0],&hgfy[0],&hgfz[0]
);
}
#ifdef DOUBLE_PRECISION
#pragma unroll
for (int node=0;node<8;node++)
{
Index_t store_loc = elem+padded_numElem*node;
fx_elem[store_loc]=hgfx[node];
fy_elem[store_loc]=hgfy[node];
fz_elem[store_loc]=hgfz[node];
}
#else
#pragma unroll
for (int i=0;i<8;i++)
{
Index_t ni= n[i];
atomicAdd(&fx_node[ni],hgfx[i]);
atomicAdd(&fy_node[ni],hgfy[i]);
atomicAdd(&fz_node[ni],hgfz[i]);
}
#endif
} // If elem < numElem
}
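// Warp-cooperative variant: eight consecutive threads handle the eight nodes
// of one element (four elements per 32-thread warp) and exchange per-node
// values with shuffle-based reductions instead of per-thread arrays.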
template< bool hourg_gt_zero, int cta_size>
__global__
void CalcVolumeForceForElems_kernel_warp_per_4cell(
const Real_t* __restrict__ volo,
const Real_t* __restrict__ v,
const Real_t* __restrict__ p,
const Real_t* __restrict__ q,
Real_t hourg,
Index_t numElem,
Index_t padded_numElem,
const Index_t* __restrict__ nodelist,
const Real_t* __restrict__ ss,
const Real_t* __restrict__ elemMass,
//const Real_t __restrict__ *x, const Real_t __restrict__ *y, const Real_t __restrict__ *z,
//const Real_t __restrict__ *xd, const Real_t __restrict__ *yd, const Real_t __restrict__ *zd,
const Real_t *x, const Real_t *y, const Real_t *z,
const Real_t *xd, const Real_t *yd, const Real_t *zd,
#ifdef DOUBLE_PRECISION // For floats, use atomicAdd
Real_t* __restrict__ fx_elem,
Real_t* __restrict__ fy_elem,
Real_t* __restrict__ fz_elem,
#else
Real_t* __restrict__ fx_node,
Real_t* __restrict__ fy_node,
Real_t* __restrict__ fz_node,
#endif
Index_t* __restrict__ bad_vol,
const Index_t num_threads)
{
/*************************************************
* FUNCTION: Calculates the volume forces
*************************************************/
  Real_t xn,yn,zn;
  Real_t xdn,ydn,zdn;
  Real_t dvdxn,dvdyn,dvdzn;
  Real_t hgfx,hgfy,hgfz;
Real_t hourgam[4];
Real_t coefficient;
int tid=blockDim.x*blockIdx.x+threadIdx.x;
int elem = tid >> 3; // elem = tid/8
int node = tid & 7; // node = tid%8
// elem within cta
// int cta_elem = threadIdx.x/8;
if (elem < num_threads)
{
Real_t volume = v[elem];
Real_t det = volo[elem] * volume;
// Check for bad volume
if (volume < 0.) {
*bad_vol = elem;
}
Real_t ss1 = ss[elem];
Real_t mass1 = elemMass[elem];
Real_t sigxx = -p[elem] - q[elem];
Index_t node_id;
node_id = nodelist[elem+node*padded_numElem];
Real_t volinv = Real_t(1.0) / det;
xn =x[node_id];
yn =y[node_id];
zn =z[node_id];
Real_t volume13 = CBRT(det);
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
/*************************************************/
/* compute the volume derivatives */
/*************************************************/
unsigned int ind0,ind1,ind2,ind3,ind4,ind5;
// Use octal number to represent the indices for each node
//ind0 = 012307456;
//ind1 = 023016745;
//ind2 = 030125674;
//ind3 = 045670123;
//ind4 = 056743012;
//ind5 = 074561230;
    //int mask = 7u << (3*node);
switch(node) {
case 0:
{ind0=1; ind1=2; ind2=3; ind3=4; ind4=5; ind5=7;
break;}
case 1:
{ind0=2; ind1=3; ind2=0; ind3=5; ind4=6; ind5=4;
break;}
case 2:
{ind0=3; ind1=0; ind2=1; ind3=6; ind4=7; ind5=5;
break;}
case 3:
{ind0=0; ind1=1; ind2=2; ind3=7; ind4=4; ind5=6;
break;}
case 4:
{ind0=7; ind1=6; ind2=5; ind3=0; ind4=3; ind5=1;
break;}
case 5:
{ind0=4; ind1=7; ind2=6; ind3=1; ind4=0; ind5=2;
break;}
case 6:
{ind0=5; ind1=4; ind2=7; ind3=2; ind4=1; ind5=3;
break;}
case 7:
{ind0=6; ind1=5; ind2=4; ind3=3; ind4=2; ind5=0;
break;}
}
VOLUDER(utils::shfl(yn,ind0,8),utils::shfl(yn,ind1,8),utils::shfl(yn,ind2,8),
utils::shfl(yn,ind3,8),utils::shfl(yn,ind4,8),utils::shfl(yn,ind5,8),
utils::shfl(zn,ind0,8),utils::shfl(zn,ind1,8),utils::shfl(zn,ind2,8),
utils::shfl(zn,ind3,8),utils::shfl(zn,ind4,8),utils::shfl(zn,ind5,8),
dvdxn);
VOLUDER(utils::shfl(zn,ind0,8),utils::shfl(zn,ind1,8),utils::shfl(zn,ind2,8),
utils::shfl(zn,ind3,8),utils::shfl(zn,ind4,8),utils::shfl(zn,ind5,8),
utils::shfl(xn,ind0,8),utils::shfl(xn,ind1,8),utils::shfl(xn,ind2,8),
utils::shfl(xn,ind3,8),utils::shfl(xn,ind4,8),utils::shfl(xn,ind5,8),
dvdyn);
VOLUDER(utils::shfl(xn,ind0,8),utils::shfl(xn,ind1,8),utils::shfl(xn,ind2,8),
utils::shfl(xn,ind3,8),utils::shfl(xn,ind4,8),utils::shfl(xn,ind5,8),
utils::shfl(yn,ind0,8),utils::shfl(yn,ind1,8),utils::shfl(yn,ind2,8),
utils::shfl(yn,ind3,8),utils::shfl(yn,ind4,8),utils::shfl(yn,ind5,8),
dvdzn);
/*************************************************/
/* compute the hourglass modes */
/*************************************************/
Real_t hourmodx, hourmody, hourmodz;
const Real_t posf = Real_t( 1.);
const Real_t negf = Real_t(-1.);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==2 || node==3 || node==4 || node==5) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[0] = negf;
}
else hourgam[0] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[0] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==1 || node==2 || node==4 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[1] = negf;
}
else hourgam[1] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[1] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==1 || node==3 || node==5 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[2] = negf;
}
else hourgam[2] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[2] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==0 || node==2 || node==5 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[3] = negf;
}
else hourgam[3] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[3] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
/*************************************************/
/* CalcStressForElems */
/*************************************************/
Real_t b[3];
/*************************************************/
//CalcElemShapeFunctionDerivatives_warp_per_4cell(xn, yn, zn, B, &det);
/*************************************************/
Real_t fjxxi, fjxet, fjxze;
Real_t fjyxi, fjyet, fjyze;
Real_t fjzxi, fjzet, fjzze;
fjxxi = fjxet = fjxze = Real_t(0.125)*xn;
fjyxi = fjyet = fjyze = Real_t(0.125)*yn;
fjzxi = fjzet = fjzze = Real_t(0.125)*zn;
if (node==0 || node==3 || node==7 || node==4)
{
fjxxi = -fjxxi;
fjyxi = -fjyxi;
fjzxi = -fjzxi;
}
if (node==0 || node==5 || node==1 || node==4)
{
fjxet = -fjxet;
fjyet = -fjyet;
fjzet = -fjzet;
}
if (node==0 || node==3 || node==1 || node==2)
{
fjxze = -fjxze;
fjyze = -fjyze;
fjzze = -fjzze;
}
SumOverNodesShfl(fjxxi);
SumOverNodesShfl(fjxet);
SumOverNodesShfl(fjxze);
SumOverNodesShfl(fjyxi);
SumOverNodesShfl(fjyet);
SumOverNodesShfl(fjyze);
SumOverNodesShfl(fjzxi);
SumOverNodesShfl(fjzet);
SumOverNodesShfl(fjzze);
/* compute cofactors */
Real_t cjxxi, cjxet, cjxze;
Real_t cjyxi, cjyet, cjyze;
Real_t cjzxi, cjzet, cjzze;
cjxxi = (fjyet * fjzze) - (fjzet * fjyze);
cjxet = - (fjyxi * fjzze) + (fjzxi * fjyze);
cjxze = (fjyxi * fjzet) - (fjzxi * fjyet);
cjyxi = - (fjxet * fjzze) + (fjzet * fjxze);
cjyet = (fjxxi * fjzze) - (fjzxi * fjxze);
cjyze = - (fjxxi * fjzet) + (fjzxi * fjxet);
cjzxi = (fjxet * fjyze) - (fjyet * fjxze);
cjzet = - (fjxxi * fjyze) + (fjyxi * fjxze);
cjzze = (fjxxi * fjyet) - (fjyxi * fjxet);
Real_t coef_xi, coef_et, coef_ze;
if (node==0 || node==3 || node==4 || node==7)
coef_xi = Real_t(-1.);
else
coef_xi = Real_t(1.);
if (node==0 || node==1 || node==4 || node==5)
coef_et = Real_t(-1.);
else
coef_et = Real_t(1.);
if (node==0 || node==1 || node==2 || node==3)
coef_ze = Real_t(-1.);
else
coef_ze = Real_t(1.);
/* calculate partials :
this need only be done for l = 0,1,2,3 since , by symmetry ,
(6,7,4,5) = - (0,1,2,3) .
*/
b[0] = coef_xi * cjxxi + coef_et * cjxet + coef_ze * cjxze;
b[1] = coef_xi * cjyxi + coef_et * cjyet + coef_ze * cjyze;
b[2] = coef_xi * cjzxi + coef_et * cjzet + coef_ze * cjzze;
/* calculate jacobian determinant (volume) */
det = Real_t(8.) * ( fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);
/*************************************************/
//CalcElemNodeNormals_warp_per_4cell( B[0] , B[1], B[2], xn, yn, zn);
/*************************************************/
b[0] = Real_t(0.0);
b[1] = Real_t(0.0);
b[2] = Real_t(0.0);
    // Accumulate normals from the six element faces; only faces that contain this node contribute
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 0,1,2,3);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 0,4,5,1);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 1,5,6,2);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 2,6,7,3);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 3,7,4,0);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 4,7,6,5);
// Check for bad volume
if (det < 0.) {
*bad_vol = elem;
}
hgfx = -( sigxx*b[0] );
hgfy = -( sigxx*b[1] );
hgfz = -( sigxx*b[2] );
if (hourg_gt_zero)
{
/*************************************************/
/* CalcFBHourglassForceForElems */
/*************************************************/
xdn = xd[node_id];
ydn = yd[node_id];
zdn = zd[node_id];
Real_t hgfx_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*xdn;
SumOverNodesShfl(h);
hgfx_temp+=hourgam[i]*h;
}
hgfx_temp *= coefficient;
hgfx += hgfx_temp;
Real_t hgfy_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*ydn;
SumOverNodesShfl(h);
hgfy_temp+=hourgam[i]*h;
}
hgfy_temp *= coefficient;
hgfy += hgfy_temp;
Real_t hgfz_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*zdn;
SumOverNodesShfl(h);
hgfz_temp+=hourgam[i]*h;
}
hgfz_temp *= coefficient;
hgfz += hgfz_temp;
}
#ifdef DOUBLE_PRECISION
Index_t store_loc = elem+padded_numElem*node;
fx_elem[store_loc]=hgfx;
fy_elem[store_loc]=hgfy;
fz_elem[store_loc]=hgfz;
#else
atomicAdd(&fx_node[node_id],hgfx);
atomicAdd(&fy_node[node_id],hgfy);
atomicAdd(&fz_node[node_id],hgfz);
#endif
} // If elem < numElem
}
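// Host driver for the volume-force computation. In the double-precision
// build, per-corner force scratch arrays are allocated and later gathered
// per node by AddNodeForcesFromElems_kernel; in the single-precision build,
// the nodal force arrays are zeroed and the kernel accumulates with atomicAdd.
// The hourglass coefficient test is compiled in as a template flag.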
static inline
void CalcVolumeForceForElems(const Real_t hgcoef,Domain *domain)
{
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
#ifdef DOUBLE_PRECISION
Vector_d<Real_t>* fx_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fy_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fz_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
#else
thrust::fill(domain->fx.begin(),domain->fx.end(),0.);
thrust::fill(domain->fy.begin(),domain->fy.end(),0.);
thrust::fill(domain->fz.begin(),domain->fz.end(),0.);
#endif
int num_threads = numElem ;
const int block_size = 64;
int dimGrid = PAD_DIV(num_threads,block_size);
bool hourg_gt_zero = hgcoef > Real_t(0.0);
if (hourg_gt_zero)
{
hipLaunchKernelGGL(( CalcVolumeForceForElems_kernel<true>) , dim3(dimGrid),dim3(block_size), 0, 0,
domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
#ifdef DOUBLE_PRECISION
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw() ,
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
else
{
hipLaunchKernelGGL(( CalcVolumeForceForElems_kernel<false>) , dim3(dimGrid),dim3(block_size), 0, 0,
domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
#ifdef DOUBLE_PRECISION
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw() ,
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
#ifdef DOUBLE_PRECISION
num_threads = domain->numNode;
// Launch boundary nodes first
dimGrid= PAD_DIV(num_threads,block_size);
hipLaunchKernelGGL(( AddNodeForcesFromElems_kernel), dim3(dimGrid),dim3(block_size), 0, 0,
domain->numNode,
domain->padded_numNode,
domain->nodeElemCount.raw(),
domain->nodeElemStart.raw(),
domain->nodeElemCornerList.raw(),
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw(),
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
num_threads
);
// hipDeviceSynchronize();
// cudaCheckError();
Allocator<Vector_d<Real_t> >::free(fx_elem,padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(fy_elem,padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(fz_elem,padded_numElem*8);
#endif // ifdef DOUBLE_PRECISION
return ;
}
/*
static inline
void CalcVolumeForceForElems_warp_per_4cell(const Real_t hgcoef,Domain *domain)
{
  // Map one warp per 4 cells, i.e. one thread per vertex
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
#ifdef DOUBLE_PRECISION
Vector_d<Real_t>* fx_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fy_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fz_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
#else
thrust::fill(domain->fx.begin(),domain->fx.end(),0.);
thrust::fill(domain->fy.begin(),domain->fy.end(),0.);
thrust::fill(domain->fz.begin(),domain->fz.end(),0.);
#endif
const int warps_per_cta = 2;
const int cta_size = warps_per_cta * 32;
int num_threads = numElem*8;
int dimGrid = PAD_DIV(num_threads,cta_size);
bool hourg_gt_zero = hgcoef > Real_t(0.0);
if (hourg_gt_zero)
{
CalcVolumeForceForElems_kernel_warp_per_4cell<true, cta_size> <<<dimGrid,cta_size>>>
( domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(),
domain->y.raw(),
domain->z.raw(),
domain->xd.raw(),
domain->yd.raw(),
domain->zd.raw(),
//domain->tex_x, domain->tex_y, domain->tex_z, domain->tex_xd, domain->tex_yd, domain->tex_zd,
#ifdef DOUBLE_PRECISION
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw() ,
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
else
{
CalcVolumeForceForElems_kernel_warp_per_4cell<false, cta_size> <<<dimGrid,cta_size>>>
( domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(),
domain->y.raw(),
domain->z.raw(),
domain->xd.raw(),
domain->yd.raw(),
domain->zd.raw(),
#ifdef DOUBLE_PRECISION
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw() ,
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
#ifdef DOUBLE_PRECISION
num_threads = domain->numNode;
// Launch boundary nodes first
dimGrid= PAD_DIV(num_threads,cta_size);
AddNodeForcesFromElems_kernel<<<dimGrid,cta_size>>>
( domain->numNode,
domain->padded_numNode,
domain->nodeElemCount.raw(),
domain->nodeElemStart.raw(),
domain->nodeElemCornerList.raw(),
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw(),
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
num_threads
);
//hipDeviceSynchronize();
//cudaCheckError();
Allocator<Vector_d<Real_t> >::free(fx_elem,padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(fy_elem,padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(fz_elem,padded_numElem*8);
#endif // ifdef DOUBLE_PRECISION
return ;
}
*/
static inline
void CalcVolumeForceForElems(Domain* domain)
{
const Real_t hgcoef = domain->hgcoef ;
CalcVolumeForceForElems(hgcoef,domain);
//CalcVolumeForceForElems_warp_per_4cell(hgcoef,domain);
}
static inline void checkErrors(Domain* domain,int its,int myRank)
{
if (*(domain->bad_vol_h) != -1)
{
printf("Rank %i: Volume Error in cell %d at iteration %d\n",myRank,*(domain->bad_vol_h),its);
exit(VolumeError);
}
if (*(domain->bad_q_h) != -1)
{
printf("Rank %i: Q Error in cell %d at iteration %d\n",myRank,*(domain->bad_q_h),its);
exit(QStopError);
}
}
static inline void CalcForceForNodes(Domain *domain)
{
#if USE_MPI
CommRecv(*domain, MSG_COMM_SBN, 3,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false) ;
#endif
CalcVolumeForceForElems(domain);
// moved here from the main loop to allow async execution with GPU work
TimeIncrement(domain);
#if USE_MPI
// initialize pointers
domain->d_fx = domain->fx.raw();
domain->d_fy = domain->fy.raw();
domain->d_fz = domain->fz.raw();
Domain_member fieldData[3] ;
fieldData[0] = &Domain::get_fx ;
fieldData[1] = &Domain::get_fy ;
fieldData[2] = &Domain::get_fz ;
CommSendGpu(*domain, MSG_COMM_SBN, 3, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false, domain->streams[2]) ;
CommSBNGpu(*domain, 3, fieldData, &domain->streams[2]) ;
#endif
}
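// One thread per node: acceleration is simply force / nodalMass in each
// coordinate direction.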
__global__
void CalcAccelerationForNodes_kernel(int numNode,
Real_t *xdd, Real_t *ydd, Real_t *zdd,
Real_t *fx, Real_t *fy, Real_t *fz,
Real_t *nodalMass)
{
int tid=blockDim.x*blockIdx.x+threadIdx.x;
if (tid < numNode)
{
Real_t one_over_nMass = Real_t(1.)/nodalMass[tid];
xdd[tid]=fx[tid]*one_over_nMass;
ydd[tid]=fy[tid]*one_over_nMass;
zdd[tid]=fz[tid]*one_over_nMass;
}
}
static inline
void CalcAccelerationForNodes(Domain *domain)
{
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numNode,dimBlock);
hipLaunchKernelGGL(( CalcAccelerationForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
domain->numNode,
domain->xdd.raw(),domain->ydd.raw(),domain->zdd.raw(),
domain->fx.raw(),domain->fy.raw(),domain->fz.raw(),
domain->nodalMass.raw());
//hipDeviceSynchronize();
//cudaCheckError();
}
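// Symmetry-plane boundary condition: zero the acceleration component normal to
// the plane. symm[] lists the boundary node indices and xyzdd points at the
// matching xdd/ydd/zdd array for that plane.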
__global__
void ApplyAccelerationBoundaryConditionsForNodes_kernel(
int numNodeBC, Real_t *xyzdd,
Index_t *symm)
{
int i=blockDim.x*blockIdx.x+threadIdx.x;
if (i < numNodeBC)
{
xyzdd[symm[i]] = Real_t(0.0) ;
}
}
static inline
void ApplyAccelerationBoundaryConditionsForNodes(Domain *domain)
{
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numSymmX,dimBlock);
if (domain->numSymmX > 0)
hipLaunchKernelGGL(( ApplyAccelerationBoundaryConditionsForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
domain->numSymmX,
domain->xdd.raw(),
domain->symmX.raw());
dimGrid = PAD_DIV(domain->numSymmY,dimBlock);
if (domain->numSymmY > 0)
hipLaunchKernelGGL(( ApplyAccelerationBoundaryConditionsForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
domain->numSymmY,
domain->ydd.raw(),
domain->symmY.raw());
dimGrid = PAD_DIV(domain->numSymmZ,dimBlock);
if (domain->numSymmZ > 0)
hipLaunchKernelGGL(( ApplyAccelerationBoundaryConditionsForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
domain->numSymmZ,
domain->zdd.raw(),
domain->symmZ.raw());
}
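// Explicit time integration for the nodes: v += a*dt, then x += v*dt, with any
// velocity component smaller than u_cut snapped to zero to suppress drift.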
__global__
void CalcPositionAndVelocityForNodes_kernel(int numNode,
const Real_t deltatime,
const Real_t u_cut,
Real_t* __restrict__ x, Real_t* __restrict__ y, Real_t* __restrict__ z,
Real_t* __restrict__ xd, Real_t* __restrict__ yd, Real_t* __restrict__ zd,
const Real_t* __restrict__ xdd, const Real_t* __restrict__ ydd, const Real_t* __restrict__ zdd)
{
int i=blockDim.x*blockIdx.x+threadIdx.x;
if (i < numNode)
{
Real_t xdtmp, ydtmp, zdtmp, dt;
dt = deltatime;
xdtmp = xd[i] + xdd[i] * dt ;
ydtmp = yd[i] + ydd[i] * dt ;
zdtmp = zd[i] + zdd[i] * dt ;
if( FABS(xdtmp) < u_cut ) xdtmp = 0.0;
if( FABS(ydtmp) < u_cut ) ydtmp = 0.0;
if( FABS(zdtmp) < u_cut ) zdtmp = 0.0;
x[i] += xdtmp * dt;
y[i] += ydtmp * dt;
z[i] += zdtmp * dt;
xd[i] = xdtmp;
yd[i] = ydtmp;
zd[i] = zdtmp;
}
}
static inline
void CalcPositionAndVelocityForNodes(const Real_t u_cut, Domain* domain)
{
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numNode,dimBlock);
hipLaunchKernelGGL(( CalcPositionAndVelocityForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
domain->numNode,domain->deltatime_h,u_cut,
domain->x.raw(),domain->y.raw(),domain->z.raw(),
domain->xd.raw(),domain->yd.raw(),domain->zd.raw(),
domain->xdd.raw(),domain->ydd.raw(),domain->zdd.raw());
//hipDeviceSynchronize();
//cudaCheckError();
}
static inline
void LagrangeNodal(Domain *domain)
{
#ifdef SEDOV_SYNC_POS_VEL_EARLY
Domain_member fieldData[6] ;
#endif
Real_t u_cut = domain->u_cut ;
/* time of boundary condition evaluation is beginning of step for force and
* acceleration boundary conditions. */
CalcForceForNodes(domain);
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
CommRecv(*domain, MSG_SYNC_POS_VEL, 6,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false) ;
#endif
#endif
CalcAccelerationForNodes(domain);
ApplyAccelerationBoundaryConditionsForNodes(domain);
CalcPositionAndVelocityForNodes(u_cut, domain);
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
// initialize pointers
domain->d_x = domain->x.raw();
domain->d_y = domain->y.raw();
domain->d_z = domain->z.raw();
domain->d_xd = domain->xd.raw();
domain->d_yd = domain->yd.raw();
domain->d_zd = domain->zd.raw();
fieldData[0] = &Domain::get_x ;
fieldData[1] = &Domain::get_y ;
fieldData[2] = &Domain::get_z ;
fieldData[3] = &Domain::get_xd ;
fieldData[4] = &Domain::get_yd ;
fieldData[5] = &Domain::get_zd ;
CommSendGpu(*domain, MSG_SYNC_POS_VEL, 6, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false, domain->streams[2]) ;
CommSyncPosVelGpu(*domain, &domain->streams[2]) ;
#endif
#endif
return;
}
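// AreaFace returns |f x g|^2, where f and g are the difference and sum of the
// two face diagonals; its square root is proportional to the quad face area and
// feeds the characteristic-length calculation below.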
__device__
static inline
Real_t AreaFace( const Real_t x0, const Real_t x1,
const Real_t x2, const Real_t x3,
const Real_t y0, const Real_t y1,
const Real_t y2, const Real_t y3,
const Real_t z0, const Real_t z1,
const Real_t z2, const Real_t z3)
{
Real_t fx = (x2 - x0) - (x3 - x1);
Real_t fy = (y2 - y0) - (y3 - y1);
Real_t fz = (z2 - z0) - (z3 - z1);
Real_t gx = (x2 - x0) + (x3 - x1);
Real_t gy = (y2 - y0) + (y3 - y1);
Real_t gz = (z2 - z0) + (z3 - z1);
Real_t temp = (fx * gx + fy * gy + fz * gz);
Real_t area =
(fx * fx + fy * fy + fz * fz) *
(gx * gx + gy * gy + gz * gz) -
temp * temp;
return area ;
}
__device__
static inline
Real_t CalcElemCharacteristicLength( const Real_t x[8],
const Real_t y[8],
const Real_t z[8],
const Real_t volume)
{
Real_t a, charLength = Real_t(0.0);
a = AreaFace(x[0],x[1],x[2],x[3],
y[0],y[1],y[2],y[3],
z[0],z[1],z[2],z[3]) ; // 38
charLength = FMAX(a,charLength) ;
a = AreaFace(x[4],x[5],x[6],x[7],
y[4],y[5],y[6],y[7],
z[4],z[5],z[6],z[7]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[0],x[1],x[5],x[4],
y[0],y[1],y[5],y[4],
z[0],z[1],z[5],z[4]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[1],x[2],x[6],x[5],
y[1],y[2],y[6],y[5],
z[1],z[2],z[6],z[5]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[2],x[3],x[7],x[6],
y[2],y[3],y[7],y[6],
z[2],z[3],z[7],z[6]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[3],x[0],x[4],x[7],
y[3],y[0],y[4],y[7],
z[3],z[0],z[4],z[7]) ;
charLength = FMAX(a,charLength) ;
charLength = Real_t(4.0) * volume / SQRT(charLength);
return charLength;
}
__device__
static
__forceinline__
void CalcElemVelocityGradient( const Real_t* const xvel,
const Real_t* const yvel,
const Real_t* const zvel,
const Real_t b[][8],
const Real_t detJ,
Real_t* const d )
{
const Real_t inv_detJ = Real_t(1.0) / detJ ;
Real_t dyddx, dxddy, dzddx, dxddz, dzddy, dyddz;
const Real_t* const pfx = b[0];
const Real_t* const pfy = b[1];
const Real_t* const pfz = b[2];
Real_t tmp1 = (xvel[0]-xvel[6]);
Real_t tmp2 = (xvel[1]-xvel[7]);
Real_t tmp3 = (xvel[2]-xvel[4]);
Real_t tmp4 = (xvel[3]-xvel[5]);
d[0] = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dxddy = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
dxddz = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
tmp1 = (yvel[0]-yvel[6]);
tmp2 = (yvel[1]-yvel[7]);
tmp3 = (yvel[2]-yvel[4]);
tmp4 = (yvel[3]-yvel[5]);
d[1] = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
dyddx = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dyddz = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
tmp1 = (zvel[0]-zvel[6]);
tmp2 = (zvel[1]-zvel[7]);
tmp3 = (zvel[2]-zvel[4]);
tmp4 = (zvel[3]-zvel[5]);
d[2] = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
dzddx = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dzddy = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
d[5] = Real_t( .5) * ( dxddy + dyddx );
d[4] = Real_t( .5) * ( dxddz + dzddx );
d[3] = Real_t( .5) * ( dzddy + dyddz );
}
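// Per-element gradients of position (delx_*) and velocity (delv_*) along the
// three reference directions (xi, eta, zeta), used by the monotonic Q limiter.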
static __device__ __forceinline__
void CalcMonoGradient(Real_t *x, Real_t *y, Real_t *z,
Real_t *xv, Real_t *yv, Real_t *zv,
Real_t vol,
Real_t *delx_zeta,
Real_t *delv_zeta,
Real_t *delx_xi,
Real_t *delv_xi,
Real_t *delx_eta,
Real_t *delv_eta)
{
#define SUM4(a,b,c,d) (a + b + c + d)
const Real_t ptiny = Real_t(1.e-36) ;
Real_t ax,ay,az ;
Real_t dxv,dyv,dzv ;
Real_t norm = Real_t(1.0) / ( vol + ptiny ) ;
Real_t dxj = Real_t(-0.25)*(SUM4(x[0],x[1],x[5],x[4]) - SUM4(x[3],x[2],x[6],x[7])) ;
Real_t dyj = Real_t(-0.25)*(SUM4(y[0],y[1],y[5],y[4]) - SUM4(y[3],y[2],y[6],y[7])) ;
Real_t dzj = Real_t(-0.25)*(SUM4(z[0],z[1],z[5],z[4]) - SUM4(z[3],z[2],z[6],z[7])) ;
Real_t dxi = Real_t( 0.25)*(SUM4(x[1],x[2],x[6],x[5]) - SUM4(x[0],x[3],x[7],x[4])) ;
Real_t dyi = Real_t( 0.25)*(SUM4(y[1],y[2],y[6],y[5]) - SUM4(y[0],y[3],y[7],y[4])) ;
Real_t dzi = Real_t( 0.25)*(SUM4(z[1],z[2],z[6],z[5]) - SUM4(z[0],z[3],z[7],z[4])) ;
Real_t dxk = Real_t( 0.25)*(SUM4(x[4],x[5],x[6],x[7]) - SUM4(x[0],x[1],x[2],x[3])) ;
Real_t dyk = Real_t( 0.25)*(SUM4(y[4],y[5],y[6],y[7]) - SUM4(y[0],y[1],y[2],y[3])) ;
Real_t dzk = Real_t( 0.25)*(SUM4(z[4],z[5],z[6],z[7]) - SUM4(z[0],z[1],z[2],z[3])) ;
/* find delvk and delxk ( i cross j ) */
ax = dyi*dzj - dzi*dyj ;
ay = dzi*dxj - dxi*dzj ;
az = dxi*dyj - dyi*dxj ;
*delx_zeta = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv[4],xv[5],xv[6],xv[7]) - SUM4(xv[0],xv[1],xv[2],xv[3])) ;
dyv = Real_t(0.25)*(SUM4(yv[4],yv[5],yv[6],yv[7]) - SUM4(yv[0],yv[1],yv[2],yv[3])) ;
dzv = Real_t(0.25)*(SUM4(zv[4],zv[5],zv[6],zv[7]) - SUM4(zv[0],zv[1],zv[2],zv[3])) ;
*delv_zeta = ax*dxv + ay*dyv + az*dzv ;
/* find delxi and delvi ( j cross k ) */
ax = dyj*dzk - dzj*dyk ;
ay = dzj*dxk - dxj*dzk ;
az = dxj*dyk - dyj*dxk ;
*delx_xi = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv[1],xv[2],xv[6],xv[5]) - SUM4(xv[0],xv[3],xv[7],xv[4])) ;
dyv = Real_t(0.25)*(SUM4(yv[1],yv[2],yv[6],yv[5]) - SUM4(yv[0],yv[3],yv[7],yv[4])) ;
dzv = Real_t(0.25)*(SUM4(zv[1],zv[2],zv[6],zv[5]) - SUM4(zv[0],zv[3],zv[7],zv[4])) ;
*delv_xi = ax*dxv + ay*dyv + az*dzv ;
/* find delxj and delvj ( k cross i ) */
ax = dyk*dzi - dzk*dyi ;
ay = dzk*dxi - dxk*dzi ;
az = dxk*dyi - dyk*dxi ;
*delx_eta = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(-0.25)*(SUM4(xv[0],xv[1],xv[5],xv[4]) - SUM4(xv[3],xv[2],xv[6],xv[7])) ;
dyv = Real_t(-0.25)*(SUM4(yv[0],yv[1],yv[5],yv[4]) - SUM4(yv[3],yv[2],yv[6],yv[7])) ;
dzv = Real_t(-0.25)*(SUM4(zv[0],zv[1],zv[5],zv[4]) - SUM4(zv[3],zv[2],zv[6],zv[7])) ;
*delv_eta = ax*dxv + ay*dyv + az*dzv ;
#undef SUM4
}
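// Fused element kernel: computes the new relative volume, the strain rate (its
// trace goes to vdov, the deviatoric parts to dxx/dyy/dzz), the characteristic
// length, and the monotonic Q gradient terms in one pass, one thread per element.
// Coordinates are stepped back by dt/2 so the velocity gradient is evaluated at
// the half step.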
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(64,6) // 64-bit //YKT: For Volta(64,6) For Minsky(64,8)
#else
__launch_bounds__(64,16) // 32-bit
#endif
void CalcKinematicsAndMonotonicQGradient_kernel(
Index_t numElem, Index_t padded_numElem, const Real_t dt,
const Index_t* __restrict__ nodelist, const Real_t* __restrict__ volo, const Real_t* __restrict__ v,
const Real_t* __restrict__ x,
const Real_t* __restrict__ y,
const Real_t* __restrict__ z,
const Real_t* __restrict__ xd,
const Real_t* __restrict__ yd,
const Real_t* __restrict__ zd,
//TextureObj<Real_t> x,
//TextureObj<Real_t> y,
//TextureObj<Real_t> z,
//TextureObj<Real_t> xd,
//TextureObj<Real_t> yd,
//TextureObj<Real_t> zd,
//TextureObj<Real_t>* x,
//TextureObj<Real_t>* y,
//TextureObj<Real_t>* z,
//TextureObj<Real_t>* xd,
//TextureObj<Real_t>* yd,
//TextureObj<Real_t>* zd,
Real_t* __restrict__ vnew,
Real_t* __restrict__ delv,
Real_t* __restrict__ arealg,
Real_t* __restrict__ dxx,
Real_t* __restrict__ dyy,
Real_t* __restrict__ dzz,
Real_t* __restrict__ vdov,
Real_t* __restrict__ delx_zeta,
Real_t* __restrict__ delv_zeta,
Real_t* __restrict__ delx_xi,
Real_t* __restrict__ delv_xi,
Real_t* __restrict__ delx_eta,
Real_t* __restrict__ delv_eta,
Index_t* __restrict__ bad_vol,
const Index_t num_threads
)
{
Real_t B[3][8] ; /** shape function derivatives */
Index_t nodes[8] ;
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
Real_t xd_local[8] ;
Real_t yd_local[8] ;
Real_t zd_local[8] ;
Real_t D[6];
int k=blockDim.x*blockIdx.x+threadIdx.x;
if ( k < num_threads) {
Real_t volume ;
Real_t relativeVolume ;
// get nodal coordinates from global arrays and copy into local arrays.
//#pragma unroll
//for( Index_t lnode=0 ; lnode<8 ; ++lnode )
//{
// Index_t gnode = nodelist[k+lnode*padded_numElem];
// nodes[lnode] = gnode;
// x_local[lnode] = x[gnode];
// y_local[lnode] = y[gnode];
// z_local[lnode] = z[gnode];
//}
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist[k+lnode*padded_numElem];
nodes[lnode] = gnode;
}
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
x_local[lnode] = x[nodes[lnode]];
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
y_local[lnode] = y[nodes[lnode]];
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
z_local[lnode] = z[nodes[lnode]];
// volume calculations
volume = CalcElemVolume(x_local, y_local, z_local );
relativeVolume = volume / volo[k] ;
vnew[k] = relativeVolume ;
delv[k] = relativeVolume - v[k] ;
// set characteristic length
arealg[k] = CalcElemCharacteristicLength(x_local,y_local,z_local,volume);
// get nodal velocities from global array and copy into local arrays.
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodes[lnode];
xd_local[lnode] = xd[gnode];
yd_local[lnode] = yd[gnode];
zd_local[lnode] = zd[gnode];
}
Real_t dt2 = Real_t(0.5) * dt;
#pragma unroll
for ( Index_t j=0 ; j<8 ; ++j )
{
x_local[j] -= dt2 * xd_local[j];
y_local[j] -= dt2 * yd_local[j];
z_local[j] -= dt2 * zd_local[j];
}
Real_t detJ;
CalcElemShapeFunctionDerivatives(x_local,y_local,z_local,B,&detJ );
CalcElemVelocityGradient(xd_local,yd_local,zd_local,B,detJ,D);
// ------------------------
// CALC LAGRANGE ELEM 2
// ------------------------
// calc strain rate and apply as constraint (only done in FB element)
Real_t vdovNew = D[0] + D[1] + D[2];
Real_t vdovthird = vdovNew/Real_t(3.0) ;
// make the rate of deformation tensor deviatoric
vdov[k] = vdovNew ;
dxx[k] = D[0] - vdovthird ;
dyy[k] = D[1] - vdovthird ;
dzz[k] = D[2] - vdovthird ;
// ------------------------
// CALC MONOTONIC Q GRADIENT
// ------------------------
Real_t vol = volo[k]*vnew[k];
// Undo x_local update
#pragma unroll
for ( Index_t j=0 ; j<8 ; ++j ) {
x_local[j] += dt2 * xd_local[j];
y_local[j] += dt2 * yd_local[j];
z_local[j] += dt2 * zd_local[j];
}
CalcMonoGradient(x_local,y_local,z_local,xd_local,yd_local,zd_local,
vol,
&delx_zeta[k],&delv_zeta[k],&delx_xi[k],
&delv_xi[k], &delx_eta[k], &delv_eta[k]);
//Check for bad volume
if (relativeVolume < 0)
*bad_vol = k;
}
}
static inline
void CalcKinematicsAndMonotonicQGradient(Domain *domain)
{
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
int num_threads = numElem;
const int block_size = 64;
int dimGrid = PAD_DIV(num_threads,block_size);
hipLaunchKernelGGL(( CalcKinematicsAndMonotonicQGradient_kernel), dim3(dimGrid),dim3(block_size), 0, 0,
numElem,padded_numElem, domain->deltatime_h,
domain->nodelist.raw(),
domain->volo.raw(),
domain->v.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
domain->vnew->raw(),
domain->delv.raw(),
domain->arealg.raw(),
domain->dxx->raw(),
domain->dyy->raw(),
domain->dzz->raw(),
domain->vdov.raw(),
domain->delx_zeta->raw(),
domain->delv_zeta->raw(),
domain->delx_xi->raw(),
domain->delv_xi->raw(),
domain->delx_eta->raw(),
domain->delv_eta->raw(),
domain->bad_vol_h,
num_threads
);
//hipDeviceSynchronize();
//cudaCheckError();
}
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(128,16)
#else
__launch_bounds__(128,16)
#endif
void CalcMonotonicQRegionForElems_kernel(
Real_t qlc_monoq,
Real_t qqc_monoq,
Real_t monoq_limiter_mult,
Real_t monoq_max_slope,
Real_t ptiny,
// the elementset length
Index_t elength,
Index_t* regElemlist,
// const Index_t* __restrict__ regElemlist,
Index_t *elemBC,
Index_t *lxim,
Index_t *lxip,
Index_t *letam,
Index_t *letap,
Index_t *lzetam,
Index_t *lzetap,
Real_t *delv_xi,
Real_t *delv_eta,
Real_t *delv_zeta,
Real_t *delx_xi,
Real_t *delx_eta,
Real_t *delx_zeta,
Real_t *vdov,Real_t *elemMass,Real_t *volo,Real_t *vnew,
Real_t *qq, Real_t *ql,
Real_t *q,
Real_t qstop,
Index_t* bad_q
)
{
int ielem=blockDim.x*blockIdx.x + threadIdx.x;
if (ielem<elength) {
Real_t qlin, qquad ;
Real_t phixi, phieta, phizeta ;
Index_t i = regElemlist[ielem];
Int_t bcMask = elemBC[i] ;
Real_t delvm, delvp ;
/* phixi */
Real_t norm = Real_t(1.) / ( delv_xi[i] + ptiny ) ;
switch (bcMask & XI_M) {
case XI_M_COMM: /* needs comm data */
case 0: delvm = delv_xi[lxim[i]] ; break ;
case XI_M_SYMM: delvm = delv_xi[i] ; break ;
case XI_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & XI_P) {
case XI_P_COMM: /* needs comm data */
case 0: delvp = delv_xi[lxip[i]] ; break ;
case XI_P_SYMM: delvp = delv_xi[i] ; break ;
case XI_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phixi = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phixi ) phixi = delvm ;
if ( delvp < phixi ) phixi = delvp ;
if ( phixi < Real_t(0.)) phixi = Real_t(0.) ;
if ( phixi > monoq_max_slope) phixi = monoq_max_slope;
/* phieta */
norm = Real_t(1.) / ( delv_eta[i] + ptiny ) ;
switch (bcMask & ETA_M) {
case ETA_M_COMM: /* needs comm data */
case 0: delvm = delv_eta[letam[i]] ; break ;
case ETA_M_SYMM: delvm = delv_eta[i] ; break ;
case ETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ETA_P) {
case ETA_P_COMM: /* needs comm data */
case 0: delvp = delv_eta[letap[i]] ; break ;
case ETA_P_SYMM: delvp = delv_eta[i] ; break ;
case ETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phieta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phieta ) phieta = delvm ;
if ( delvp < phieta ) phieta = delvp ;
if ( phieta < Real_t(0.)) phieta = Real_t(0.) ;
if ( phieta > monoq_max_slope) phieta = monoq_max_slope;
/* phizeta */
norm = Real_t(1.) / ( delv_zeta[i] + ptiny ) ;
switch (bcMask & ZETA_M) {
case ZETA_M_COMM: /* needs comm data */
case 0: delvm = delv_zeta[lzetam[i]] ; break ;
case ZETA_M_SYMM: delvm = delv_zeta[i] ; break ;
case ZETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ZETA_P) {
case ZETA_P_COMM: /* needs comm data */
case 0: delvp = delv_zeta[lzetap[i]] ; break ;
case ZETA_P_SYMM: delvp = delv_zeta[i] ; break ;
case ZETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phizeta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phizeta ) phizeta = delvm ;
if ( delvp < phizeta ) phizeta = delvp ;
if ( phizeta < Real_t(0.)) phizeta = Real_t(0.);
if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope;
/* Remove length scale */
if ( vdov[i] > Real_t(0.) ) {
qlin = Real_t(0.) ;
qquad = Real_t(0.) ;
}
else {
Real_t delvxxi = delv_xi[i] * delx_xi[i] ;
Real_t delvxeta = delv_eta[i] * delx_eta[i] ;
Real_t delvxzeta = delv_zeta[i] * delx_zeta[i] ;
if ( delvxxi > Real_t(0.) ) delvxxi = Real_t(0.) ;
if ( delvxeta > Real_t(0.) ) delvxeta = Real_t(0.) ;
if ( delvxzeta > Real_t(0.) ) delvxzeta = Real_t(0.) ;
Real_t rho = elemMass[i] / (volo[i] * vnew[i]) ;
qlin = -qlc_monoq * rho *
( delvxxi * (Real_t(1.) - phixi) +
delvxeta * (Real_t(1.) - phieta) +
delvxzeta * (Real_t(1.) - phizeta) ) ;
qquad = qqc_monoq * rho *
( delvxxi*delvxxi * (Real_t(1.) - phixi*phixi) +
delvxeta*delvxeta * (Real_t(1.) - phieta*phieta) +
delvxzeta*delvxzeta * (Real_t(1.) - phizeta*phizeta) ) ;
}
qq[i] = qquad ;
ql[i] = qlin ;
// Don't allow excessive artificial viscosity
if (q[i] > qstop)
*(bad_q) = i;
}
}
static inline
void CalcMonotonicQRegionForElems(Domain *domain)
{
const Real_t ptiny = Real_t(1.e-36) ;
Real_t monoq_max_slope = domain->monoq_max_slope ;
Real_t monoq_limiter_mult = domain->monoq_limiter_mult ;
Real_t qlc_monoq = domain->qlc_monoq;
Real_t qqc_monoq = domain->qqc_monoq;
Index_t elength = domain->numElem;
Index_t dimBlock= 128;
Index_t dimGrid = PAD_DIV(elength,dimBlock);
hipLaunchKernelGGL(( CalcMonotonicQRegionForElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
qlc_monoq,qqc_monoq,monoq_limiter_mult,monoq_max_slope,ptiny,elength,
domain->regElemlist.raw(),domain->elemBC.raw(),
domain->lxim.raw(),domain->lxip.raw(),
domain->letam.raw(),domain->letap.raw(),
domain->lzetam.raw(),domain->lzetap.raw(),
domain->delv_xi->raw(),domain->delv_eta->raw(),domain->delv_zeta->raw(),
domain->delx_xi->raw(),domain->delx_eta->raw(),domain->delx_zeta->raw(),
domain->vdov.raw(),domain->elemMass.raw(),domain->volo.raw(),domain->vnew->raw(),
domain->qq.raw(),domain->ql.raw(),
domain->q.raw(),
domain->qstop,
domain->bad_q_h
);
//hipDeviceSynchronize();
//cudaCheckError();
}
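// Pressure update: p = (2/3)*(1 + compression)*e, i.e. an ideal-gas EOS with
// gamma = 5/3 written in terms of compression = 1/v - 1. Values below p_cut are
// zeroed, vnewc >= eosvmax forces p to zero, and pmin is enforced as a floor.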
static
__device__ __forceinline__
void CalcPressureForElems_device(
Real_t& p_new, Real_t& bvc,
Real_t& pbvc, Real_t& e_old,
Real_t& compression, Real_t& vnewc,
Real_t pmin,
Real_t p_cut, Real_t eosvmax)
{
Real_t c1s = Real_t(2.0)/Real_t(3.0);
Real_t p_temp = p_new;
bvc = c1s * (compression + Real_t(1.));
pbvc = c1s;
p_temp = bvc * e_old ;
if ( FABS(p_temp) < p_cut )
p_temp = Real_t(0.0) ;
if ( vnewc >= eosvmax ) /* impossible condition here? */
p_temp = Real_t(0.0) ;
if (p_temp < pmin)
p_temp = pmin ;
p_new = p_temp;
}
static
__device__ __forceinline__
void CalcSoundSpeedForElems_device(Real_t& vnewc, Real_t rho0, Real_t &enewc,
Real_t &pnewc, Real_t &pbvc,
Real_t &bvc, Real_t ss4o3, Index_t nz,
Real_t *ss, Index_t iz)
{
Real_t ssTmp = (pbvc * enewc + vnewc * vnewc *
bvc * pnewc) / rho0;
if (ssTmp <= Real_t(.1111111e-36)) {
ssTmp = Real_t(.3333333e-18);
}
else {
ssTmp = SQRT(ssTmp) ;
}
ss[iz] = ssTmp;
}
static
__device__
__forceinline__
void ApplyMaterialPropertiesForElems_device(
Real_t& eosvmin, Real_t& eosvmax,
Real_t* vnew, Real_t *v,
Real_t& vnewc, Index_t* bad_vol, Index_t zn)
{
vnewc = vnew[zn] ;
if (eosvmin != Real_t(0.)) {
if (vnewc < eosvmin)
vnewc = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vnewc > eosvmax)
vnewc = eosvmax ;
}
// Now check for valid volume
Real_t vc = v[zn];
if (eosvmin != Real_t(0.)) {
if (vc < eosvmin)
vc = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vc > eosvmax)
vc = eosvmax ;
}
if (vc <= 0.) {
*bad_vol = zn;
}
}
static
__device__
__forceinline__
void UpdateVolumesForElems_device(Index_t numElem, Real_t& v_cut,
Real_t *vnew,
Real_t *v,
int i)
{
Real_t tmpV ;
tmpV = vnew[i] ;
if ( FABS(tmpV - Real_t(1.0)) < v_cut )
tmpV = Real_t(1.0) ;
v[i] = tmpV ;
}
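// Predictor-corrector energy update: advance e with the old p and q, evaluate a
// half-step pressure (pHalfStep) and viscosity, correct e, recompute the
// full-step pressure, then apply a final correction weighted by delvc/6 before
// the last clamps against e_cut and emin.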
static
__device__
__forceinline__
void CalcEnergyForElems_device(Real_t& p_new, Real_t& e_new, Real_t& q_new,
Real_t& bvc, Real_t& pbvc,
Real_t& p_old, Real_t& e_old, Real_t& q_old,
Real_t& compression, Real_t& compHalfStep,
Real_t& vnewc, Real_t& work, Real_t& delvc, Real_t pmin,
Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin,
Real_t& qq, Real_t& ql,
Real_t& rho0,
Real_t& eosvmax,
Index_t length)
{
const Real_t sixth = Real_t(1.0) / Real_t(6.0) ;
Real_t pHalfStep;
e_new = e_old - Real_t(0.5) * delvc * (p_old + q_old)
+ Real_t(0.5) * work;
if (e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc,
pmin, p_cut, eosvmax);
Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep) ;
if ( delvc > Real_t(0.) ) {
q_new = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc * e_new
+ vhalf * vhalf * bvc * pHalfStep ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc =Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_new = (ssc*ql + qq) ;
}
e_new = e_new + Real_t(0.5) * delvc
* ( Real_t(3.0)*(p_old + q_old)
- Real_t(4.0)*(pHalfStep + q_new)) ;
e_new += Real_t(0.5) * work;
if (FABS(e_new) < e_cut) {
e_new = Real_t(0.) ;
}
if ( e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax);
Real_t q_tilde ;
if (delvc > Real_t(0.)) {
q_tilde = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc * e_new
+ vnewc * vnewc * bvc * p_new ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_tilde = (ssc*ql + qq) ;
}
e_new = e_new - ( Real_t(7.0)*(p_old + q_old)
- Real_t(8.0)*(pHalfStep + q_new)
+ (p_new + q_tilde)) * delvc*sixth ;
if (FABS(e_new) < e_cut) {
e_new = Real_t(0.) ;
}
if ( e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax);
if ( delvc <= Real_t(0.) ) {
Real_t ssc = ( pbvc * e_new
+ vnewc * vnewc * bvc * p_new ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_new = (ssc*ql + qq) ;
if (FABS(q_new) < q_cut) q_new = Real_t(0.) ;
}
return ;
}
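// Map an element's position in the material-ordered list to its region number.
// regCSR holds the running end offsets of the regions, so a linear scan finds
// the first region whose end offset exceeds i (e.g. regCSR = {100, 250, 400}
// maps i = 120 to region 1 -- illustrative values only).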
__device__ inline
Index_t giveMyRegion(const Index_t* regCSR,const Index_t i, const Index_t numReg)
{
for(Index_t reg = 0; reg < numReg-1; reg++)
if(i < regCSR[reg])
return reg;
return (numReg-1);
}
__global__
void ApplyMaterialPropertiesAndUpdateVolume_kernel(
Index_t length,
Real_t rho0,
Real_t e_cut,
Real_t emin,
Real_t* __restrict__ ql,
Real_t* __restrict__ qq,
Real_t* __restrict__ vnew,
Real_t* __restrict__ v,
Real_t pmin,
Real_t p_cut,
Real_t q_cut,
Real_t eosvmin,
Real_t eosvmax,
Index_t* __restrict__ regElemlist,
// const Index_t* __restrict__ regElemlist,
Real_t* __restrict__ e,
Real_t* __restrict__ delv,
Real_t* __restrict__ p,
Real_t* __restrict__ q,
Real_t ss4o3,
Real_t* __restrict__ ss,
Real_t v_cut,
Index_t* __restrict__ bad_vol,
const Int_t cost,
const Index_t* regCSR,
const Index_t* regReps,
const Index_t numReg
)
{
Real_t e_old, delvc, p_old, q_old, e_temp, delvc_temp, p_temp, q_temp;
Real_t compression, compHalfStep;
Real_t qq_old, ql_old, qq_temp, ql_temp, work;
Real_t p_new, e_new, q_new;
Real_t bvc, pbvc, vnewc;
Index_t i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
Index_t zidx = regElemlist[i] ;
ApplyMaterialPropertiesForElems_device
(eosvmin,eosvmax,vnew,v,vnewc,bad_vol,zidx);
/********************** Start EvalEOSForElems **************************/
// Find out which region this element belongs to and how many EOS repetitions (rep) it needs.
Index_t region = giveMyRegion(regCSR,i,numReg);
Index_t rep = regReps[region];
e_temp = e[zidx];
p_temp = p[zidx];
q_temp = q[zidx];
qq_temp = qq[zidx] ;
ql_temp = ql[zidx] ;
delvc_temp = delv[zidx];
for(int r=0; r < rep; r++)
{
e_old = e_temp;
p_old = p_temp;
q_old = q_temp;
qq_old = qq_temp;
ql_old = ql_temp;
delvc = delvc_temp;
work = Real_t(0.);
Real_t vchalf ;
compression = Real_t(1.) / vnewc - Real_t(1.);
vchalf = vnewc - delvc * Real_t(.5);
compHalfStep = Real_t(1.) / vchalf - Real_t(1.);
if ( eosvmin != Real_t(0.) ) {
if (vnewc <= eosvmin) { /* impossible due to calling func? */
compHalfStep = compression ;
}
}
if ( eosvmax != Real_t(0.) ) {
if (vnewc >= eosvmax) { /* impossible due to calling func? */
p_old = Real_t(0.) ;
compression = Real_t(0.) ;
compHalfStep = Real_t(0.) ;
}
}
// qq_old = qq[zidx] ;
// ql_old = ql[zidx] ;
// work = Real_t(0.) ;
CalcEnergyForElems_device(p_new, e_new, q_new, bvc, pbvc,
p_old, e_old, q_old, compression, compHalfStep,
vnewc, work, delvc, pmin,
p_cut, e_cut, q_cut, emin,
qq_old, ql_old, rho0, eosvmax, length);
}//end for rep
p[zidx] = p_new ;
e[zidx] = e_new ;
q[zidx] = q_new ;
CalcSoundSpeedForElems_device
(vnewc,rho0,e_new,p_new,pbvc,bvc,ss4o3,length,ss,zidx);
/********************** End EvalEOSForElems **************************/
UpdateVolumesForElems_device(length,v_cut,vnew,v,zidx);
}
}
static inline
void ApplyMaterialPropertiesAndUpdateVolume(Domain *domain)
{
Index_t length = domain->numElem ;
if (length != 0) {
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(length,dimBlock);
hipLaunchKernelGGL(( ApplyMaterialPropertiesAndUpdateVolume_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
length,
domain->refdens,
domain->e_cut,
domain->emin,
domain->ql.raw(),
domain->qq.raw(),
domain->vnew->raw(),
domain->v.raw(),
domain->pmin,
domain->p_cut,
domain->q_cut,
domain->eosvmin,
domain->eosvmax,
domain->regElemlist.raw(),
domain->e.raw(),
domain->delv.raw(),
domain->p.raw(),
domain->q.raw(),
domain->ss4o3,
domain->ss.raw(),
domain->v_cut,
domain->bad_vol_h,
domain->cost,
domain->regCSR.raw(),
domain->regReps.raw(),
domain->numReg
);
//hipDeviceSynchronize();
//cudaCheckError();
}
}
static inline
void LagrangeElements(Domain *domain)
{
int allElem = domain->numElem + /* local elem */
2*domain->sizeX*domain->sizeY + /* plane ghosts */
2*domain->sizeX*domain->sizeZ + /* row ghosts */
2*domain->sizeY*domain->sizeZ ; /* col ghosts */
domain->vnew = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dxx = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dyy = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dzz = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_xi = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_eta = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_zeta = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delv_xi = Allocator< Vector_d<Real_t> >::allocate(allElem);
domain->delv_eta = Allocator< Vector_d<Real_t> >::allocate(allElem);
domain->delv_zeta = Allocator< Vector_d<Real_t> >::allocate(allElem);
#if USE_MPI
CommRecv(*domain, MSG_MONOQ, 3,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true) ;
#endif
/*********************************************/
/* Calc Kinematics and Monotonic Q Gradient */
/*********************************************/
CalcKinematicsAndMonotonicQGradient(domain);
#if USE_MPI
Domain_member fieldData[3] ;
// initialize pointers
domain->d_delv_xi = domain->delv_xi->raw();
domain->d_delv_eta = domain->delv_eta->raw();
domain->d_delv_zeta = domain->delv_zeta->raw();
fieldData[0] = &Domain::get_delv_xi ;
fieldData[1] = &Domain::get_delv_eta ;
fieldData[2] = &Domain::get_delv_zeta ;
CommSendGpu(*domain, MSG_MONOQ, 3, fieldData,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true, domain->streams[2]) ;
CommMonoQGpu(*domain, domain->streams[2]) ;
#endif
Allocator<Vector_d<Real_t> >::free(domain->dxx,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->dyy,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->dzz,domain->numElem);
/**********************************
* Calc Monotonic Q Region
**********************************/
CalcMonotonicQRegionForElems(domain);
Allocator<Vector_d<Real_t> >::free(domain->delx_xi,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delx_eta,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delx_zeta,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_xi,allElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_eta,allElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_zeta,allElem);
// printf("\n --Start of ApplyMaterials! \n");
ApplyMaterialPropertiesAndUpdateVolume(domain) ;
// printf("\n --End of ApplyMaterials! \n");
Allocator<Vector_d<Real_t> >::free(domain->vnew,domain->numElem);
}
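// Time-constraint reduction, stage 1: each thread strides over its share of
// elements accumulating the minimum hydro and Courant timesteps, then a
// shared-memory tree reduction (volatile arrays, relying on implicit
// warp-level synchrony for the last 32 lanes) writes one partial minimum per
// block to dev_mindtcourant / dev_mindthydro.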
template<int block_size>
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(128,16)
#else
__launch_bounds__(128,16)
#endif
void CalcTimeConstraintsForElems_kernel(
Index_t length,
Real_t qqc2,
Real_t dvovmax,
Index_t *matElemlist,
Real_t *ss,
Real_t *vdov,
Real_t *arealg,
Real_t *dev_mindtcourant,
Real_t *dev_mindthydro)
{
int tid = threadIdx.x;
int i=blockDim.x*blockIdx.x + tid;
__shared__ volatile Real_t s_mindthydro[block_size];
__shared__ volatile Real_t s_mindtcourant[block_size];
Real_t mindthydro = Real_t(1.0e+20) ;
Real_t mindtcourant = Real_t(1.0e+20) ;
Real_t dthydro = mindthydro;
Real_t dtcourant = mindtcourant;
while (i<length) {
Index_t indx = matElemlist[i] ;
Real_t vdov_tmp = vdov[indx];
// Computing dt_hydro
if (vdov_tmp != Real_t(0.)) {
Real_t dtdvov = dvovmax / (FABS(vdov_tmp)+Real_t(1.e-20)) ;
if ( dthydro > dtdvov ) {
dthydro = dtdvov ;
}
}
if (dthydro < mindthydro)
mindthydro = dthydro;
// Computing dt_courant
Real_t ss_tmp = ss[indx];
Real_t area_tmp = arealg[indx];
Real_t dtf = ss_tmp * ss_tmp ;
dtf += ((vdov_tmp < 0.) ? qqc2*area_tmp*area_tmp*vdov_tmp*vdov_tmp : 0.);
dtf = area_tmp / SQRT(dtf) ;
/* determine minimum timestep with its corresponding elem */
if (vdov_tmp != Real_t(0.) && dtf < dtcourant) {
dtcourant = dtf ;
}
if (dtcourant< mindtcourant)
mindtcourant= dtcourant;
i += gridDim.x*blockDim.x;
}
s_mindthydro[tid] = mindthydro;
s_mindtcourant[tid] = mindtcourant;
__syncthreads();
// Do shared memory reduction
if (block_size >= 1024) {
if (tid < 512) {
s_mindthydro[tid] = min( s_mindthydro[tid] , s_mindthydro[tid + 512]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 512]) ; }
__syncthreads(); }
if (block_size >= 512) {
if (tid < 256) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 256]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 256]) ; }
__syncthreads(); }
if (block_size >= 256) {
if (tid < 128) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 128]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 128]) ; }
__syncthreads(); }
if (block_size >= 128) {
if (tid < 64) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 64]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 64]) ; }
__syncthreads(); }
if (tid < 32) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 32]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 32]) ;
}
if (tid < 16) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 16]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 16]) ;
}
if (tid < 8) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 8]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 8]) ;
}
if (tid < 4) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 4]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 4]) ;
}
if (tid < 2) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 2]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 2]) ;
}
if (tid < 1) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 1]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 1]) ;
}
// Store in global memory
if (tid==0) {
dev_mindtcourant[blockIdx.x] = s_mindtcourant[0];
dev_mindthydro[blockIdx.x] = s_mindthydro[0];
}
}
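// Stage 2: launched with exactly two blocks -- block 0 folds the per-block
// Courant minima into *dtcourant, block 1 folds the hydro minima into *dthydro.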
template <int block_size>
__global__
void CalcMinDtOneBlock(Real_t* dev_mindthydro, Real_t* dev_mindtcourant, Real_t* dtcourant, Real_t* dthydro, Index_t shared_array_size)
{
volatile __shared__ Real_t s_data[block_size];
int tid = threadIdx.x;
if (blockIdx.x==0)
{
if (tid < shared_array_size)
s_data[tid] = dev_mindtcourant[tid];
else
s_data[tid] = 1.0e20;
__syncthreads();
if (block_size >= 1024) { if (tid < 512) { s_data[tid] = min(s_data[tid],s_data[tid + 512]); } __syncthreads(); }
if (block_size >= 512) { if (tid < 256) { s_data[tid] = min(s_data[tid],s_data[tid + 256]); } __syncthreads(); }
if (block_size >= 256) { if (tid < 128) { s_data[tid] = min(s_data[tid],s_data[tid + 128]); } __syncthreads(); }
if (block_size >= 128) { if (tid < 64) { s_data[tid] = min(s_data[tid],s_data[tid + 64]); } __syncthreads(); }
if (tid < 32) { s_data[tid] = min(s_data[tid],s_data[tid + 32]); }
if (tid < 16) { s_data[tid] = min(s_data[tid],s_data[tid + 16]); }
if (tid < 8) { s_data[tid] = min(s_data[tid],s_data[tid + 8]); }
if (tid < 4) { s_data[tid] = min(s_data[tid],s_data[tid + 4]); }
if (tid < 2) { s_data[tid] = min(s_data[tid],s_data[tid + 2]); }
if (tid < 1) { s_data[tid] = min(s_data[tid],s_data[tid + 1]); }
if (tid<1)
{
*(dtcourant)= s_data[0];
}
}
else if (blockIdx.x==1)
{
if (tid < shared_array_size)
s_data[tid] = dev_mindthydro[tid];
else
s_data[tid] = 1.0e20;
__syncthreads();
if (block_size >= 1024) { if (tid < 512) { s_data[tid] = min(s_data[tid],s_data[tid + 512]); } __syncthreads(); }
if (block_size >= 512) { if (tid < 256) { s_data[tid] = min(s_data[tid],s_data[tid + 256]); } __syncthreads(); }
if (block_size >= 256) { if (tid < 128) { s_data[tid] = min(s_data[tid],s_data[tid + 128]); } __syncthreads(); }
if (block_size >= 128) { if (tid < 64) { s_data[tid] = min(s_data[tid],s_data[tid + 64]); } __syncthreads(); }
if (tid < 32) { s_data[tid] = min(s_data[tid],s_data[tid + 32]); }
if (tid < 16) { s_data[tid] = min(s_data[tid],s_data[tid + 16]); }
if (tid < 8) { s_data[tid] = min(s_data[tid],s_data[tid + 8]); }
if (tid < 4) { s_data[tid] = min(s_data[tid],s_data[tid + 4]); }
if (tid < 2) { s_data[tid] = min(s_data[tid],s_data[tid + 2]); }
if (tid < 1) { s_data[tid] = min(s_data[tid],s_data[tid + 1]); }
if (tid<1)
{
*(dthydro) = s_data[0];
}
}
}
static inline
void CalcTimeConstraintsForElems(Domain* domain)
{
Real_t qqc = domain->qqc;
Real_t qqc2 = Real_t(64.0) * qqc * qqc ;
Real_t dvovmax = domain->dvovmax ;
const Index_t length = domain->numElem;
const int max_dimGrid = 1024;
const int dimBlock = 128;
int dimGrid=::min(max_dimGrid,PAD_DIV(length,dimBlock));
hipFuncSetCacheConfig(reinterpret_cast<const void*>(&CalcTimeConstraintsForElems_kernel<dimBlock>), hipFuncCachePreferShared);
Vector_d<Real_t>* dev_mindtcourant= Allocator< Vector_d<Real_t> >::allocate(dimGrid);
Vector_d<Real_t>* dev_mindthydro = Allocator< Vector_d<Real_t> >::allocate(dimGrid);
hipLaunchKernelGGL(( CalcTimeConstraintsForElems_kernel<dimBlock>) , dim3(dimGrid),dim3(dimBlock), 0, 0,
length,qqc2,dvovmax,
domain->matElemlist.raw(),domain->ss.raw(),domain->vdov.raw(),domain->arealg.raw(),
dev_mindtcourant->raw(),dev_mindthydro->raw());
// TODO: if dimGrid < 1024, we should launch fewer threads
hipLaunchKernelGGL(( CalcMinDtOneBlock<max_dimGrid>) , dim3(2),dim3(max_dimGrid), max_dimGrid*sizeof(Real_t), domain->streams[1], dev_mindthydro->raw(),dev_mindtcourant->raw(),domain->dtcourant_h,domain->dthydro_h, dimGrid);
hipEventRecord(domain->time_constraint_computed,domain->streams[1]);
Allocator<Vector_d<Real_t> >::free(dev_mindtcourant,dimGrid);
Allocator<Vector_d<Real_t> >::free(dev_mindthydro,dimGrid);
}
static inline
void LagrangeLeapFrog(Domain* domain)
{
/* calculate nodal forces, accelerations, velocities, positions, with
* applied boundary conditions and slide surface considerations */
LagrangeNodal(domain);
/* calculate element quantities (i.e. velocity gradient & q), and update
* material states */
LagrangeElements(domain);
CalcTimeConstraintsForElems(domain);
}
void printUsage(char* argv[])
{
printf("Usage: \n");
printf("Unstructured grid: %s -u <file.lmesh> \n", argv[0]) ;
printf("Structured grid: %s -s numEdgeElems \n", argv[0]) ;
printf("\nExamples:\n") ;
printf("%s -s 45\n", argv[0]) ;
printf("%s -u sedov15oct.lmesh\n", argv[0]) ;
}
#ifdef SAMI
#ifdef __cplusplus
extern "C" {
#endif
#include "silo.h"
#ifdef __cplusplus
}
#endif
#define MAX_LEN_SAMI_HEADER 10
#define SAMI_HDR_NUMBRICK 0
#define SAMI_HDR_NUMNODES 3
#define SAMI_HDR_NUMMATERIAL 4
#define SAMI_HDR_INDEX_START 6
#define SAMI_HDR_MESHDIM 7
#define MAX_ADJACENCY 14 /* must be 14 or greater */
void DumpSAMI(Domain *domain, char *name)
{
DBfile *fp ;
int headerLen = MAX_LEN_SAMI_HEADER ;
int headerInfo[MAX_LEN_SAMI_HEADER];
char varName[] = "brick_nd0";
char coordName[] = "x";
int version = 121 ;
int numElem = int(domain->numElem) ;
int numNode = int(domain->numNode) ;
int count ;
int *materialID ;
int *nodeConnect ;
double *nodeCoord ;
if ((fp = DBCreate(name, DB_CLOBBER, DB_LOCAL,
NULL, DB_PDB)) == NULL)
{
printf("Couldn't create file %s\n", name) ;
exit(1);
}
for (int i=0; i<MAX_LEN_SAMI_HEADER; ++i) {
headerInfo[i] = 0 ;
}
headerInfo[SAMI_HDR_NUMBRICK] = numElem ;
headerInfo[SAMI_HDR_NUMNODES] = numNode ;
headerInfo[SAMI_HDR_NUMMATERIAL] = 1 ;
headerInfo[SAMI_HDR_INDEX_START] = 1 ;
headerInfo[SAMI_HDR_MESHDIM] = 3 ;
DBWrite(fp, "mesh_data", headerInfo, &headerLen, 1, DB_INT) ;
count = 1 ;
DBWrite(fp, "version", &version, &count, 1, DB_INT) ;
nodeConnect = new int[numElem] ;
Vector_h<Index_t> nodelist_h = domain->nodelist;
for (Index_t i=0; i<8; ++i)
{
for (Index_t j=0; j<numElem; ++j) {
nodeConnect[j] = int(nodelist_h[i*domain->padded_numElem + j]) + 1 ;
}
varName[8] = '0' + i;
DBWrite(fp, varName, nodeConnect, &numElem, 1, DB_INT) ;
}
delete [] nodeConnect ;
nodeCoord = new double[numNode] ;
Vector_h<Real_t> x_h = domain->x;
Vector_h<Real_t> y_h = domain->y;
Vector_h<Real_t> z_h = domain->z;
for (Index_t i=0; i<3; ++i)
{
for (Index_t j=0; j<numNode; ++j) {
Real_t coordVal ;
switch(i) {
case 0: coordVal = double(x_h[j]) ; break ;
case 1: coordVal = double(y_h[j]) ; break ;
case 2: coordVal = double(z_h[j]) ; break ;
}
nodeCoord[j] = coordVal ;
}
coordName[0] = 'x' + i ;
DBWrite(fp, coordName, nodeCoord, &numNode, 1, DB_DOUBLE) ;
}
delete [] nodeCoord ;
materialID = new int[numElem] ;
for (Index_t i=0; i<numElem; ++i)
materialID[i] = 1 ;
DBWrite(fp, "brick_material", materialID, &numElem, 1, DB_INT) ;
delete [] materialID ;
DBClose(fp);
}
#endif
#ifdef SAMI
void DumpDomain(Domain *domain)
{
char meshName[64] ;
printf("Dumping SAMI file\n");
sprintf(meshName, "sedov_%d.sami", int(domain->cycle)) ;
DumpSAMI(domain, meshName) ;
}
#endif
void write_solution(Domain* locDom)
{
Vector_h<Real_t> x_h = locDom->x;
Vector_h<Real_t> y_h = locDom->y;
Vector_h<Real_t> z_h = locDom->z;
// printf("Writing solution to file xyz.asc\n");
std::stringstream filename;
filename << "xyz.asc";
FILE *fout = fopen(filename.str().c_str(),"wb");
for (Index_t i=0; i<locDom->numNode; i++) {
fprintf(fout,"%10d\n",i);
fprintf(fout,"%.10f\n",x_h[i]);
fprintf(fout,"%.10f\n",y_h[i]);
fprintf(fout,"%.10f\n",z_h[i]);
}
fclose(fout);
}
///////////////////////////////////////////////////////////////////////////
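// Decompose numRanks MPI ranks onto a cube of side cbrt(numRanks) and return
// this rank's (col, row, plane) coordinates plus the cube side length.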
void InitMeshDecomp(Int_t numRanks, Int_t myRank,
Int_t *col, Int_t *row, Int_t *plane, Int_t *side)
{
Int_t testProcs;
Int_t dx, dy, dz;
Int_t myDom;
// Assume cube processor layout for now
testProcs = Int_t(cbrt(Real_t(numRanks))+0.5) ;
if (testProcs*testProcs*testProcs != numRanks) {
printf("Num processors must be a cube of an integer (1, 8, 27, ...)\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
if (sizeof(Real_t) != 4 && sizeof(Real_t) != 8) {
printf("MPI operations only support float and double right now...\n");
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
if (MAX_FIELDS_PER_MPI_COMM > CACHE_COHERENCE_PAD_REAL) {
printf("corner element comm buffers too small. Fix code.\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
dx = testProcs ;
dy = testProcs ;
dz = testProcs ;
// temporary test
if (dx*dy*dz != numRanks) {
printf("error -- must have as many domains as procs\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
Int_t remainder = dx*dy*dz % numRanks ;
if (myRank < remainder) {
myDom = myRank*( 1+ (dx*dy*dz / numRanks)) ;
}
else {
myDom = remainder*( 1+ (dx*dy*dz / numRanks)) +
(myRank - remainder)*(dx*dy*dz/numRanks) ;
}
*col = myDom % dx ;
*row = (myDom / dx) % dy ;
*plane = myDom / (dx*dy) ;
*side = testProcs;
return;
}
void VerifyAndWriteFinalOutput(Real_t elapsed_time,
Domain& locDom,
Int_t its,
Int_t nx,
Int_t numRanks)
{
size_t free_mem, total_mem, used_mem;
hipMemGetInfo(&free_mem, &total_mem);
used_mem= total_mem - free_mem;
#if LULESH_SHOW_PROGRESS == 0
printf(" Used Memory = %8.4f Mb\n", used_mem / (1024.*1024.) );
#endif
// GrindTime1 only takes a single domain into account, and is thus a good way to measure
// processor speed independent of MPI parallelism.
// GrindTime2 takes into account speedups from MPI parallelism
//YKT
size_t elem = size_t(nx)*nx*nx; // widen before multiplying so large nx does not overflow int
size_t total_elem = elem*numRanks;
Real_t grindTime1 = ((elapsed_time*1e6)/its)/elem;
Real_t grindTime2 = ((elapsed_time*1e6)/its)/total_elem;
//Real_t grindTime1 = ((elapsed_time*1e6)/its)/(nx*nx*nx);
//Real_t grindTime2 = ((elapsed_time*1e6)/its)/(nx*nx*nx*numRanks);
// Copy Energy back to Host
Real_t e_zero;
Real_t* d_ezero_ptr = locDom.e.raw() + locDom.octantCorner; /* octant corner supposed to be 0 */
hipMemcpy(&e_zero, d_ezero_ptr, sizeof(Real_t), hipMemcpyDeviceToHost);
printf("Run completed: \n");
printf(" Problem size = %i \n", nx);
printf(" MPI tasks = %i \n", numRanks);
printf(" Iteration count = %i \n", its);
printf(" Final Origin Energy = %12.6e \n", e_zero);
Real_t MaxAbsDiff = Real_t(0.0);
Real_t TotalAbsDiff = Real_t(0.0);
Real_t MaxRelDiff = Real_t(0.0);
Real_t *e_all = new Real_t[nx * nx];
hipMemcpy(e_all, locDom.e.raw(), nx * nx * sizeof(Real_t), hipMemcpyDeviceToHost);
for (Index_t j=0; j<nx; ++j) {
for (Index_t k=j+1; k<nx; ++k) {
Real_t AbsDiff = FABS(e_all[j*nx+k]-e_all[k*nx+j]);
TotalAbsDiff += AbsDiff;
if (MaxAbsDiff <AbsDiff) MaxAbsDiff = AbsDiff;
Real_t RelDiff = AbsDiff / e_all[k*nx+j];
if (MaxRelDiff <RelDiff) MaxRelDiff = RelDiff;
}
}
delete [] e_all; // e_all was allocated with new[], so use array delete
// Quick symmetry check
printf(" Testing Plane 0 of Energy Array on rank 0:\n");
printf(" MaxAbsDiff = %12.6e\n", MaxAbsDiff );
printf(" TotalAbsDiff = %12.6e\n", TotalAbsDiff );
printf(" MaxRelDiff = %12.6e\n\n", MaxRelDiff );
// Timing information
printf("\nElapsed time = %10.2f (s)\n", elapsed_time);
printf("Grind time (us/z/c) = %10.8g (per dom) (%10.8g overall)\n", grindTime1, grindTime2);
printf("FOM = %10.8g (z/s)\n\n", 1000.0/grindTime2); // zones per second
// hfwen
//bool write_solution_flag=true;
bool write_solution_flag=false;
if (write_solution_flag) {
write_solution(&locDom);
}
return ;
}
int main(int argc, char *argv[])
{
if (argc < 3) {
printUsage(argv);
exit( LFileError );
}
if ( strcmp(argv[1],"-u") != 0 && strcmp(argv[1],"-s") != 0 )
{
printUsage(argv);
exit( LFileError ) ;
}
int num_iters = -1;
if (argc == 5) {
num_iters = atoi(argv[4]);
}
bool structured = ( strcmp(argv[1],"-s") == 0 );
Int_t numRanks ;
Int_t myRank ;
#if USE_MPI
Domain_member fieldData ;
MPI_Init(&argc, &argv) ;
MPI_Comm_size(MPI_COMM_WORLD, &numRanks) ;
MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
#else
numRanks = 1;
myRank = 0;
#endif
//YKT
//trap();
cuda_init(myRank);
/* assume cube subdomain geometry for now */
Index_t nx = atoi(argv[2]);
Domain *locDom ;
// Set up the mesh and decompose. Assumes regular cubes for now
Int_t col, row, plane, side;
InitMeshDecomp(numRanks, myRank, &col, &row, &plane, &side);
// TODO: change default nr to 11
Int_t nr = 11;
Int_t balance = 1;
Int_t cost = 1;
// TODO: modify this constructor to account for new fields
// TODO: setup communication buffers
locDom = NewDomain(argv, numRanks, col, row, plane, nx, side, structured, nr, balance, cost);
#if USE_MPI
// copy to the host for mpi transfer
locDom->h_nodalMass = locDom->nodalMass;
fieldData = &Domain::get_nodalMass;
// Initial domain boundary communication
CommRecv(*locDom, MSG_COMM_SBN, 1,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
true, false) ;
CommSend(*locDom, MSG_COMM_SBN, 1, &fieldData,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
true, false) ;
CommSBN(*locDom, 1, &fieldData) ;
// copy back to the device
locDom->nodalMass = locDom->h_nodalMass;
// End initialization
MPI_Barrier(MPI_COMM_WORLD);
#endif
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
/* timestep to solution */
int its=0;
if (myRank == 0) {
if (structured)
printf("Running until t=%f, Problem size=%dx%dx%d\n",locDom->stoptime,nx,nx,nx);
else
printf("Running until t=%f, Problem size=%d \n",locDom->stoptime,locDom->numElem);
}
hipProfilerStart();
#if USE_MPI
double start = MPI_Wtime();
#else
timeval start;
gettimeofday(&start, NULL) ;
#endif
while(locDom->time_h < locDom->stoptime)
{
// this has been moved after computation of volume forces to hide launch latencies
//TimeIncrement(locDom) ;
LagrangeLeapFrog(locDom) ;
checkErrors(locDom,its,myRank);
#if LULESH_SHOW_PROGRESS
if (myRank == 0 && its % 100 == 0)
printf("cycle = %d, time = %e, dt=%e\n", its+1, double(locDom->time_h), double(locDom->deltatime_h) ) ;
#endif
its++;
if (its == num_iters) break;
}
// make sure GPU finished its work
hipDeviceSynchronize();
// Use reduced max elapsed time
double elapsed_time;
#if USE_MPI
elapsed_time = MPI_Wtime() - start;
#else
timeval end;
gettimeofday(&end, NULL) ;
elapsed_time = (double)(end.tv_sec - start.tv_sec) + ((double)(end.tv_usec - start.tv_usec))/1000000 ;
#endif
double elapsed_timeG;
#if USE_MPI
MPI_Reduce(&elapsed_time, &elapsed_timeG, 1, MPI_DOUBLE,
MPI_MAX, 0, MPI_COMM_WORLD);
#else
elapsed_timeG = elapsed_time;
#endif
hipProfilerStop();
if (myRank == 0)
VerifyAndWriteFinalOutput(elapsed_timeG, *locDom, its, nx, numRanks);
#ifdef SAMI
DumpDomain(locDom) ;
#endif
hipDeviceReset();
#if USE_MPI
MPI_Finalize() ;
#endif
return 0 ;
}
| 7c038288a16b7895c35cb446b6766389a31d0f56.cu | /*
Copyright (c) 2010.
Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory.
LLNL-CODE-461231
All rights reserved.
This file is part of LULESH, Version 1.0.
Please also read this link -- http://www.opensource.org/licenses/index.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC,
THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Additional BSD Notice
1. This notice is required to be provided under our contract with the U.S.
Department of Energy (DOE). This work was produced at Lawrence Livermore
National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE.
2. Neither the United States Government nor Lawrence Livermore National
Security, LLC nor any of their employees, makes any warranty, express
or implied, or assumes any liability or responsibility for the accuracy,
completeness, or usefulness of any information, apparatus, product, or
process disclosed, or represents that its use would not infringe
privately-owned rights.
3. Also, reference herein to any specific commercial products, process, or
services by trade name, trademark, manufacturer or otherwise does not
necessarily constitute or imply its endorsement, recommendation, or
favoring by the United States Government or Lawrence Livermore National
Security, LLC. The views and opinions of authors expressed herein do not
necessarily state or reflect those of the United States Government or
Lawrence Livermore National Security, LLC, and shall not be used for
advertising or product endorsement purposes.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <sstream>
#include <util.h>
#include <sm_utils.inl>
#include <cuda.h>
#include <allocator.h>
#include "cuda_profiler_api.h"
#ifdef USE_MPI
#include <mpi.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#include "lulesh.h"
//YKT
//extern "C" void trap(void);
/****************************************************/
/* Allow flexibility for arithmetic representations */
/****************************************************/
__device__ inline real4 SQRT(real4 arg) { return sqrtf(arg) ; }
__device__ inline real8 SQRT(real8 arg) { return sqrt(arg) ; }
__device__ inline real4 CBRT(real4 arg) { return cbrtf(arg) ; }
__device__ inline real8 CBRT(real8 arg) { return cbrt(arg) ; }
__device__ __host__ inline real4 FABS(real4 arg) { return fabsf(arg) ; }
__device__ __host__ inline real8 FABS(real8 arg) { return fabs(arg) ; }
__device__ inline real4 FMAX(real4 arg1,real4 arg2) { return fmaxf(arg1,arg2) ; }
__device__ inline real8 FMAX(real8 arg1,real8 arg2) { return fmax(arg1,arg2) ; }
#define MAX(a, b) ( ((a) > (b)) ? (a) : (b))
/* Stuff needed for boundary conditions */
/* 2 BCs on each of 6 hexahedral faces (12 bits) */
#define XI_M 0x00007
#define XI_M_SYMM 0x00001
#define XI_M_FREE 0x00002
#define XI_M_COMM 0x00004
#define XI_P 0x00038
#define XI_P_SYMM 0x00008
#define XI_P_FREE 0x00010
#define XI_P_COMM 0x00020
#define ETA_M 0x001c0
#define ETA_M_SYMM 0x00040
#define ETA_M_FREE 0x00080
#define ETA_M_COMM 0x00100
#define ETA_P 0x00e00
#define ETA_P_SYMM 0x00200
#define ETA_P_FREE 0x00400
#define ETA_P_COMM 0x00800
#define ZETA_M 0x07000
#define ZETA_M_SYMM 0x01000
#define ZETA_M_FREE 0x02000
#define ZETA_M_COMM 0x04000
#define ZETA_P 0x38000
#define ZETA_P_SYMM 0x08000
#define ZETA_P_FREE 0x10000
#define ZETA_P_COMM 0x20000
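/* VOLUDER accumulates one partial derivative of the hexahedron volume with
respect to a single nodal coordinate; the 1/12 prefactor matches the one used
in CalcElemVolume below. */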
#define VOLUDER(a0,a1,a2,a3,a4,a5,b0,b1,b2,b3,b4,b5,dvdc) \
{ \
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ; \
\
dvdc= \
((a1) + (a2)) * ((b0) + (b1)) - ((a0) + (a1)) * ((b1) + (b2)) + \
((a0) + (a4)) * ((b3) + (b4)) - ((a3) + (a4)) * ((b0) + (b4)) - \
((a2) + (a5)) * ((b3) + (b5)) + ((a3) + (a5)) * ((b2) + (b5)); \
dvdc *= twelfth; \
}
/*
__device__
static
__forceinline__
void SumOverNodes(Real_t& val, volatile Real_t* smem, int cta_elem, int node) {
int tid = (cta_elem << 3) + node;
smem[tid] = val;
if (node < 4)
{
smem[tid] += smem[tid+4];
smem[tid] += smem[tid+2];
smem[tid] += smem[tid+1];
}
val = smem[(cta_elem << 3)];
}
*/
__device__
static
__forceinline__
void SumOverNodesShfl(Real_t& val) {
val += utils::shfl_xor( val, 4, 8);
val += utils::shfl_xor( val, 2, 8);
val += utils::shfl_xor( val, 1, 8);
}
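// Hedged illustration (a sketch, not called anywhere in this file): each group
// of 8 consecutive lanes holds one element's per-node contribution; the three
// xor-shuffles above form a butterfly reduction, after which every lane in the
// group holds the element-wide sum.
__device__
static
__forceinline__
Real_t SumOverNodesShfl_demo(Real_t per_node_val) {
SumOverNodesShfl(per_node_val);
return per_node_val; // identical across the 8 lanes of the group
}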
__host__ __device__
static
__forceinline__
Real_t CalcElemVolume( const Real_t x0, const Real_t x1,
const Real_t x2, const Real_t x3,
const Real_t x4, const Real_t x5,
const Real_t x6, const Real_t x7,
const Real_t y0, const Real_t y1,
const Real_t y2, const Real_t y3,
const Real_t y4, const Real_t y5,
const Real_t y6, const Real_t y7,
const Real_t z0, const Real_t z1,
const Real_t z2, const Real_t z3,
const Real_t z4, const Real_t z5,
const Real_t z6, const Real_t z7 )
{
Real_t twelveth = Real_t(1.0)/Real_t(12.0);
Real_t dx61 = x6 - x1;
Real_t dy61 = y6 - y1;
Real_t dz61 = z6 - z1;
Real_t dx70 = x7 - x0;
Real_t dy70 = y7 - y0;
Real_t dz70 = z7 - z0;
Real_t dx63 = x6 - x3;
Real_t dy63 = y6 - y3;
Real_t dz63 = z6 - z3;
Real_t dx20 = x2 - x0;
Real_t dy20 = y2 - y0;
Real_t dz20 = z2 - z0;
Real_t dx50 = x5 - x0;
Real_t dy50 = y5 - y0;
Real_t dz50 = z5 - z0;
Real_t dx64 = x6 - x4;
Real_t dy64 = y6 - y4;
Real_t dz64 = z6 - z4;
Real_t dx31 = x3 - x1;
Real_t dy31 = y3 - y1;
Real_t dz31 = z3 - z1;
Real_t dx72 = x7 - x2;
Real_t dy72 = y7 - y2;
Real_t dz72 = z7 - z2;
Real_t dx43 = x4 - x3;
Real_t dy43 = y4 - y3;
Real_t dz43 = z4 - z3;
Real_t dx57 = x5 - x7;
Real_t dy57 = y5 - y7;
Real_t dz57 = z5 - z7;
Real_t dx14 = x1 - x4;
Real_t dy14 = y1 - y4;
Real_t dz14 = z1 - z4;
Real_t dx25 = x2 - x5;
Real_t dy25 = y2 - y5;
Real_t dz25 = z2 - z5;
#define TRIPLE_PRODUCT(x1, y1, z1, x2, y2, z2, x3, y3, z3) \
((x1)*((y2)*(z3) - (z2)*(y3)) + (x2)*((z1)*(y3) - (y1)*(z3)) + (x3)*((y1)*(z2) - (z1)*(y2)))
// 11 + 3*14
Real_t volume =
TRIPLE_PRODUCT(dx31 + dx72, dx63, dx20,
dy31 + dy72, dy63, dy20,
dz31 + dz72, dz63, dz20) +
TRIPLE_PRODUCT(dx43 + dx57, dx64, dx70,
dy43 + dy57, dy64, dy70,
dz43 + dz57, dz64, dz70) +
TRIPLE_PRODUCT(dx14 + dx25, dx61, dx50,
dy14 + dy25, dy61, dy50,
dz14 + dz25, dz61, dz50);
#undef TRIPLE_PRODUCT
volume *= twelveth;
return volume ;
}
__host__ __device__
static
__forceinline__
Real_t CalcElemVolume( const Real_t x[8], const Real_t y[8], const Real_t z[8] )
{
return CalcElemVolume( x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7],
y[0], y[1], y[2], y[3], y[4], y[5], y[6], y[7],
z[0], z[1], z[2], z[3], z[4], z[5], z[6], z[7]);
}
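// Hedged sanity-check sketch (not called in this file): with the node ordering
// produced by Domain::BuildMesh below (nodes 0-3 on the lower z face, nodes 4-7
// directly above them), a unit cube should give a volume of exactly one.
__host__ __device__
static
__forceinline__
Real_t UnitCubeVolume_sketch()
{
Real_t x[8] = { Real_t(0.), Real_t(1.), Real_t(1.), Real_t(0.),
Real_t(0.), Real_t(1.), Real_t(1.), Real_t(0.) };
Real_t y[8] = { Real_t(0.), Real_t(0.), Real_t(1.), Real_t(1.),
Real_t(0.), Real_t(0.), Real_t(1.), Real_t(1.) };
Real_t z[8] = { Real_t(0.), Real_t(0.), Real_t(0.), Real_t(0.),
Real_t(1.), Real_t(1.), Real_t(1.), Real_t(1.) };
return CalcElemVolume(x, y, z); // expected: Real_t(1.0)
}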
void cuda_init(int rank)
{
Int_t deviceCount, dev;
cudaDeviceProp cuda_deviceProp;
cudaSafeCall( cudaGetDeviceCount(&deviceCount) );
if (deviceCount == 0) {
fprintf(stderr, "cuda_init(): no devices supporting CUDA.\n");
exit(1);
}
dev = rank % deviceCount;
if ((dev < 0) || (dev > deviceCount-1)) {
fprintf(stderr, "cuda_init(): requested device (%d) out of range [%d,%d]\n",
dev, 0, deviceCount-1);
exit(1);
}
cudaSafeCall( cudaSetDevice(dev) );
struct cudaDeviceProp props;
cudaGetDeviceProperties(&props, dev);
char hostname[256];
gethostname(hostname, sizeof(hostname));
//printf("Host %s using GPU %i: %s\n", hostname, dev, props.name);
cudaSafeCall( cudaGetDeviceProperties(&cuda_deviceProp, dev) );
if (cuda_deviceProp.major < 3) {
fprintf(stderr, "cuda_init(): This implementation of Lulesh requires device SM 3.0+.\n", dev);
exit(1);
}
#if CUDART_VERSION < 5000
fprintf(stderr,"cuda_init(): This implementation of Lulesh uses texture objects, which is requires Cuda 5.0+.\n");
exit(1);
#endif
}
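// Hedged usage sketch (the real call lives in the driver, not in this file):
// obtain the MPI rank first, then let cuda_init() bind this rank to device
// rank % deviceCount.
static inline void cuda_init_from_rank_sketch()
{
int myRank = 0;
#ifdef USE_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
#endif
cuda_init(myRank);
}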
void AllocateNodalPersistent(Domain* domain, size_t domNodes)
{
domain->x.resize(domNodes) ; /* coordinates */
domain->y.resize(domNodes) ;
domain->z.resize(domNodes) ;
domain->xd.resize(domNodes) ; /* velocities */
domain->yd.resize(domNodes) ;
domain->zd.resize(domNodes) ;
domain->xdd.resize(domNodes) ; /* accelerations */
domain->ydd.resize(domNodes) ;
domain->zdd.resize(domNodes) ;
domain->fx.resize(domNodes) ; /* forces */
domain->fy.resize(domNodes) ;
domain->fz.resize(domNodes) ;
domain->nodalMass.resize(domNodes) ; /* mass */
}
void AllocateElemPersistent(Domain* domain, size_t domElems, size_t padded_domElems)
{
domain->matElemlist.resize(domElems) ; /* material indexset */
domain->nodelist.resize(8*padded_domElems) ; /* elemToNode connectivity */
domain->lxim.resize(domElems) ; /* elem connectivity through face */
domain->lxip.resize(domElems) ;
domain->letam.resize(domElems) ;
domain->letap.resize(domElems) ;
domain->lzetam.resize(domElems) ;
domain->lzetap.resize(domElems) ;
domain->elemBC.resize(domElems) ; /* elem face symm/free-surf flag */
domain->e.resize(domElems) ; /* energy */
domain->p.resize(domElems) ; /* pressure */
domain->q.resize(domElems) ; /* q */
domain->ql.resize(domElems) ; /* linear term for q */
domain->qq.resize(domElems) ; /* quadratic term for q */
domain->v.resize(domElems) ; /* relative volume */
domain->volo.resize(domElems) ; /* reference volume */
domain->delv.resize(domElems) ; /* m_vnew - m_v */
domain->vdov.resize(domElems) ; /* volume derivative over volume */
domain->arealg.resize(domElems) ; /* elem characteristic length */
domain->ss.resize(domElems) ; /* "sound speed" */
domain->elemMass.resize(domElems) ; /* mass */
}
void AllocateSymmX(Domain* domain, size_t size)
{
domain->symmX.resize(size) ;
}
void AllocateSymmY(Domain* domain, size_t size)
{
domain->symmY.resize(size) ;
}
void AllocateSymmZ(Domain* domain, size_t size)
{
domain->symmZ.resize(size) ;
}
void InitializeFields(Domain* domain)
{
/* Basic Field Initialization */
thrust::fill(domain->ss.begin(),domain->ss.end(),0.);
thrust::fill(domain->e.begin(),domain->e.end(),0.);
thrust::fill(domain->p.begin(),domain->p.end(),0.);
thrust::fill(domain->q.begin(),domain->q.end(),0.);
thrust::fill(domain->v.begin(),domain->v.end(),1.);
thrust::fill(domain->xd.begin(),domain->xd.end(),0.);
thrust::fill(domain->yd.begin(),domain->yd.end(),0.);
thrust::fill(domain->zd.begin(),domain->zd.end(),0.);
thrust::fill(domain->xdd.begin(),domain->xdd.end(),0.);
thrust::fill(domain->ydd.begin(),domain->ydd.end(),0.);
thrust::fill(domain->zdd.begin(),domain->zdd.end(),0.);
thrust::fill(domain->nodalMass.begin(),domain->nodalMass.end(),0.);
}
////////////////////////////////////////////////////////////////////////////////
void
Domain::SetupCommBuffers(Int_t edgeNodes)
{
// allocate a buffer large enough for nodal ghost data
maxEdgeSize = MAX(this->sizeX, MAX(this->sizeY, this->sizeZ))+1 ;
maxPlaneSize = CACHE_ALIGN_REAL(maxEdgeSize*maxEdgeSize) ;
maxEdgeSize = CACHE_ALIGN_REAL(maxEdgeSize) ;
// assume communication to 6 neighbors by default
m_rowMin = (m_rowLoc == 0) ? 0 : 1;
m_rowMax = (m_rowLoc == m_tp-1) ? 0 : 1;
m_colMin = (m_colLoc == 0) ? 0 : 1;
m_colMax = (m_colLoc == m_tp-1) ? 0 : 1;
m_planeMin = (m_planeLoc == 0) ? 0 : 1;
m_planeMax = (m_planeLoc == m_tp-1) ? 0 : 1;
#if USE_MPI
// account for face communication
Index_t comBufSize =
(m_rowMin + m_rowMax + m_colMin + m_colMax + m_planeMin + m_planeMax) *
maxPlaneSize * MAX_FIELDS_PER_MPI_COMM ;
// account for edge communication
comBufSize +=
((m_rowMin & m_colMin) + (m_rowMin & m_planeMin) + (m_colMin & m_planeMin) +
(m_rowMax & m_colMax) + (m_rowMax & m_planeMax) + (m_colMax & m_planeMax) +
(m_rowMax & m_colMin) + (m_rowMin & m_planeMax) + (m_colMin & m_planeMax) +
(m_rowMin & m_colMax) + (m_rowMax & m_planeMin) + (m_colMax & m_planeMin)) *
maxPlaneSize * MAX_FIELDS_PER_MPI_COMM ;
// account for corner communication
// factor of 16 is so each buffer has its own cache line
comBufSize += ((m_rowMin & m_colMin & m_planeMin) +
(m_rowMin & m_colMin & m_planeMax) +
(m_rowMin & m_colMax & m_planeMin) +
(m_rowMin & m_colMax & m_planeMax) +
(m_rowMax & m_colMin & m_planeMin) +
(m_rowMax & m_colMin & m_planeMax) +
(m_rowMax & m_colMax & m_planeMin) +
(m_rowMax & m_colMax & m_planeMax)) * CACHE_COHERENCE_PAD_REAL ;
//hfwen: Why can't we make these calls when comBufSize == 0? Is this specific to Volta or CUDA 9?
if (comBufSize > 0)
{
this->commDataSend = new Real_t[comBufSize] ;
this->commDataRecv = new Real_t[comBufSize] ;
// pin buffers
cudaHostRegister(this->commDataSend, comBufSize*sizeof(Real_t), 0);
cudaHostRegister(this->commDataRecv, comBufSize*sizeof(Real_t), 0);
// prevent floating point exceptions
memset(this->commDataSend, 0, comBufSize*sizeof(Real_t)) ;
memset(this->commDataRecv, 0, comBufSize*sizeof(Real_t)) ;
// allocate shadow GPU buffers
cudaMalloc(&this->d_commDataSend, comBufSize*sizeof(Real_t));
cudaMalloc(&this->d_commDataRecv, comBufSize*sizeof(Real_t));
// prevent floating point exceptions
cudaMemset(this->d_commDataSend, 0, comBufSize*sizeof(Real_t));
cudaMemset(this->d_commDataRecv, 0, comBufSize*sizeof(Real_t));
}
#endif
}
void SetupConnectivityBC(Domain *domain, int edgeElems)
{
int domElems = domain->numElem;
Vector_h<Index_t> lxim_h(domElems);
Vector_h<Index_t> lxip_h(domElems);
Vector_h<Index_t> letam_h(domElems);
Vector_h<Index_t> letap_h(domElems);
Vector_h<Index_t> lzetam_h(domElems);
Vector_h<Index_t> lzetap_h(domElems);
/* set up element connectivity information */
lxim_h[0] = 0 ;
for (Index_t i=1; i<domElems; ++i) {
lxim_h[i] = i-1 ;
lxip_h[i-1] = i ;
}
lxip_h[domElems-1] = domElems-1 ;
for (Index_t i=0; i<edgeElems; ++i) {
letam_h[i] = i ;
letap_h[domElems-edgeElems+i] = domElems-edgeElems+i ;
}
for (Index_t i=edgeElems; i<domElems; ++i) {
letam_h[i] = i-edgeElems ;
letap_h[i-edgeElems] = i ;
}
for (Index_t i=0; i<edgeElems*edgeElems; ++i) {
lzetam_h[i] = i ;
lzetap_h[domElems-edgeElems*edgeElems+i] = domElems-edgeElems*edgeElems+i ;
}
for (Index_t i=edgeElems*edgeElems; i<domElems; ++i) {
lzetam_h[i] = i - edgeElems*edgeElems ;
lzetap_h[i-edgeElems*edgeElems] = i ;
}
/* set up boundary condition information */
Vector_h<Index_t> elemBC_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
elemBC_h[i] = 0 ; /* clear BCs by default */
}
Index_t ghostIdx[6] ; // offsets to ghost locations
for (Index_t i=0; i<6; ++i) {
ghostIdx[i] = INT_MIN ;
}
Int_t pidx = domElems ;
if (domain->m_planeMin != 0) {
ghostIdx[0] = pidx ;
pidx += domain->sizeX*domain->sizeY ;
}
if (domain->m_planeMax != 0) {
ghostIdx[1] = pidx ;
pidx += domain->sizeX*domain->sizeY ;
}
if (domain->m_rowMin != 0) {
ghostIdx[2] = pidx ;
pidx += domain->sizeX*domain->sizeZ ;
}
if (domain->m_rowMax != 0) {
ghostIdx[3] = pidx ;
pidx += domain->sizeX*domain->sizeZ ;
}
if (domain->m_colMin != 0) {
ghostIdx[4] = pidx ;
pidx += domain->sizeY*domain->sizeZ ;
}
if (domain->m_colMax != 0) {
ghostIdx[5] = pidx ;
}
/* symmetry plane or free surface BCs */
for (Index_t i=0; i<edgeElems; ++i) {
Index_t planeInc = i*edgeElems*edgeElems ;
Index_t rowInc = i*edgeElems ;
for (Index_t j=0; j<edgeElems; ++j) {
if (domain->m_planeLoc == 0) {
elemBC_h[rowInc+j] |= ZETA_M_SYMM ;
}
else {
elemBC_h[rowInc+j] |= ZETA_M_COMM ;
lzetam_h[rowInc+j] = ghostIdx[0] + rowInc + j ;
}
if (domain->m_planeLoc == domain->m_tp-1) {
elemBC_h[rowInc+j+domElems-edgeElems*edgeElems] |=
ZETA_P_FREE;
}
else {
elemBC_h[rowInc+j+domElems-edgeElems*edgeElems] |=
ZETA_P_COMM ;
lzetap_h[rowInc+j+domElems-edgeElems*edgeElems] =
ghostIdx[1] + rowInc + j ;
}
if (domain->m_rowLoc == 0) {
elemBC_h[planeInc+j] |= ETA_M_SYMM ;
}
else {
elemBC_h[planeInc+j] |= ETA_M_COMM ;
letam_h[planeInc+j] = ghostIdx[2] + rowInc + j ;
}
if (domain->m_rowLoc == domain->m_tp-1) {
elemBC_h[planeInc+j+edgeElems*edgeElems-edgeElems] |=
ETA_P_FREE ;
}
else {
elemBC_h[planeInc+j+edgeElems*edgeElems-edgeElems] |=
ETA_P_COMM ;
letap_h[planeInc+j+edgeElems*edgeElems-edgeElems] =
ghostIdx[3] + rowInc + j ;
}
if (domain->m_colLoc == 0) {
elemBC_h[planeInc+j*edgeElems] |= XI_M_SYMM ;
}
else {
elemBC_h[planeInc+j*edgeElems] |= XI_M_COMM ;
lxim_h[planeInc+j*edgeElems] = ghostIdx[4] + rowInc + j ;
}
if (domain->m_colLoc == domain->m_tp-1) {
elemBC_h[planeInc+j*edgeElems+edgeElems-1] |= XI_P_FREE ;
}
else {
elemBC_h[planeInc+j*edgeElems+edgeElems-1] |= XI_P_COMM ;
lxip_h[planeInc+j*edgeElems+edgeElems-1] =
ghostIdx[5] + rowInc + j ;
}
}
}
domain->elemBC = elemBC_h;
domain->lxim = lxim_h;
domain->lxip = lxip_h;
domain->letam = letam_h;
domain->letap = letap_h;
domain->lzetam = lzetam_h;
domain->lzetap = lzetap_h;
}
void Domain::BuildMesh(Int_t nx, Int_t edgeNodes, Int_t edgeElems, Int_t domNodes, Int_t padded_domElems, Vector_h<Real_t> &x_h, Vector_h<Real_t> &y_h, Vector_h<Real_t> &z_h, Vector_h<Int_t> &nodelist_h)
{
Index_t meshEdgeElems = m_tp*nx ;
x_h.resize(domNodes);
y_h.resize(domNodes);
z_h.resize(domNodes);
// initialize nodal coordinates
Index_t nidx = 0 ;
Real_t tz = Real_t(1.125)*Real_t(m_planeLoc*nx)/Real_t(meshEdgeElems) ;
for (Index_t plane=0; plane<edgeNodes; ++plane) {
Real_t ty = Real_t(1.125)*Real_t(m_rowLoc*nx)/Real_t(meshEdgeElems) ;
for (Index_t row=0; row<edgeNodes; ++row) {
Real_t tx = Real_t(1.125)*Real_t(m_colLoc*nx)/Real_t(meshEdgeElems) ;
for (Index_t col=0; col<edgeNodes; ++col) {
x_h[nidx] = tx ;
y_h[nidx] = ty ;
z_h[nidx] = tz ;
++nidx ;
// tx += ds ; // may accumulate roundoff...
tx = Real_t(1.125)*Real_t(m_colLoc*nx+col+1)/Real_t(meshEdgeElems) ;
}
// ty += ds ; // may accumulate roundoff...
ty = Real_t(1.125)*Real_t(m_rowLoc*nx+row+1)/Real_t(meshEdgeElems) ;
}
// tz += ds ; // may accumulate roundoff...
tz = Real_t(1.125)*Real_t(m_planeLoc*nx+plane+1)/Real_t(meshEdgeElems) ;
}
x = x_h;
y = y_h;
z = z_h;
nodelist_h.resize(padded_domElems*8);
// embed hexahedral elements in nodal point lattice
Index_t zidx = 0 ;
nidx = 0 ;
for (Index_t plane=0; plane<edgeElems; ++plane) {
for (Index_t row=0; row<edgeElems; ++row) {
for (Index_t col=0; col<edgeElems; ++col) {
nodelist_h[0*padded_domElems+zidx] = nidx ;
nodelist_h[1*padded_domElems+zidx] = nidx + 1 ;
nodelist_h[2*padded_domElems+zidx] = nidx + edgeNodes + 1 ;
nodelist_h[3*padded_domElems+zidx] = nidx + edgeNodes ;
nodelist_h[4*padded_domElems+zidx] = nidx + edgeNodes*edgeNodes ;
nodelist_h[5*padded_domElems+zidx] = nidx + edgeNodes*edgeNodes + 1 ;
nodelist_h[6*padded_domElems+zidx] = nidx + edgeNodes*edgeNodes + edgeNodes + 1 ;
nodelist_h[7*padded_domElems+zidx] = nidx + edgeNodes*edgeNodes + edgeNodes ;
++zidx ;
++nidx ;
}
++nidx ;
}
nidx += edgeNodes ;
}
nodelist = nodelist_h;
}
Domain *NewDomain(char* argv[], Int_t numRanks, Index_t colLoc,
Index_t rowLoc, Index_t planeLoc,
Index_t nx, int tp, bool structured, Int_t nr, Int_t balance, Int_t cost)
{
Domain *domain = new Domain ;
domain->max_streams = 32;
domain->streams.resize(domain->max_streams);
for (Int_t i=0;i<domain->max_streams;i++)
cudaStreamCreate(&(domain->streams[i]));
cudaEventCreateWithFlags(&domain->time_constraint_computed,cudaEventDisableTiming);
Index_t domElems;
Index_t domNodes;
Index_t padded_domElems;
Vector_h<Index_t> nodelist_h;
Vector_h<Real_t> x_h;
Vector_h<Real_t> y_h;
Vector_h<Real_t> z_h;
if (structured)
{
domain->m_tp = tp ;
domain->m_numRanks = numRanks ;
domain->m_colLoc = colLoc ;
domain->m_rowLoc = rowLoc ;
domain->m_planeLoc = planeLoc ;
Index_t edgeElems = nx ;
Index_t edgeNodes = edgeElems+1 ;
domain->sizeX = edgeElems ;
domain->sizeY = edgeElems ;
domain->sizeZ = edgeElems ;
domain->numElem = domain->sizeX*domain->sizeY*domain->sizeZ ;
domain->padded_numElem = PAD(domain->numElem,32);
domain->numNode = (domain->sizeX+1)*(domain->sizeY+1)*(domain->sizeZ+1) ;
domain->padded_numNode = PAD(domain->numNode,32);
domElems = domain->numElem ;
domNodes = domain->numNode ;
padded_domElems = domain->padded_numElem ;
AllocateElemPersistent(domain,domElems,padded_domElems);
AllocateNodalPersistent(domain,domNodes);
domain->SetupCommBuffers(edgeNodes);
InitializeFields(domain);
domain->BuildMesh(nx, edgeNodes, edgeElems, domNodes, padded_domElems, x_h, y_h, z_h, nodelist_h);
domain->numSymmX = domain->numSymmY = domain->numSymmZ = 0;
if (domain->m_colLoc == 0)
domain->numSymmX = (edgeElems+1)*(edgeElems+1) ;
if (domain->m_rowLoc == 0)
domain->numSymmY = (edgeElems+1)*(edgeElems+1) ;
if (domain->m_planeLoc == 0)
domain->numSymmZ = (edgeElems+1)*(edgeElems+1) ;
AllocateSymmX(domain,edgeNodes*edgeNodes);
AllocateSymmY(domain,edgeNodes*edgeNodes);
AllocateSymmZ(domain,edgeNodes*edgeNodes);
/* set up symmetry nodesets */
Vector_h<Index_t> symmX_h(domain->symmX.size());
Vector_h<Index_t> symmY_h(domain->symmY.size());
Vector_h<Index_t> symmZ_h(domain->symmZ.size());
Int_t nidx = 0 ;
for (Index_t i=0; i<edgeNodes; ++i) {
Index_t planeInc = i*edgeNodes*edgeNodes ;
Index_t rowInc = i*edgeNodes ;
for (Index_t j=0; j<edgeNodes; ++j) {
if (domain->m_planeLoc == 0) {
symmZ_h[nidx] = rowInc + j ;
}
if (domain->m_rowLoc == 0) {
symmY_h[nidx] = planeInc + j ;
}
if (domain->m_colLoc == 0) {
symmX_h[nidx] = planeInc + j*edgeNodes ;
}
++nidx ;
}
}
if (domain->m_planeLoc == 0)
domain->symmZ = symmZ_h;
if (domain->m_rowLoc == 0)
domain->symmY = symmY_h;
if (domain->m_colLoc == 0)
domain->symmX = symmX_h;
SetupConnectivityBC(domain, edgeElems);
}
else
{
FILE *fp;
int ee, en;
if ((fp = fopen(argv[2], "r")) == 0) {
printf("could not open file %s\n", argv[2]) ;
exit( LFileError ) ;
}
int fsuccess;
fsuccess = fscanf(fp, "%d %d", &ee, &en) ;
domain->numElem = Index_t(ee);
domain->padded_numElem = PAD(domain->numElem,32);
domain->numNode = Index_t(en);
domain->padded_numNode = PAD(domain->numNode,32);
domElems = domain->numElem ;
domNodes = domain->numNode ;
padded_domElems = domain->padded_numElem ;
AllocateElemPersistent(domain,domElems,padded_domElems);
AllocateNodalPersistent(domain,domNodes);
InitializeFields(domain);
/* initialize nodal coordinates */
x_h.resize(domNodes);
y_h.resize(domNodes);
z_h.resize(domNodes);
for (Index_t i=0; i<domNodes; ++i) {
double px, py, pz ;
fsuccess = fscanf(fp, "%lf %lf %lf", &px, &py, &pz) ;
x_h[i] = Real_t(px) ;
y_h[i] = Real_t(py) ;
z_h[i] = Real_t(pz) ;
}
domain->x = x_h;
domain->y = y_h;
domain->z = z_h;
/* embed hexahedral elements in nodal point lattice */
nodelist_h.resize(padded_domElems*8);
for (Index_t zidx=0; zidx<domElems; ++zidx) {
for (Index_t ni=0; ni<Index_t(8); ++ni) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
nodelist_h[ni*padded_domElems+zidx] = Index_t(n);
}
}
domain->nodelist = nodelist_h;
/* set up face-based element neighbors */
Vector_h<Index_t> lxim_h(domElems);
Vector_h<Index_t> lxip_h(domElems);
Vector_h<Index_t> letam_h(domElems);
Vector_h<Index_t> letap_h(domElems);
Vector_h<Index_t> lzetam_h(domElems);
Vector_h<Index_t> lzetap_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
int xi_m, xi_p, eta_m, eta_p, zeta_m, zeta_p ;
fsuccess = fscanf(fp, "%d %d %d %d %d %d",
&xi_m, &xi_p, &eta_m, &eta_p, &zeta_m, &zeta_p) ;
lxim_h[i] = Index_t(xi_m) ;
lxip_h[i] = Index_t(xi_p) ;
letam_h[i] = Index_t(eta_m) ;
letap_h[i] = Index_t(eta_p) ;
lzetam_h[i] = Index_t(zeta_m) ;
lzetap_h[i] = Index_t(zeta_p) ;
}
domain->lxim = lxim_h;
domain->lxip = lxip_h;
domain->letam = letam_h;
domain->letap = letap_h;
domain->lzetam = lzetam_h;
domain->lzetap = lzetap_h;
/* set up X symmetry nodeset */
fsuccess = fscanf(fp, "%d", &domain->numSymmX) ;
Vector_h<Index_t> symmX_h(domain->numSymmX);
for (Index_t i=0; i<domain->numSymmX; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmX_h[i] = Index_t(n) ;
}
domain->symmX = symmX_h;
fsuccess = fscanf(fp, "%d", &domain->numSymmY) ;
Vector_h<Index_t> symmY_h(domain->numSymmY);
for (Index_t i=0; i<domain->numSymmY; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmY_h[i] = Index_t(n) ;
}
domain->symmY = symmY_h;
fsuccess = fscanf(fp, "%d", &domain->numSymmZ) ;
Vector_h<Index_t> symmZ_h(domain->numSymmZ);
for (Index_t i=0; i<domain->numSymmZ; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmZ_h[i] = Index_t(n) ;
}
domain->symmZ = symmZ_h;
/* set up free surface nodeset */
Index_t numFreeSurf;
fsuccess = fscanf(fp, "%d", &numFreeSurf) ;
Vector_h<Index_t> freeSurf_h(numFreeSurf);
for (Index_t i=0; i<numFreeSurf; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
freeSurf_h[i] = Index_t(n) ;
}
printf("%c\n",fsuccess);//nothing
fclose(fp);
/* set up boundary condition information */
Vector_h<Index_t> elemBC_h(domElems);
Vector_h<Index_t> surfaceNode_h(domNodes);
for (Index_t i=0; i<domain->numElem; ++i) {
elemBC_h[i] = 0 ;
}
for (Index_t i=0; i<domain->numNode; ++i) {
surfaceNode_h[i] = 0 ;
}
for (Index_t i=0; i<domain->numSymmX; ++i) {
surfaceNode_h[symmX_h[i]] = 1 ;
}
for (Index_t i=0; i<domain->numSymmY; ++i) {
surfaceNode_h[symmY_h[i]] = 1 ;
}
for (Index_t i=0; i<domain->numSymmZ; ++i) {
surfaceNode_h[symmZ_h[i]] = 1 ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
Int_t mask = 0 ;
for (Index_t ni=0; ni<8; ++ni) {
mask |= (surfaceNode_h[nodelist_h[ni*domain->padded_numElem+zidx]] << ni) ;
}
if ((mask & 0x0f) == 0x0f) elemBC_h[zidx] |= ZETA_M_SYMM ;
if ((mask & 0xf0) == 0xf0) elemBC_h[zidx] |= ZETA_P_SYMM ;
if ((mask & 0x33) == 0x33) elemBC_h[zidx] |= ETA_M_SYMM ;
if ((mask & 0xcc) == 0xcc) elemBC_h[zidx] |= ETA_P_SYMM ;
if ((mask & 0x99) == 0x99) elemBC_h[zidx] |= XI_M_SYMM ;
if ((mask & 0x66) == 0x66) elemBC_h[zidx] |= XI_P_SYMM ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
if (elemBC_h[zidx] == (XI_M_SYMM | ETA_M_SYMM | ZETA_M_SYMM)) {
domain->octantCorner = zidx ;
break ;
}
}
for (Index_t i=0; i<domain->numNode; ++i) {
surfaceNode_h[i] = 0 ;
}
for (Index_t i=0; i<numFreeSurf; ++i) {
surfaceNode_h[freeSurf_h[i]] = 1 ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
Int_t mask = 0 ;
for (Index_t ni=0; ni<8; ++ni) {
mask |= (surfaceNode_h[nodelist_h[ni*domain->padded_numElem+zidx]] << ni) ;
}
if ((mask & 0x0f) == 0x0f) elemBC_h[zidx] |= ZETA_M_SYMM ;
if ((mask & 0xf0) == 0xf0) elemBC_h[zidx] |= ZETA_P_SYMM ;
if ((mask & 0x33) == 0x33) elemBC_h[zidx] |= ETA_M_SYMM ;
if ((mask & 0xcc) == 0xcc) elemBC_h[zidx] |= ETA_P_SYMM ;
if ((mask & 0x99) == 0x99) elemBC_h[zidx] |= XI_M_SYMM ;
if ((mask & 0x66) == 0x66) elemBC_h[zidx] |= XI_P_SYMM ;
}
domain->elemBC = elemBC_h;
/* deposit energy */
domain->e[domain->octantCorner] = Real_t(3.948746e+7) ;
}
/* set up node-centered indexing of elements */
Vector_h<Index_t> nodeElemCount_h(domNodes);
for (Index_t i=0; i<domNodes; ++i) {
nodeElemCount_h[i] = 0 ;
}
for (Index_t i=0; i<domElems; ++i) {
for (Index_t j=0; j < 8; ++j) {
++(nodeElemCount_h[nodelist_h[j*padded_domElems+i]]);
}
}
Vector_h<Index_t> nodeElemStart_h(domNodes);
nodeElemStart_h[0] = 0;
for (Index_t i=1; i < domNodes; ++i) {
nodeElemStart_h[i] =
nodeElemStart_h[i-1] + nodeElemCount_h[i-1] ;
}
Vector_h<Index_t> nodeElemCornerList_h(nodeElemStart_h[domNodes-1] +
nodeElemCount_h[domNodes-1] );
for (Index_t i=0; i < domNodes; ++i) {
nodeElemCount_h[i] = 0;
}
for (Index_t j=0; j < 8; ++j) {
for (Index_t i=0; i < domElems; ++i) {
Index_t m = nodelist_h[padded_domElems*j+i];
Index_t k = padded_domElems*j + i ;
Index_t offset = nodeElemStart_h[m] +
nodeElemCount_h[m] ;
nodeElemCornerList_h[offset] = k;
++(nodeElemCount_h[m]) ;
}
}
Index_t clSize = nodeElemStart_h[domNodes-1] +
nodeElemCount_h[domNodes-1] ;
for (Index_t i=0; i < clSize; ++i) {
Index_t clv = nodeElemCornerList_h[i] ;
if ((clv < 0) || (clv > padded_domElems*8)) {
fprintf(stderr,
"AllocateNodeElemIndexes(): nodeElemCornerList entry out of range!\n");
exit(1);
}
}
domain->nodeElemStart = nodeElemStart_h;
domain->nodeElemCount = nodeElemCount_h;
domain->nodeElemCornerList = nodeElemCornerList_h;
/* Create a material IndexSet (entire domain same material for now) */
Vector_h<Index_t> matElemlist_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
matElemlist_h[i] = i ;
}
domain->matElemlist = matElemlist_h;
cudaMallocHost(&domain->dtcourant_h,sizeof(Real_t),0);
cudaMallocHost(&domain->dthydro_h,sizeof(Real_t),0);
cudaMallocHost(&domain->bad_vol_h,sizeof(Index_t),0);
cudaMallocHost(&domain->bad_q_h,sizeof(Index_t),0);
*(domain->bad_vol_h)=-1;
*(domain->bad_q_h)=-1;
*(domain->dthydro_h)=1e20;
*(domain->dtcourant_h)=1e20;
/* initialize material parameters */
domain->time_h = Real_t(0.) ;
domain->dtfixed = Real_t(-1.0e-6) ;
domain->deltatimemultlb = Real_t(1.1) ;
domain->deltatimemultub = Real_t(1.2) ;
domain->stoptime = Real_t(1.0e-2) ;
domain->dtmax = Real_t(1.0e-2) ;
domain->cycle = 0 ;
domain->e_cut = Real_t(1.0e-7) ;
domain->p_cut = Real_t(1.0e-7) ;
domain->q_cut = Real_t(1.0e-7) ;
domain->u_cut = Real_t(1.0e-7) ;
domain->v_cut = Real_t(1.0e-10) ;
domain->hgcoef = Real_t(3.0) ;
domain->ss4o3 = Real_t(4.0)/Real_t(3.0) ;
domain->qstop = Real_t(1.0e+12) ;
domain->monoq_max_slope = Real_t(1.0) ;
domain->monoq_limiter_mult = Real_t(2.0) ;
domain->qlc_monoq = Real_t(0.5) ;
domain->qqc_monoq = Real_t(2.0)/Real_t(3.0) ;
domain->qqc = Real_t(2.0) ;
domain->pmin = Real_t(0.) ;
domain->emin = Real_t(-1.0e+15) ;
domain->dvovmax = Real_t(0.1) ;
domain->eosvmax = Real_t(1.0e+9) ;
domain->eosvmin = Real_t(1.0e-9) ;
domain->refdens = Real_t(1.0) ;
/* initialize field data */
Vector_h<Real_t> nodalMass_h(domNodes);
Vector_h<Real_t> volo_h(domElems);
Vector_h<Real_t> elemMass_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
Real_t x_local[8], y_local[8], z_local[8] ;
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist_h[lnode*padded_domElems+i];
x_local[lnode] = x_h[gnode];
y_local[lnode] = y_h[gnode];
z_local[lnode] = z_h[gnode];
}
// volume calculations
Real_t volume = CalcElemVolume(x_local, y_local, z_local );
volo_h[i] = volume ;
elemMass_h[i] = volume ;
for (Index_t j=0; j<8; ++j) {
Index_t gnode = nodelist_h[j*padded_domElems+i];
nodalMass_h[gnode] += volume / Real_t(8.0) ;
}
}
domain->nodalMass = nodalMass_h;
domain->volo = volo_h;
domain->elemMass= elemMass_h;
/* deposit energy */
domain->octantCorner = 0;
// deposit initial energy
// An energy of 3.948746e+7 is correct for a problem with
// 45 zones along a side - we need to scale it
const Real_t ebase = 3.948746e+7;
Real_t scale = (nx*domain->m_tp)/45.0;
Real_t einit = ebase*scale*scale*scale;
//Real_t einit = ebase;
if (domain->m_rowLoc + domain->m_colLoc + domain->m_planeLoc == 0) {
// Dump into the first zone (which we know is in the corner)
// of the domain that sits at the origin
domain->e[0] = einit;
}
//set initial deltatime based on analytic CFL calculation
domain->deltatime_h = (.5*cbrt(domain->volo[0]))/sqrt(2*einit);
domain->cost = cost;
domain->regNumList.resize(domain->numElem) ; // material indexset
domain->regElemlist.resize(domain->numElem) ; // material indexset
domain->regCSR.resize(nr);
domain->regReps.resize(nr);
domain->regSorted.resize(nr);
// Setup region index sets. For now, these are constant sized
// throughout the run, but could be changed every cycle to
// simulate effects of ALE on the Lagrange solver
domain->CreateRegionIndexSets(nr, balance);
return domain ;
}
/******************* to support region *********************/
void Domain::sortRegions(Vector_h<Int_t>& regReps_h, Vector_h<Index_t>& regSorted_h)
{
Index_t temp;
Vector_h<Index_t> regIndex;
regIndex.resize(numReg);
for(int i = 0; i < numReg; i++)
regIndex[i] = i;
for(int i = 0; i < numReg-1; i++)
for(int j = 0; j < numReg-i-1; j++)
if(regReps_h[j] < regReps_h[j+1])
{
temp = regReps_h[j];
regReps_h[j] = regReps_h[j+1];
regReps_h[j+1] = temp;
temp = regElemSize[j];
regElemSize[j] = regElemSize[j+1];
regElemSize[j+1] = temp;
temp = regIndex[j];
regIndex[j] = regIndex[j+1];
regIndex[j+1] = temp;
}
for(int i = 0; i < numReg; i++)
regSorted_h[regIndex[i]] = i;
}
// simple function for int pow x^y, y >= 0
static Int_t POW(Int_t x, Int_t y)
{
Int_t res = 1;
for (Int_t i = 0; i < y; i++)
res *= x;
return res;
}
void Domain::CreateRegionIndexSets(Int_t nr, Int_t b)
{
#if USE_MPI
Index_t myRank;
MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
srand(myRank);
#else
srand(0);
Index_t myRank = 0;
#endif
numReg = nr;
balance = b;
regElemSize = new Int_t[numReg];
Index_t nextIndex = 0;
Vector_h<Int_t> regCSR_h(regCSR.size()); // records the beginning and end of each region
Vector_h<Int_t> regReps_h(regReps.size()); // records the rep number per region
Vector_h<Index_t> regNumList_h(regNumList.size()); // Region number per domain element
Vector_h<Index_t> regElemlist_h(regElemlist.size()); // region indexset
Vector_h<Index_t> regSorted_h(regSorted.size()); // keeps index of sorted regions
//if we only have one region just fill it
// Fill out the regNumList with material numbers, which are always
// the region index plus one
if(numReg == 1) {
while (nextIndex < numElem) {
regNumList_h[nextIndex] = 1;
nextIndex++;
}
regElemSize[0] = 0;
}
//If we have more than one region distribute the elements.
else {
Int_t regionNum;
Int_t regionVar;
Int_t lastReg = -1;
Int_t binSize;
Int_t elements;
Index_t runto = 0;
Int_t costDenominator = 0;
Int_t* regBinEnd = new Int_t[numReg];
//Determine the relative weights of all the regions.
for (Index_t i=0 ; i<numReg ; ++i) {
regElemSize[i] = 0;
costDenominator += POW((i+1), balance); //Total cost of all regions
regBinEnd[i] = costDenominator; //Chance of hitting a given region is (regBinEnd[i] - regBinEnd[i-1])/costDenominator
}
//Until all elements are assigned
while (nextIndex < numElem) {
//pick the region
regionVar = rand() % costDenominator;
Index_t i = 0;
while(regionVar >= regBinEnd[i])
i++;
//rotate the regions based on MPI rank. Rotation is Rank % NumRegions
regionNum = ((i + myRank) % numReg) + 1;
// make sure we don't pick the same region twice in a row
while(regionNum == lastReg) {
regionVar = rand() % costDenominator;
i = 0;
while(regionVar >= regBinEnd[i])
i++;
regionNum = ((i + myRank) % numReg) + 1;
}
//Pick the bin size of the region and determine the number of elements.
binSize = rand() % 1000;
if(binSize < 773) {
elements = rand() % 15 + 1;
}
else if(binSize < 937) {
elements = rand() % 16 + 16;
}
else if(binSize < 970) {
elements = rand() % 32 + 32;
}
else if(binSize < 974) {
elements = rand() % 64 + 64;
}
else if(binSize < 978) {
elements = rand() % 128 + 128;
}
else if(binSize < 981) {
elements = rand() % 256 + 256;
}
else
elements = rand() % 1537 + 512;
runto = elements + nextIndex;
//Store the elements. If we hit the end before we run out of elements then just stop.
while (nextIndex < runto && nextIndex < numElem) {
regNumList_h[nextIndex] = regionNum;
nextIndex++;
}
lastReg = regionNum;
}
}
// Convert regNumList to region index sets
// First, count size of each region
for (Index_t i=0 ; i<numElem ; ++i) {
int r = regNumList_h[i]-1; // region index == regnum-1
regElemSize[r]++;
}
Index_t rep;
// Second, set the number of representatives (reps) for each region
for (Index_t r=0; r<numReg ; ++r) {
if(r < numReg/2)
rep = 1;
else if(r < (numReg - (numReg+15)/20))
rep = 1 + cost;
else
rep = 10 * (1+ cost);
regReps_h[r] = rep;
}
sortRegions(regReps_h, regSorted_h);
regCSR_h[0] = 0;
// Third, compute each region's starting offset (CSR prefix sum)
for (Index_t i=1 ; i<numReg ; ++i) {
regCSR_h[i] = regCSR_h[i-1] + regElemSize[i-1];
}
// Finally, fill the region index sets
for (Index_t i=0 ; i<numElem ; ++i) {
Index_t r = regSorted_h[regNumList_h[i]-1]; // region index == regnum-1
regElemlist_h[regCSR_h[r]] = i;
regCSR_h[r]++;
}
// Copy to device
regCSR = regCSR_h; // records the beginning and end of each region
regReps = regReps_h; // records the rep number per region
regNumList = regNumList_h; // Region number per domain element
regElemlist = regElemlist_h; // region indexset
regSorted = regSorted_h; // keeps index of sorted regions
} // end of create function
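// Hedged worked example of the weighting above (illustrative helper, unused):
// with numRegions = 3 and balanceExp = 1 the weights are 1, 2, 3, so
// costDenominator = 6 and the bins are hit with probability 1/6, 2/6 and 3/6
// before the MPI-rank rotation is applied.
static Int_t RegionCostDenominator_sketch(Int_t numRegions, Int_t balanceExp)
{
Int_t costDenominator = 0;
for (Int_t i = 0; i < numRegions; i++)
costDenominator += POW(i+1, balanceExp); // same weights as CreateRegionIndexSets
return costDenominator;
}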
static inline
void TimeIncrement(Domain* domain)
{
// To make sure dtcourant and dthydro have been updated on host
cudaEventSynchronize(domain->time_constraint_computed);
Real_t targetdt = domain->stoptime - domain->time_h;
if ((domain->dtfixed <= Real_t(0.0)) && (domain->cycle != Int_t(0))) {
Real_t ratio ;
/* This will require a reduction in parallel */
Real_t gnewdt = Real_t(1.0e+20) ;
Real_t newdt;
if ( *(domain->dtcourant_h) < gnewdt) {
gnewdt = *(domain->dtcourant_h) / Real_t(2.0) ;
}
if ( *(domain->dthydro_h) < gnewdt) {
gnewdt = *(domain->dthydro_h) * Real_t(2.0) / Real_t(3.0) ;
}
#if USE_MPI
MPI_Allreduce(&gnewdt, &newdt, 1,
((sizeof(Real_t) == 4) ? MPI_FLOAT : MPI_DOUBLE),
MPI_MIN, MPI_COMM_WORLD) ;
#else
newdt = gnewdt;
#endif
Real_t olddt = domain->deltatime_h;
ratio = newdt / olddt ;
if (ratio >= Real_t(1.0)) {
if (ratio < domain->deltatimemultlb) {
newdt = olddt ;
}
else if (ratio > domain->deltatimemultub) {
newdt = olddt*domain->deltatimemultub ;
}
}
if (newdt > domain->dtmax) {
newdt = domain->dtmax ;
}
domain->deltatime_h = newdt ;
}
/* TRY TO PREVENT VERY SMALL SCALING ON THE NEXT CYCLE */
if ((targetdt > domain->deltatime_h) &&
(targetdt < (Real_t(4.0) * domain->deltatime_h / Real_t(3.0))) ) {
targetdt = Real_t(2.0) * domain->deltatime_h / Real_t(3.0) ;
}
if (targetdt < domain->deltatime_h) {
domain->deltatime_h = targetdt ;
}
domain->time_h += domain->deltatime_h ;
++domain->cycle ;
}
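// Hedged sketch of the growth limiter used above (TimeIncrement() remains the
// authoritative version): a candidate dt that grows by less than deltatimemultlb
// is held at the old value, growth is capped at deltatimemultub, and the result
// never exceeds dtmax.
static inline Real_t ClampNewDt_sketch(Real_t olddt, Real_t newdt,
Real_t multlb, Real_t multub, Real_t dtmax)
{
Real_t ratio = newdt / olddt ;
if (ratio >= Real_t(1.0)) {
if (ratio < multlb) {
newdt = olddt ;
}
else if (ratio > multub) {
newdt = olddt*multub ;
}
}
if (newdt > dtmax) {
newdt = dtmax ;
}
return newdt ;
}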
__device__
static
__forceinline__
void CalcElemShapeFunctionDerivatives( const Real_t* const x,
const Real_t* const y,
const Real_t* const z,
Real_t b[][8],
Real_t* const volume )
{
const Real_t x0 = x[0] ; const Real_t x1 = x[1] ;
const Real_t x2 = x[2] ; const Real_t x3 = x[3] ;
const Real_t x4 = x[4] ; const Real_t x5 = x[5] ;
const Real_t x6 = x[6] ; const Real_t x7 = x[7] ;
const Real_t y0 = y[0] ; const Real_t y1 = y[1] ;
const Real_t y2 = y[2] ; const Real_t y3 = y[3] ;
const Real_t y4 = y[4] ; const Real_t y5 = y[5] ;
const Real_t y6 = y[6] ; const Real_t y7 = y[7] ;
const Real_t z0 = z[0] ; const Real_t z1 = z[1] ;
const Real_t z2 = z[2] ; const Real_t z3 = z[3] ;
const Real_t z4 = z[4] ; const Real_t z5 = z[5] ;
const Real_t z6 = z[6] ; const Real_t z7 = z[7] ;
Real_t fjxxi, fjxet, fjxze;
Real_t fjyxi, fjyet, fjyze;
Real_t fjzxi, fjzet, fjzze;
Real_t cjxxi, cjxet, cjxze;
Real_t cjyxi, cjyet, cjyze;
Real_t cjzxi, cjzet, cjzze;
fjxxi = Real_t(.125) * ( (x6-x0) + (x5-x3) - (x7-x1) - (x4-x2) );
fjxet = Real_t(.125) * ( (x6-x0) - (x5-x3) + (x7-x1) - (x4-x2) );
fjxze = Real_t(.125) * ( (x6-x0) + (x5-x3) + (x7-x1) + (x4-x2) );
fjyxi = Real_t(.125) * ( (y6-y0) + (y5-y3) - (y7-y1) - (y4-y2) );
fjyet = Real_t(.125) * ( (y6-y0) - (y5-y3) + (y7-y1) - (y4-y2) );
fjyze = Real_t(.125) * ( (y6-y0) + (y5-y3) + (y7-y1) + (y4-y2) );
fjzxi = Real_t(.125) * ( (z6-z0) + (z5-z3) - (z7-z1) - (z4-z2) );
fjzet = Real_t(.125) * ( (z6-z0) - (z5-z3) + (z7-z1) - (z4-z2) );
fjzze = Real_t(.125) * ( (z6-z0) + (z5-z3) + (z7-z1) + (z4-z2) );
/* compute cofactors */
cjxxi = (fjyet * fjzze) - (fjzet * fjyze);
cjxet = - (fjyxi * fjzze) + (fjzxi * fjyze);
cjxze = (fjyxi * fjzet) - (fjzxi * fjyet);
cjyxi = - (fjxet * fjzze) + (fjzet * fjxze);
cjyet = (fjxxi * fjzze) - (fjzxi * fjxze);
cjyze = - (fjxxi * fjzet) + (fjzxi * fjxet);
cjzxi = (fjxet * fjyze) - (fjyet * fjxze);
cjzet = - (fjxxi * fjyze) + (fjyxi * fjxze);
cjzze = (fjxxi * fjyet) - (fjyxi * fjxet);
/* calculate partials :
this need only be done for l = 0,1,2,3 since, by symmetry,
(6,7,4,5) = -(0,1,2,3).
*/
b[0][0] = - cjxxi - cjxet - cjxze;
b[0][1] = cjxxi - cjxet - cjxze;
b[0][2] = cjxxi + cjxet - cjxze;
b[0][3] = - cjxxi + cjxet - cjxze;
b[0][4] = -b[0][2];
b[0][5] = -b[0][3];
b[0][6] = -b[0][0];
b[0][7] = -b[0][1];
/*
b[0][4] = - cjxxi - cjxet + cjxze;
b[0][5] = + cjxxi - cjxet + cjxze;
b[0][6] = + cjxxi + cjxet + cjxze;
b[0][7] = - cjxxi + cjxet + cjxze;
*/
b[1][0] = - cjyxi - cjyet - cjyze;
b[1][1] = cjyxi - cjyet - cjyze;
b[1][2] = cjyxi + cjyet - cjyze;
b[1][3] = - cjyxi + cjyet - cjyze;
b[1][4] = -b[1][2];
b[1][5] = -b[1][3];
b[1][6] = -b[1][0];
b[1][7] = -b[1][1];
b[2][0] = - cjzxi - cjzet - cjzze;
b[2][1] = cjzxi - cjzet - cjzze;
b[2][2] = cjzxi + cjzet - cjzze;
b[2][3] = - cjzxi + cjzet - cjzze;
b[2][4] = -b[2][2];
b[2][5] = -b[2][3];
b[2][6] = -b[2][0];
b[2][7] = -b[2][1];
/* calculate jacobian determinant (volume) */
*volume = Real_t(8.) * ( fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);
}
static
__device__
__forceinline__
void SumElemFaceNormal(Real_t *normalX0, Real_t *normalY0, Real_t *normalZ0,
Real_t *normalX1, Real_t *normalY1, Real_t *normalZ1,
Real_t *normalX2, Real_t *normalY2, Real_t *normalZ2,
Real_t *normalX3, Real_t *normalY3, Real_t *normalZ3,
const Real_t x0, const Real_t y0, const Real_t z0,
const Real_t x1, const Real_t y1, const Real_t z1,
const Real_t x2, const Real_t y2, const Real_t z2,
const Real_t x3, const Real_t y3, const Real_t z3)
{
Real_t bisectX0 = Real_t(0.5) * (x3 + x2 - x1 - x0);
Real_t bisectY0 = Real_t(0.5) * (y3 + y2 - y1 - y0);
Real_t bisectZ0 = Real_t(0.5) * (z3 + z2 - z1 - z0);
Real_t bisectX1 = Real_t(0.5) * (x2 + x1 - x3 - x0);
Real_t bisectY1 = Real_t(0.5) * (y2 + y1 - y3 - y0);
Real_t bisectZ1 = Real_t(0.5) * (z2 + z1 - z3 - z0);
Real_t areaX = Real_t(0.25) * (bisectY0 * bisectZ1 - bisectZ0 * bisectY1);
Real_t areaY = Real_t(0.25) * (bisectZ0 * bisectX1 - bisectX0 * bisectZ1);
Real_t areaZ = Real_t(0.25) * (bisectX0 * bisectY1 - bisectY0 * bisectX1);
*normalX0 += areaX;
*normalX1 += areaX;
*normalX2 += areaX;
*normalX3 += areaX;
*normalY0 += areaY;
*normalY1 += areaY;
*normalY2 += areaY;
*normalY3 += areaY;
*normalZ0 += areaZ;
*normalZ1 += areaZ;
*normalZ2 += areaZ;
*normalZ3 += areaZ;
}
static
__device__
__forceinline__
void SumElemFaceNormal_warp_per_4cell(
Real_t *normalX0, Real_t *normalY0, Real_t *normalZ0,
const Real_t x, const Real_t y, const Real_t z,
int node,
int n0, int n1, int n2, int n3)
{
Real_t coef0 = Real_t(0.5);
Real_t coef1 = Real_t(0.5);
if (node == n0 || node == n1 || node==n2 || node==n3)
{
if (node == n0 || node == n1)
coef0 = -coef0;
if (node == n0 || node == n3)
coef1 = -coef1;
}
else
{
coef0 = Real_t(0.);
coef1 = Real_t(0.);
}
Real_t bisectX0 = coef0*x;
Real_t bisectY0 = coef0*y;
Real_t bisectZ0 = coef0*z;
Real_t bisectX1 = coef1*x;
Real_t bisectY1 = coef1*y;
Real_t bisectZ1 = coef1*z;
SumOverNodesShfl(bisectX0);
SumOverNodesShfl(bisectY0);
SumOverNodesShfl(bisectZ0);
SumOverNodesShfl(bisectX1);
SumOverNodesShfl(bisectY1);
SumOverNodesShfl(bisectZ1);
Real_t areaX = Real_t(0.25) * (bisectY0 * bisectZ1 - bisectZ0 * bisectY1);
Real_t areaY = Real_t(0.25) * (bisectZ0 * bisectX1 - bisectX0 * bisectZ1);
Real_t areaZ = Real_t(0.25) * (bisectX0 * bisectY1 - bisectY0 * bisectX1);
if (node == n0 || node == n1 || node==n2 || node==n3)
{
*normalX0 += areaX;
*normalY0 += areaY;
*normalZ0 += areaZ;
}
}
__device__
static inline
void CalcElemNodeNormals(Real_t pfx[8],
Real_t pfy[8],
Real_t pfz[8],
const Real_t x[8],
const Real_t y[8],
const Real_t z[8])
{
for (Index_t i = 0 ; i < 8 ; ++i) {
pfx[i] = Real_t(0.0);
pfy[i] = Real_t(0.0);
pfz[i] = Real_t(0.0);
}
/* evaluate face one: nodes 0, 1, 2, 3 */
SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
&pfx[1], &pfy[1], &pfz[1],
&pfx[2], &pfy[2], &pfz[2],
&pfx[3], &pfy[3], &pfz[3],
x[0], y[0], z[0], x[1], y[1], z[1],
x[2], y[2], z[2], x[3], y[3], z[3]);
/* evaluate face two: nodes 0, 4, 5, 1 */
SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
&pfx[4], &pfy[4], &pfz[4],
&pfx[5], &pfy[5], &pfz[5],
&pfx[1], &pfy[1], &pfz[1],
x[0], y[0], z[0], x[4], y[4], z[4],
x[5], y[5], z[5], x[1], y[1], z[1]);
/* evaluate face three: nodes 1, 5, 6, 2 */
SumElemFaceNormal(&pfx[1], &pfy[1], &pfz[1],
&pfx[5], &pfy[5], &pfz[5],
&pfx[6], &pfy[6], &pfz[6],
&pfx[2], &pfy[2], &pfz[2],
x[1], y[1], z[1], x[5], y[5], z[5],
x[6], y[6], z[6], x[2], y[2], z[2]);
/* evaluate face four: nodes 2, 6, 7, 3 */
SumElemFaceNormal(&pfx[2], &pfy[2], &pfz[2],
&pfx[6], &pfy[6], &pfz[6],
&pfx[7], &pfy[7], &pfz[7],
&pfx[3], &pfy[3], &pfz[3],
x[2], y[2], z[2], x[6], y[6], z[6],
x[7], y[7], z[7], x[3], y[3], z[3]);
/* evaluate face five: nodes 3, 7, 4, 0 */
SumElemFaceNormal(&pfx[3], &pfy[3], &pfz[3],
&pfx[7], &pfy[7], &pfz[7],
&pfx[4], &pfy[4], &pfz[4],
&pfx[0], &pfy[0], &pfz[0],
x[3], y[3], z[3], x[7], y[7], z[7],
x[4], y[4], z[4], x[0], y[0], z[0]);
/* evaluate face six: nodes 4, 7, 6, 5 */
SumElemFaceNormal(&pfx[4], &pfy[4], &pfz[4],
&pfx[7], &pfy[7], &pfz[7],
&pfx[6], &pfy[6], &pfz[6],
&pfx[5], &pfy[5], &pfz[5],
x[4], y[4], z[4], x[7], y[7], z[7],
x[6], y[6], z[6], x[5], y[5], z[5]);
}
__global__
void AddNodeForcesFromElems_kernel( Index_t numNode,
Index_t padded_numNode,
const Int_t* nodeElemCount,
const Int_t* nodeElemStart,
const Index_t* nodeElemCornerList,
const Real_t* fx_elem,
const Real_t* fy_elem,
const Real_t* fz_elem,
Real_t* fx_node,
Real_t* fy_node,
Real_t* fz_node,
const Int_t num_threads)
{
int tid=blockDim.x*blockIdx.x+threadIdx.x;
if (tid < num_threads)
{
Index_t g_i = tid;
Int_t count=nodeElemCount[g_i];
Int_t start=nodeElemStart[g_i];
Real_t fx,fy,fz;
fx=fy=fz=Real_t(0.0);
for (int j=0;j<count;j++)
{
Index_t pos=nodeElemCornerList[start+j]; // Uncoalesced access here
fx += fx_elem[pos];
fy += fy_elem[pos];
fz += fz_elem[pos];
}
fx_node[g_i]=fx;
fy_node[g_i]=fy;
fz_node[g_i]=fz;
}
}
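// Hedged launch-shape sketch (the real launch sites compute this inline): the
// kernel above uses one thread per node, so the grid is the node count rounded
// up to whole blocks.
static inline dim3 OneThreadPerNodeGrid_sketch(Index_t numNode, int blockSize)
{
return dim3((numNode + blockSize - 1) / blockSize, 1, 1);
}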
static
__device__
__forceinline__
void VoluDer(const Real_t x0, const Real_t x1, const Real_t x2,
const Real_t x3, const Real_t x4, const Real_t x5,
const Real_t y0, const Real_t y1, const Real_t y2,
const Real_t y3, const Real_t y4, const Real_t y5,
const Real_t z0, const Real_t z1, const Real_t z2,
const Real_t z3, const Real_t z4, const Real_t z5,
Real_t* dvdx, Real_t* dvdy, Real_t* dvdz)
{
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ;
*dvdx =
(y1 + y2) * (z0 + z1) - (y0 + y1) * (z1 + z2) +
(y0 + y4) * (z3 + z4) - (y3 + y4) * (z0 + z4) -
(y2 + y5) * (z3 + z5) + (y3 + y5) * (z2 + z5);
*dvdy =
- (x1 + x2) * (z0 + z1) + (x0 + x1) * (z1 + z2) -
(x0 + x4) * (z3 + z4) + (x3 + x4) * (z0 + z4) +
(x2 + x5) * (z3 + z5) - (x3 + x5) * (z2 + z5);
*dvdz =
- (y1 + y2) * (x0 + x1) + (y0 + y1) * (x1 + x2) -
(y0 + y4) * (x3 + x4) + (y3 + y4) * (x0 + x4) +
(y2 + y5) * (x3 + x5) - (y3 + y5) * (x2 + x5);
*dvdx *= twelfth;
*dvdy *= twelfth;
*dvdz *= twelfth;
}
static
__device__
__forceinline__
void CalcElemVolumeDerivative(Real_t dvdx[8],
Real_t dvdy[8],
Real_t dvdz[8],
const Real_t x[8],
const Real_t y[8],
const Real_t z[8])
{
VoluDer(x[1], x[2], x[3], x[4], x[5], x[7],
y[1], y[2], y[3], y[4], y[5], y[7],
z[1], z[2], z[3], z[4], z[5], z[7],
&dvdx[0], &dvdy[0], &dvdz[0]);
VoluDer(x[0], x[1], x[2], x[7], x[4], x[6],
y[0], y[1], y[2], y[7], y[4], y[6],
z[0], z[1], z[2], z[7], z[4], z[6],
&dvdx[3], &dvdy[3], &dvdz[3]);
VoluDer(x[3], x[0], x[1], x[6], x[7], x[5],
y[3], y[0], y[1], y[6], y[7], y[5],
z[3], z[0], z[1], z[6], z[7], z[5],
&dvdx[2], &dvdy[2], &dvdz[2]);
VoluDer(x[2], x[3], x[0], x[5], x[6], x[4],
y[2], y[3], y[0], y[5], y[6], y[4],
z[2], z[3], z[0], z[5], z[6], z[4],
&dvdx[1], &dvdy[1], &dvdz[1]);
VoluDer(x[7], x[6], x[5], x[0], x[3], x[1],
y[7], y[6], y[5], y[0], y[3], y[1],
z[7], z[6], z[5], z[0], z[3], z[1],
&dvdx[4], &dvdy[4], &dvdz[4]);
VoluDer(x[4], x[7], x[6], x[1], x[0], x[2],
y[4], y[7], y[6], y[1], y[0], y[2],
z[4], z[7], z[6], z[1], z[0], z[2],
&dvdx[5], &dvdy[5], &dvdz[5]);
VoluDer(x[5], x[4], x[7], x[2], x[1], x[3],
y[5], y[4], y[7], y[2], y[1], y[3],
z[5], z[4], z[7], z[2], z[1], z[3],
&dvdx[6], &dvdy[6], &dvdz[6]);
VoluDer(x[6], x[5], x[4], x[3], x[2], x[0],
y[6], y[5], y[4], y[3], y[2], y[0],
z[6], z[5], z[4], z[3], z[2], z[0],
&dvdx[7], &dvdy[7], &dvdz[7]);
}
static
__device__
__forceinline__
void CalcElemFBHourglassForce(Real_t *xd, Real_t *yd, Real_t *zd, Real_t *hourgam0,
Real_t *hourgam1, Real_t *hourgam2, Real_t *hourgam3,
Real_t *hourgam4, Real_t *hourgam5, Real_t *hourgam6,
Real_t *hourgam7, Real_t coefficient,
Real_t *hgfx, Real_t *hgfy, Real_t *hgfz )
{
Index_t i00=0;
Index_t i01=1;
Index_t i02=2;
Index_t i03=3;
Real_t h00 =
hourgam0[i00] * xd[0] + hourgam1[i00] * xd[1] +
hourgam2[i00] * xd[2] + hourgam3[i00] * xd[3] +
hourgam4[i00] * xd[4] + hourgam5[i00] * xd[5] +
hourgam6[i00] * xd[6] + hourgam7[i00] * xd[7];
Real_t h01 =
hourgam0[i01] * xd[0] + hourgam1[i01] * xd[1] +
hourgam2[i01] * xd[2] + hourgam3[i01] * xd[3] +
hourgam4[i01] * xd[4] + hourgam5[i01] * xd[5] +
hourgam6[i01] * xd[6] + hourgam7[i01] * xd[7];
Real_t h02 =
hourgam0[i02] * xd[0] + hourgam1[i02] * xd[1]+
hourgam2[i02] * xd[2] + hourgam3[i02] * xd[3]+
hourgam4[i02] * xd[4] + hourgam5[i02] * xd[5]+
hourgam6[i02] * xd[6] + hourgam7[i02] * xd[7];
Real_t h03 =
hourgam0[i03] * xd[0] + hourgam1[i03] * xd[1] +
hourgam2[i03] * xd[2] + hourgam3[i03] * xd[3] +
hourgam4[i03] * xd[4] + hourgam5[i03] * xd[5] +
hourgam6[i03] * xd[6] + hourgam7[i03] * xd[7];
hgfx[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfx[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfx[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfx[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfx[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfx[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfx[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfx[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
h00 =
hourgam0[i00] * yd[0] + hourgam1[i00] * yd[1] +
hourgam2[i00] * yd[2] + hourgam3[i00] * yd[3] +
hourgam4[i00] * yd[4] + hourgam5[i00] * yd[5] +
hourgam6[i00] * yd[6] + hourgam7[i00] * yd[7];
h01 =
hourgam0[i01] * yd[0] + hourgam1[i01] * yd[1] +
hourgam2[i01] * yd[2] + hourgam3[i01] * yd[3] +
hourgam4[i01] * yd[4] + hourgam5[i01] * yd[5] +
hourgam6[i01] * yd[6] + hourgam7[i01] * yd[7];
h02 =
hourgam0[i02] * yd[0] + hourgam1[i02] * yd[1]+
hourgam2[i02] * yd[2] + hourgam3[i02] * yd[3]+
hourgam4[i02] * yd[4] + hourgam5[i02] * yd[5]+
hourgam6[i02] * yd[6] + hourgam7[i02] * yd[7];
h03 =
hourgam0[i03] * yd[0] + hourgam1[i03] * yd[1] +
hourgam2[i03] * yd[2] + hourgam3[i03] * yd[3] +
hourgam4[i03] * yd[4] + hourgam5[i03] * yd[5] +
hourgam6[i03] * yd[6] + hourgam7[i03] * yd[7];
hgfy[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfy[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfy[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfy[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfy[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfy[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfy[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfy[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
h00 =
hourgam0[i00] * zd[0] + hourgam1[i00] * zd[1] +
hourgam2[i00] * zd[2] + hourgam3[i00] * zd[3] +
hourgam4[i00] * zd[4] + hourgam5[i00] * zd[5] +
hourgam6[i00] * zd[6] + hourgam7[i00] * zd[7];
h01 =
hourgam0[i01] * zd[0] + hourgam1[i01] * zd[1] +
hourgam2[i01] * zd[2] + hourgam3[i01] * zd[3] +
hourgam4[i01] * zd[4] + hourgam5[i01] * zd[5] +
hourgam6[i01] * zd[6] + hourgam7[i01] * zd[7];
h02 =
hourgam0[i02] * zd[0] + hourgam1[i02] * zd[1]+
hourgam2[i02] * zd[2] + hourgam3[i02] * zd[3]+
hourgam4[i02] * zd[4] + hourgam5[i02] * zd[5]+
hourgam6[i02] * zd[6] + hourgam7[i02] * zd[7];
h03 =
hourgam0[i03] * zd[0] + hourgam1[i03] * zd[1] +
hourgam2[i03] * zd[2] + hourgam3[i03] * zd[3] +
hourgam4[i03] * zd[4] + hourgam5[i03] * zd[5] +
hourgam6[i03] * zd[6] + hourgam7[i03] * zd[7];
hgfz[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfz[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfz[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfz[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfz[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfz[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfz[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfz[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
}
__device__
__forceinline__
void CalcHourglassModes(const Real_t xn[8], const Real_t yn[8], const Real_t zn[8],
const Real_t dvdxn[8], const Real_t dvdyn[8], const Real_t dvdzn[8],
Real_t hourgam[8][4], Real_t volinv)
{
Real_t hourmodx, hourmody, hourmodz;
hourmodx = xn[0] + xn[1] - xn[2] - xn[3] - xn[4] - xn[5] + xn[6] + xn[7];
hourmody = yn[0] + yn[1] - yn[2] - yn[3] - yn[4] - yn[5] + yn[6] + yn[7];
hourmodz = zn[0] + zn[1] - zn[2] - zn[3] - zn[4] - zn[5] + zn[6] + zn[7]; // 21
hourgam[0][0] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][0] = 1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][0] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][0] = -1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][0] = -1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][0] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][0] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][0] = 1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz); // 60
hourmodx = xn[0] - xn[1] - xn[2] + xn[3] - xn[4] + xn[5] + xn[6] - xn[7];
hourmody = yn[0] - yn[1] - yn[2] + yn[3] - yn[4] + yn[5] + yn[6] - yn[7];
hourmodz = zn[0] - zn[1] - zn[2] + zn[3] - zn[4] + zn[5] + zn[6] - zn[7];
hourgam[0][1] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][1] = -1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][1] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][1] = 1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][1] = -1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][1] = 1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][1] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][1] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
hourmodx = xn[0] - xn[1] + xn[2] - xn[3] + xn[4] - xn[5] + xn[6] - xn[7];
hourmody = yn[0] - yn[1] + yn[2] - yn[3] + yn[4] - yn[5] + yn[6] - yn[7];
hourmodz = zn[0] - zn[1] + zn[2] - zn[3] + zn[4] - zn[5] + zn[6] - zn[7];
hourgam[0][2] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][2] = -1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][2] = 1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][2] = -1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][2] = 1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][2] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][2] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][2] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
hourmodx = -xn[0] + xn[1] - xn[2] + xn[3] + xn[4] - xn[5] + xn[6] - xn[7];
hourmody = -yn[0] + yn[1] - yn[2] + yn[3] + yn[4] - yn[5] + yn[6] - yn[7];
hourmodz = -zn[0] + zn[1] - zn[2] + zn[3] + zn[4] - zn[5] + zn[6] - zn[7];
hourgam[0][3] = -1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][3] = 1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][3] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][3] = 1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][3] = 1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][3] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][3] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][3] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
}
template< bool hourg_gt_zero >
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(64,4)
#else
__launch_bounds__(64,8)
#endif
void CalcVolumeForceForElems_kernel(
const Real_t* __restrict__ volo,
const Real_t* __restrict__ v,
const Real_t* __restrict__ p,
const Real_t* __restrict__ q,
Real_t hourg,
Index_t numElem,
Index_t padded_numElem,
const Index_t* __restrict__ nodelist,
const Real_t* __restrict__ ss,
const Real_t* __restrict__ elemMass,
const Real_t* __restrict__ x, const Real_t* __restrict__ y, const Real_t* __restrict__ z,
const Real_t* __restrict__ xd, const Real_t* __restrict__ yd, const Real_t* __restrict__ zd,
//TextureObj<Real_t> x, TextureObj<Real_t> y, TextureObj<Real_t> z,
//TextureObj<Real_t> xd, TextureObj<Real_t> yd, TextureObj<Real_t> zd,
//TextureObj<Real_t>* x, TextureObj<Real_t>* y, TextureObj<Real_t>* z,
//TextureObj<Real_t>* xd, TextureObj<Real_t>* yd, TextureObj<Real_t>* zd,
#ifdef DOUBLE_PRECISION // For floats, use atomicAdd
Real_t* __restrict__ fx_elem,
Real_t* __restrict__ fy_elem,
Real_t* __restrict__ fz_elem,
#else
Real_t* __restrict__ fx_node,
Real_t* __restrict__ fy_node,
Real_t* __restrict__ fz_node,
#endif
Index_t* __restrict__ bad_vol,
const Index_t num_threads)
{
/*************************************************
* FUNCTION: Calculates the volume forces
*************************************************/
Real_t xn[8],yn[8],zn[8];
Real_t xdn[8],ydn[8],zdn[8];
Real_t dvdxn[8],dvdyn[8],dvdzn[8];
Real_t hgfx[8],hgfy[8],hgfz[8];
Real_t hourgam[8][4];
Real_t coefficient;
int elem=blockDim.x*blockIdx.x+threadIdx.x;
if (elem < num_threads)
{
Real_t volume = v[elem];
Real_t det = volo[elem] * volume;
// Check for bad volume
if (volume < 0.) {
*bad_vol = elem;
}
Real_t ss1 = ss[elem];
Real_t mass1 = elemMass[elem];
Real_t sigxx = -p[elem] - q[elem];
Index_t n[8];
#pragma unroll
for (int i=0;i<8;i++) {
n[i] = nodelist[elem+i*padded_numElem];
}
Real_t volinv = Real_t(1.0) / det;
//#pragma unroll
//for (int i=0;i<8;i++) {
// xn[i] =x[n[i]];
// yn[i] =y[n[i]];
// zn[i] =z[n[i]];
//}
#pragma unroll
for (int i=0;i<8;i++)
xn[i] =x[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
yn[i] =y[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
zn[i] =z[n[i]];
Real_t volume13 = CBRT(det);
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
/*************************************************/
/* compute the volume derivatives */
/*************************************************/
CalcElemVolumeDerivative(dvdxn, dvdyn, dvdzn, xn, yn, zn);
/*************************************************/
/* compute the hourglass modes */
/*************************************************/
CalcHourglassModes(xn,yn,zn,dvdxn,dvdyn,dvdzn,hourgam,volinv);
/*************************************************/
/* CalcStressForElems */
/*************************************************/
Real_t B[3][8];
CalcElemShapeFunctionDerivatives(xn, yn, zn, B, &det);
CalcElemNodeNormals( B[0] , B[1], B[2], xn, yn, zn);
// Check for bad volume
if (det < 0.) {
*bad_vol = elem;
}
#pragma unroll
for (int i=0;i<8;i++)
{
hgfx[i] = -( sigxx*B[0][i] );
hgfy[i] = -( sigxx*B[1][i] );
hgfz[i] = -( sigxx*B[2][i] );
}
if (hourg_gt_zero)
{
/*************************************************/
/* CalcFBHourglassForceForElems */
/*************************************************/
// #pragma unroll
// for (int i=0;i<8;i++) {
// xdn[i] =xd[n[i]];
// ydn[i] =yd[n[i]];
// zdn[i] =zd[n[i]];
// }
#pragma unroll
for (int i=0;i<8;i++)
xdn[i] =xd[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
ydn[i] =yd[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
zdn[i] =zd[n[i]];
CalcElemFBHourglassForce
( &xdn[0],&ydn[0],&zdn[0],
hourgam[0],hourgam[1],hourgam[2],hourgam[3],
hourgam[4],hourgam[5],hourgam[6],hourgam[7],
coefficient,
&hgfx[0],&hgfy[0],&hgfz[0]
);
}
#ifdef DOUBLE_PRECISION
#pragma unroll
for (int node=0;node<8;node++)
{
Index_t store_loc = elem+padded_numElem*node;
fx_elem[store_loc]=hgfx[node];
fy_elem[store_loc]=hgfy[node];
fz_elem[store_loc]=hgfz[node];
}
#else
#pragma unroll
for (int i=0;i<8;i++)
{
Index_t ni= n[i];
atomicAdd(&fx_node[ni],hgfx[i]);
atomicAdd(&fy_node[ni],hgfy[i]);
atomicAdd(&fz_node[ni],hgfz[i]);
}
#endif
} // If elem < numElem
}
template< bool hourg_gt_zero, int cta_size>
__global__
void CalcVolumeForceForElems_kernel_warp_per_4cell(
const Real_t* __restrict__ volo,
const Real_t* __restrict__ v,
const Real_t* __restrict__ p,
const Real_t* __restrict__ q,
Real_t hourg,
Index_t numElem,
Index_t padded_numElem,
const Index_t* __restrict__ nodelist,
const Real_t* __restrict__ ss,
const Real_t* __restrict__ elemMass,
//const Real_t __restrict__ *x, const Real_t __restrict__ *y, const Real_t __restrict__ *z,
//const Real_t __restrict__ *xd, const Real_t __restrict__ *yd, const Real_t __restrict__ *zd,
const Real_t *x, const Real_t *y, const Real_t *z,
const Real_t *xd, const Real_t *yd, const Real_t *zd,
#ifdef DOUBLE_PRECISION // For floats, use atomicAdd
Real_t* __restrict__ fx_elem,
Real_t* __restrict__ fy_elem,
Real_t* __restrict__ fz_elem,
#else
Real_t* __restrict__ fx_node,
Real_t* __restrict__ fy_node,
Real_t* __restrict__ fz_node,
#endif
Index_t* __restrict__ bad_vol,
const Index_t num_threads)
{
/*************************************************
* FUNCTION: Calculates the volume forces
*************************************************/
  Real_t xn,yn,zn;
  Real_t xdn,ydn,zdn;
  Real_t dvdxn,dvdyn,dvdzn;
  Real_t hgfx,hgfy,hgfz;
Real_t hourgam[4];
Real_t coefficient;
int tid=blockDim.x*blockIdx.x+threadIdx.x;
int elem = tid >> 3; // elem = tid/8
int node = tid & 7; // node = tid%8
// elem within cta
// int cta_elem = threadIdx.x/8;
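  // Thread mapping: every 8 consecutive threads cooperate on one hex element,
  // one thread per node, so a 32-thread warp covers 4 elements (hence the kernel name).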
if (elem < num_threads)
{
Real_t volume = v[elem];
Real_t det = volo[elem] * volume;
// Check for bad volume
if (volume < 0.) {
*bad_vol = elem;
}
Real_t ss1 = ss[elem];
Real_t mass1 = elemMass[elem];
Real_t sigxx = -p[elem] - q[elem];
Index_t node_id;
node_id = nodelist[elem+node*padded_numElem];
Real_t volinv = Real_t(1.0) / det;
xn =x[node_id];
yn =y[node_id];
zn =z[node_id];
Real_t volume13 = CBRT(det);
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
/*************************************************/
/* compute the volume derivatives */
/*************************************************/
unsigned int ind0,ind1,ind2,ind3,ind4,ind5;
// Use octal number to represent the indices for each node
//ind0 = 012307456;
//ind1 = 023016745;
//ind2 = 030125674;
//ind3 = 045670123;
//ind4 = 056743012;
//ind5 = 074561230;
    //int mask = 7u << (3*node);
switch(node) {
case 0:
{ind0=1; ind1=2; ind2=3; ind3=4; ind4=5; ind5=7;
break;}
case 1:
{ind0=2; ind1=3; ind2=0; ind3=5; ind4=6; ind5=4;
break;}
case 2:
{ind0=3; ind1=0; ind2=1; ind3=6; ind4=7; ind5=5;
break;}
case 3:
{ind0=0; ind1=1; ind2=2; ind3=7; ind4=4; ind5=6;
break;}
case 4:
{ind0=7; ind1=6; ind2=5; ind3=0; ind4=3; ind5=1;
break;}
case 5:
{ind0=4; ind1=7; ind2=6; ind3=1; ind4=0; ind5=2;
break;}
case 6:
{ind0=5; ind1=4; ind2=7; ind3=2; ind4=1; ind5=3;
break;}
case 7:
{ind0=6; ind1=5; ind2=4; ind3=3; ind4=2; ind5=0;
break;}
}
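    // utils::shfl(val, ind, 8) reads val from lane `ind` within this thread's
    // 8-lane subgroup, so each thread gathers the six neighbor-node coordinates
    // its volume-derivative stencil needs without touching shared memory.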
VOLUDER(utils::shfl(yn,ind0,8),utils::shfl(yn,ind1,8),utils::shfl(yn,ind2,8),
utils::shfl(yn,ind3,8),utils::shfl(yn,ind4,8),utils::shfl(yn,ind5,8),
utils::shfl(zn,ind0,8),utils::shfl(zn,ind1,8),utils::shfl(zn,ind2,8),
utils::shfl(zn,ind3,8),utils::shfl(zn,ind4,8),utils::shfl(zn,ind5,8),
dvdxn);
VOLUDER(utils::shfl(zn,ind0,8),utils::shfl(zn,ind1,8),utils::shfl(zn,ind2,8),
utils::shfl(zn,ind3,8),utils::shfl(zn,ind4,8),utils::shfl(zn,ind5,8),
utils::shfl(xn,ind0,8),utils::shfl(xn,ind1,8),utils::shfl(xn,ind2,8),
utils::shfl(xn,ind3,8),utils::shfl(xn,ind4,8),utils::shfl(xn,ind5,8),
dvdyn);
VOLUDER(utils::shfl(xn,ind0,8),utils::shfl(xn,ind1,8),utils::shfl(xn,ind2,8),
utils::shfl(xn,ind3,8),utils::shfl(xn,ind4,8),utils::shfl(xn,ind5,8),
utils::shfl(yn,ind0,8),utils::shfl(yn,ind1,8),utils::shfl(yn,ind2,8),
utils::shfl(yn,ind3,8),utils::shfl(yn,ind4,8),utils::shfl(yn,ind5,8),
dvdzn);
/*************************************************/
/* compute the hourglass modes */
/*************************************************/
Real_t hourmodx, hourmody, hourmodz;
const Real_t posf = Real_t( 1.);
const Real_t negf = Real_t(-1.);
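    // Each hourgam[m] starts as the +/-1 entry of the m-th hourglass base vector
    // for this node and is then orthogonalized against the element geometry:
    // hourgam[m] -= volinv * (dvd . sum_over_nodes(gamma_m * x)), with the node
    // sums formed cooperatively by SumOverNodesShfl.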
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==2 || node==3 || node==4 || node==5) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[0] = negf;
}
else hourgam[0] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[0] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==1 || node==2 || node==4 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[1] = negf;
}
else hourgam[1] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[1] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==1 || node==3 || node==5 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[2] = negf;
}
else hourgam[2] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[2] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==0 || node==2 || node==5 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[3] = negf;
}
else hourgam[3] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[3] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
/*************************************************/
/* CalcStressForElems */
/*************************************************/
Real_t b[3];
/*************************************************/
//CalcElemShapeFunctionDerivatives_warp_per_4cell(xn, yn, zn, B, &det);
/*************************************************/
Real_t fjxxi, fjxet, fjxze;
Real_t fjyxi, fjyet, fjyze;
Real_t fjzxi, fjzet, fjzze;
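    // Each thread contributes +/-0.125 times its node coordinate; the
    // SumOverNodesShfl reductions below assemble the Jacobian columns
    // (d{x,y,z}/d{xi,eta,zeta}) of the trilinear map at the element center.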
fjxxi = fjxet = fjxze = Real_t(0.125)*xn;
fjyxi = fjyet = fjyze = Real_t(0.125)*yn;
fjzxi = fjzet = fjzze = Real_t(0.125)*zn;
if (node==0 || node==3 || node==7 || node==4)
{
fjxxi = -fjxxi;
fjyxi = -fjyxi;
fjzxi = -fjzxi;
}
if (node==0 || node==5 || node==1 || node==4)
{
fjxet = -fjxet;
fjyet = -fjyet;
fjzet = -fjzet;
}
if (node==0 || node==3 || node==1 || node==2)
{
fjxze = -fjxze;
fjyze = -fjyze;
fjzze = -fjzze;
}
SumOverNodesShfl(fjxxi);
SumOverNodesShfl(fjxet);
SumOverNodesShfl(fjxze);
SumOverNodesShfl(fjyxi);
SumOverNodesShfl(fjyet);
SumOverNodesShfl(fjyze);
SumOverNodesShfl(fjzxi);
SumOverNodesShfl(fjzet);
SumOverNodesShfl(fjzze);
/* compute cofactors */
Real_t cjxxi, cjxet, cjxze;
Real_t cjyxi, cjyet, cjyze;
Real_t cjzxi, cjzet, cjzze;
cjxxi = (fjyet * fjzze) - (fjzet * fjyze);
cjxet = - (fjyxi * fjzze) + (fjzxi * fjyze);
cjxze = (fjyxi * fjzet) - (fjzxi * fjyet);
cjyxi = - (fjxet * fjzze) + (fjzet * fjxze);
cjyet = (fjxxi * fjzze) - (fjzxi * fjxze);
cjyze = - (fjxxi * fjzet) + (fjzxi * fjxet);
cjzxi = (fjxet * fjyze) - (fjyet * fjxze);
cjzet = - (fjxxi * fjyze) + (fjyxi * fjxze);
cjzze = (fjxxi * fjyet) - (fjyxi * fjxet);
Real_t coef_xi, coef_et, coef_ze;
if (node==0 || node==3 || node==4 || node==7)
coef_xi = Real_t(-1.);
else
coef_xi = Real_t(1.);
if (node==0 || node==1 || node==4 || node==5)
coef_et = Real_t(-1.);
else
coef_et = Real_t(1.);
if (node==0 || node==1 || node==2 || node==3)
coef_ze = Real_t(-1.);
else
coef_ze = Real_t(1.);
/* calculate partials :
this need only be done for l = 0,1,2,3 since , by symmetry ,
(6,7,4,5) = - (0,1,2,3) .
*/
b[0] = coef_xi * cjxxi + coef_et * cjxet + coef_ze * cjxze;
b[1] = coef_xi * cjyxi + coef_et * cjyet + coef_ze * cjyze;
b[2] = coef_xi * cjzxi + coef_et * cjzet + coef_ze * cjzze;
/* calculate jacobian determinant (volume) */
det = Real_t(8.) * ( fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);
/*************************************************/
//CalcElemNodeNormals_warp_per_4cell( B[0] , B[1], B[2], xn, yn, zn);
/*************************************************/
b[0] = Real_t(0.0);
b[1] = Real_t(0.0);
b[2] = Real_t(0.0);
    // Accumulate the area-weighted normals of the six hexahedron faces
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 0,1,2,3);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 0,4,5,1);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 1,5,6,2);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 2,6,7,3);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 3,7,4,0);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 4,7,6,5);
// Check for bad volume
if (det < 0.) {
*bad_vol = elem;
}
hgfx = -( sigxx*b[0] );
hgfy = -( sigxx*b[1] );
hgfz = -( sigxx*b[2] );
if (hourg_gt_zero)
{
/*************************************************/
/* CalcFBHourglassForceForElems */
/*************************************************/
xdn = xd[node_id];
ydn = yd[node_id];
zdn = zd[node_id];
Real_t hgfx_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*xdn;
SumOverNodesShfl(h);
hgfx_temp+=hourgam[i]*h;
}
hgfx_temp *= coefficient;
hgfx += hgfx_temp;
Real_t hgfy_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*ydn;
SumOverNodesShfl(h);
hgfy_temp+=hourgam[i]*h;
}
hgfy_temp *= coefficient;
hgfy += hgfy_temp;
Real_t hgfz_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*zdn;
SumOverNodesShfl(h);
hgfz_temp+=hourgam[i]*h;
}
hgfz_temp *= coefficient;
hgfz += hgfz_temp;
}
#ifdef DOUBLE_PRECISION
Index_t store_loc = elem+padded_numElem*node;
fx_elem[store_loc]=hgfx;
fy_elem[store_loc]=hgfy;
fz_elem[store_loc]=hgfz;
#else
atomicAdd(&fx_node[node_id],hgfx);
atomicAdd(&fy_node[node_id],hgfy);
atomicAdd(&fz_node[node_id],hgfz);
#endif
} // If elem < numElem
}
static inline
void CalcVolumeForceForElems(const Real_t hgcoef,Domain *domain)
{
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
#ifdef DOUBLE_PRECISION
Vector_d<Real_t>* fx_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fy_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fz_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
#else
thrust::fill(domain->fx.begin(),domain->fx.end(),0.);
thrust::fill(domain->fy.begin(),domain->fy.end(),0.);
thrust::fill(domain->fz.begin(),domain->fz.end(),0.);
#endif
int num_threads = numElem ;
const int block_size = 64;
int dimGrid = PAD_DIV(num_threads,block_size);
bool hourg_gt_zero = hgcoef > Real_t(0.0);
if (hourg_gt_zero)
{
CalcVolumeForceForElems_kernel<true> <<<dimGrid,block_size>>>
( domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
#ifdef DOUBLE_PRECISION
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw() ,
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
else
{
CalcVolumeForceForElems_kernel<false> <<<dimGrid,block_size>>>
( domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
#ifdef DOUBLE_PRECISION
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw() ,
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
#ifdef DOUBLE_PRECISION
num_threads = domain->numNode;
// Launch boundary nodes first
dimGrid= PAD_DIV(num_threads,block_size);
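    // Double-precision path: per-element nodal forces were staged in
    // fx/fy/fz_elem above; this gather kernel sums each node's element
    // contributions via the node-to-corner adjacency lists instead of atomics.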
AddNodeForcesFromElems_kernel<<<dimGrid,block_size>>>
( domain->numNode,
domain->padded_numNode,
domain->nodeElemCount.raw(),
domain->nodeElemStart.raw(),
domain->nodeElemCornerList.raw(),
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw(),
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
num_threads
);
// cudaDeviceSynchronize();
// cudaCheckError();
Allocator<Vector_d<Real_t> >::free(fx_elem,padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(fy_elem,padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(fz_elem,padded_numElem*8);
#endif // ifdef DOUBLE_PRECISION
return ;
}
/*
static inline
void CalcVolumeForceForElems_warp_per_4cell(const Real_t hgcoef,Domain *domain)
{
// We're gonna map one warp per 4 cells, i.e. one thread per vertex
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
#ifdef DOUBLE_PRECISION
Vector_d<Real_t>* fx_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fy_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fz_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
#else
thrust::fill(domain->fx.begin(),domain->fx.end(),0.);
thrust::fill(domain->fy.begin(),domain->fy.end(),0.);
thrust::fill(domain->fz.begin(),domain->fz.end(),0.);
#endif
const int warps_per_cta = 2;
const int cta_size = warps_per_cta * 32;
int num_threads = numElem*8;
int dimGrid = PAD_DIV(num_threads,cta_size);
bool hourg_gt_zero = hgcoef > Real_t(0.0);
if (hourg_gt_zero)
{
CalcVolumeForceForElems_kernel_warp_per_4cell<true, cta_size> <<<dimGrid,cta_size>>>
( domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(),
domain->y.raw(),
domain->z.raw(),
domain->xd.raw(),
domain->yd.raw(),
domain->zd.raw(),
//domain->tex_x, domain->tex_y, domain->tex_z, domain->tex_xd, domain->tex_yd, domain->tex_zd,
#ifdef DOUBLE_PRECISION
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw() ,
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
else
{
CalcVolumeForceForElems_kernel_warp_per_4cell<false, cta_size> <<<dimGrid,cta_size>>>
( domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(),
domain->y.raw(),
domain->z.raw(),
domain->xd.raw(),
domain->yd.raw(),
domain->zd.raw(),
#ifdef DOUBLE_PRECISION
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw() ,
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
#ifdef DOUBLE_PRECISION
num_threads = domain->numNode;
// Launch boundary nodes first
dimGrid= PAD_DIV(num_threads,cta_size);
AddNodeForcesFromElems_kernel<<<dimGrid,cta_size>>>
( domain->numNode,
domain->padded_numNode,
domain->nodeElemCount.raw(),
domain->nodeElemStart.raw(),
domain->nodeElemCornerList.raw(),
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw(),
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
num_threads
);
//cudaDeviceSynchronize();
//cudaCheckError();
Allocator<Vector_d<Real_t> >::free(fx_elem,padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(fy_elem,padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(fz_elem,padded_numElem*8);
#endif // ifdef DOUBLE_PRECISION
return ;
}
*/
static inline
void CalcVolumeForceForElems(Domain* domain)
{
const Real_t hgcoef = domain->hgcoef ;
CalcVolumeForceForElems(hgcoef,domain);
//CalcVolumeForceForElems_warp_per_4cell(hgcoef,domain);
}
static inline void checkErrors(Domain* domain,int its,int myRank)
{
if (*(domain->bad_vol_h) != -1)
{
printf("Rank %i: Volume Error in cell %d at iteration %d\n",myRank,*(domain->bad_vol_h),its);
exit(VolumeError);
}
if (*(domain->bad_q_h) != -1)
{
printf("Rank %i: Q Error in cell %d at iteration %d\n",myRank,*(domain->bad_q_h),its);
exit(QStopError);
}
}
static inline void CalcForceForNodes(Domain *domain)
{
#if USE_MPI
CommRecv(*domain, MSG_COMM_SBN, 3,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false) ;
#endif
CalcVolumeForceForElems(domain);
// moved here from the main loop to allow async execution with GPU work
TimeIncrement(domain);
#if USE_MPI
// initialize pointers
domain->d_fx = domain->fx.raw();
domain->d_fy = domain->fy.raw();
domain->d_fz = domain->fz.raw();
Domain_member fieldData[3] ;
fieldData[0] = &Domain::get_fx ;
fieldData[1] = &Domain::get_fy ;
fieldData[2] = &Domain::get_fz ;
CommSendGpu(*domain, MSG_COMM_SBN, 3, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false, domain->streams[2]) ;
CommSBNGpu(*domain, 3, fieldData, &domain->streams[2]) ;
#endif
}
__global__
void CalcAccelerationForNodes_kernel(int numNode,
Real_t *xdd, Real_t *ydd, Real_t *zdd,
Real_t *fx, Real_t *fy, Real_t *fz,
Real_t *nodalMass)
{
int tid=blockDim.x*blockIdx.x+threadIdx.x;
if (tid < numNode)
{
Real_t one_over_nMass = Real_t(1.)/nodalMass[tid];
xdd[tid]=fx[tid]*one_over_nMass;
ydd[tid]=fy[tid]*one_over_nMass;
zdd[tid]=fz[tid]*one_over_nMass;
}
}
static inline
void CalcAccelerationForNodes(Domain *domain)
{
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numNode,dimBlock);
CalcAccelerationForNodes_kernel<<<dimGrid, dimBlock>>>
(domain->numNode,
domain->xdd.raw(),domain->ydd.raw(),domain->zdd.raw(),
domain->fx.raw(),domain->fy.raw(),domain->fz.raw(),
domain->nodalMass.raw());
//cudaDeviceSynchronize();
//cudaCheckError();
}
__global__
void ApplyAccelerationBoundaryConditionsForNodes_kernel(
int numNodeBC, Real_t *xyzdd,
Index_t *symm)
{
int i=blockDim.x*blockIdx.x+threadIdx.x;
if (i < numNodeBC)
{
xyzdd[symm[i]] = Real_t(0.0) ;
}
}
static inline
void ApplyAccelerationBoundaryConditionsForNodes(Domain *domain)
{
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numSymmX,dimBlock);
if (domain->numSymmX > 0)
ApplyAccelerationBoundaryConditionsForNodes_kernel<<<dimGrid, dimBlock>>>
(domain->numSymmX,
domain->xdd.raw(),
domain->symmX.raw());
dimGrid = PAD_DIV(domain->numSymmY,dimBlock);
if (domain->numSymmY > 0)
ApplyAccelerationBoundaryConditionsForNodes_kernel<<<dimGrid, dimBlock>>>
(domain->numSymmY,
domain->ydd.raw(),
domain->symmY.raw());
dimGrid = PAD_DIV(domain->numSymmZ,dimBlock);
if (domain->numSymmZ > 0)
ApplyAccelerationBoundaryConditionsForNodes_kernel<<<dimGrid, dimBlock>>>
(domain->numSymmZ,
domain->zdd.raw(),
domain->symmZ.raw());
}
__global__
void CalcPositionAndVelocityForNodes_kernel(int numNode,
const Real_t deltatime,
const Real_t u_cut,
Real_t* __restrict__ x, Real_t* __restrict__ y, Real_t* __restrict__ z,
Real_t* __restrict__ xd, Real_t* __restrict__ yd, Real_t* __restrict__ zd,
const Real_t* __restrict__ xdd, const Real_t* __restrict__ ydd, const Real_t* __restrict__ zdd)
{
int i=blockDim.x*blockIdx.x+threadIdx.x;
if (i < numNode)
{
Real_t xdtmp, ydtmp, zdtmp, dt;
dt = deltatime;
xdtmp = xd[i] + xdd[i] * dt ;
ydtmp = yd[i] + ydd[i] * dt ;
zdtmp = zd[i] + zdd[i] * dt ;
if( FABS(xdtmp) < u_cut ) xdtmp = 0.0;
if( FABS(ydtmp) < u_cut ) ydtmp = 0.0;
if( FABS(zdtmp) < u_cut ) zdtmp = 0.0;
x[i] += xdtmp * dt;
y[i] += ydtmp * dt;
z[i] += zdtmp * dt;
xd[i] = xdtmp;
yd[i] = ydtmp;
zd[i] = zdtmp;
}
}
static inline
void CalcPositionAndVelocityForNodes(const Real_t u_cut, Domain* domain)
{
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numNode,dimBlock);
CalcPositionAndVelocityForNodes_kernel<<<dimGrid, dimBlock>>>
(domain->numNode,domain->deltatime_h,u_cut,
domain->x.raw(),domain->y.raw(),domain->z.raw(),
domain->xd.raw(),domain->yd.raw(),domain->zd.raw(),
domain->xdd.raw(),domain->ydd.raw(),domain->zdd.raw());
//cudaDeviceSynchronize();
//cudaCheckError();
}
static inline
void LagrangeNodal(Domain *domain)
{
#ifdef SEDOV_SYNC_POS_VEL_EARLY
Domain_member fieldData[6] ;
#endif
Real_t u_cut = domain->u_cut ;
/* time of boundary condition evaluation is beginning of step for force and
* acceleration boundary conditions. */
CalcForceForNodes(domain);
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
CommRecv(*domain, MSG_SYNC_POS_VEL, 6,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false) ;
#endif
#endif
CalcAccelerationForNodes(domain);
ApplyAccelerationBoundaryConditionsForNodes(domain);
CalcPositionAndVelocityForNodes(u_cut, domain);
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
// initialize pointers
domain->d_x = domain->x.raw();
domain->d_y = domain->y.raw();
domain->d_z = domain->z.raw();
domain->d_xd = domain->xd.raw();
domain->d_yd = domain->yd.raw();
domain->d_zd = domain->zd.raw();
fieldData[0] = &Domain::get_x ;
fieldData[1] = &Domain::get_y ;
fieldData[2] = &Domain::get_z ;
fieldData[3] = &Domain::get_xd ;
fieldData[4] = &Domain::get_yd ;
fieldData[5] = &Domain::get_zd ;
CommSendGpu(*domain, MSG_SYNC_POS_VEL, 6, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false, domain->streams[2]) ;
CommSyncPosVelGpu(*domain, &domain->streams[2]) ;
#endif
#endif
return;
}
__device__
static inline
Real_t AreaFace( const Real_t x0, const Real_t x1,
const Real_t x2, const Real_t x3,
const Real_t y0, const Real_t y1,
const Real_t y2, const Real_t y3,
const Real_t z0, const Real_t z1,
const Real_t z2, const Real_t z3)
{
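   // Returns |f|^2 |g|^2 - (f.g)^2 = |f x g|^2, where f and g are built from the
   // face diagonals; for a planar quad this equals 16*(face area)^2, so the
   // 4*volume/SQRT(...) in CalcElemCharacteristicLength reduces to
   // volume / (largest face area).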
Real_t fx = (x2 - x0) - (x3 - x1);
Real_t fy = (y2 - y0) - (y3 - y1);
Real_t fz = (z2 - z0) - (z3 - z1);
Real_t gx = (x2 - x0) + (x3 - x1);
Real_t gy = (y2 - y0) + (y3 - y1);
Real_t gz = (z2 - z0) + (z3 - z1);
Real_t temp = (fx * gx + fy * gy + fz * gz);
Real_t area =
(fx * fx + fy * fy + fz * fz) *
(gx * gx + gy * gy + gz * gz) -
temp * temp;
return area ;
}
__device__
static inline
Real_t CalcElemCharacteristicLength( const Real_t x[8],
const Real_t y[8],
const Real_t z[8],
const Real_t volume)
{
Real_t a, charLength = Real_t(0.0);
a = AreaFace(x[0],x[1],x[2],x[3],
y[0],y[1],y[2],y[3],
z[0],z[1],z[2],z[3]) ; // 38
charLength = FMAX(a,charLength) ;
a = AreaFace(x[4],x[5],x[6],x[7],
y[4],y[5],y[6],y[7],
z[4],z[5],z[6],z[7]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[0],x[1],x[5],x[4],
y[0],y[1],y[5],y[4],
z[0],z[1],z[5],z[4]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[1],x[2],x[6],x[5],
y[1],y[2],y[6],y[5],
z[1],z[2],z[6],z[5]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[2],x[3],x[7],x[6],
y[2],y[3],y[7],y[6],
z[2],z[3],z[7],z[6]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[3],x[0],x[4],x[7],
y[3],y[0],y[4],y[7],
z[3],z[0],z[4],z[7]) ;
charLength = FMAX(a,charLength) ;
charLength = Real_t(4.0) * volume / SQRT(charLength);
return charLength;
}
__device__
static
__forceinline__
void CalcElemVelocityGradient( const Real_t* const xvel,
const Real_t* const yvel,
const Real_t* const zvel,
const Real_t b[][8],
const Real_t detJ,
Real_t* const d )
{
const Real_t inv_detJ = Real_t(1.0) / detJ ;
Real_t dyddx, dxddy, dzddx, dxddz, dzddy, dyddz;
const Real_t* const pfx = b[0];
const Real_t* const pfy = b[1];
const Real_t* const pfz = b[2];
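  // The 8-node sums collapse to 4 terms: velocity differences of diagonally
  // opposite node pairs (0-6, 1-7, 2-4, 3-5) exploit the symmetry of the
  // shape-function derivatives, so only pf*[0..3] are needed.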
Real_t tmp1 = (xvel[0]-xvel[6]);
Real_t tmp2 = (xvel[1]-xvel[7]);
Real_t tmp3 = (xvel[2]-xvel[4]);
Real_t tmp4 = (xvel[3]-xvel[5]);
d[0] = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dxddy = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
dxddz = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
tmp1 = (yvel[0]-yvel[6]);
tmp2 = (yvel[1]-yvel[7]);
tmp3 = (yvel[2]-yvel[4]);
tmp4 = (yvel[3]-yvel[5]);
d[1] = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
dyddx = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dyddz = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
tmp1 = (zvel[0]-zvel[6]);
tmp2 = (zvel[1]-zvel[7]);
tmp3 = (zvel[2]-zvel[4]);
tmp4 = (zvel[3]-zvel[5]);
d[2] = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
dzddx = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dzddy = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
d[5] = Real_t( .5) * ( dxddy + dyddx );
d[4] = Real_t( .5) * ( dxddz + dzddx );
d[3] = Real_t( .5) * ( dzddy + dyddz );
}
static __device__ __forceinline__
void CalcMonoGradient(Real_t *x, Real_t *y, Real_t *z,
Real_t *xv, Real_t *yv, Real_t *zv,
Real_t vol,
Real_t *delx_zeta,
Real_t *delv_zeta,
Real_t *delx_xi,
Real_t *delv_xi,
Real_t *delx_eta,
Real_t *delv_eta)
{
#define SUM4(a,b,c,d) (a + b + c + d)
const Real_t ptiny = Real_t(1.e-36) ;
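   // Computes, for each logical element direction (xi, eta, zeta), a gradient
   // length scale delx_* = vol / |area vector| and the corresponding velocity
   // gradient delv_*, both consumed by the monotonic artificial-viscosity limiter.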
Real_t ax,ay,az ;
Real_t dxv,dyv,dzv ;
Real_t norm = Real_t(1.0) / ( vol + ptiny ) ;
Real_t dxj = Real_t(-0.25)*(SUM4(x[0],x[1],x[5],x[4]) - SUM4(x[3],x[2],x[6],x[7])) ;
Real_t dyj = Real_t(-0.25)*(SUM4(y[0],y[1],y[5],y[4]) - SUM4(y[3],y[2],y[6],y[7])) ;
Real_t dzj = Real_t(-0.25)*(SUM4(z[0],z[1],z[5],z[4]) - SUM4(z[3],z[2],z[6],z[7])) ;
Real_t dxi = Real_t( 0.25)*(SUM4(x[1],x[2],x[6],x[5]) - SUM4(x[0],x[3],x[7],x[4])) ;
Real_t dyi = Real_t( 0.25)*(SUM4(y[1],y[2],y[6],y[5]) - SUM4(y[0],y[3],y[7],y[4])) ;
Real_t dzi = Real_t( 0.25)*(SUM4(z[1],z[2],z[6],z[5]) - SUM4(z[0],z[3],z[7],z[4])) ;
Real_t dxk = Real_t( 0.25)*(SUM4(x[4],x[5],x[6],x[7]) - SUM4(x[0],x[1],x[2],x[3])) ;
Real_t dyk = Real_t( 0.25)*(SUM4(y[4],y[5],y[6],y[7]) - SUM4(y[0],y[1],y[2],y[3])) ;
Real_t dzk = Real_t( 0.25)*(SUM4(z[4],z[5],z[6],z[7]) - SUM4(z[0],z[1],z[2],z[3])) ;
/* find delvk and delxk ( i cross j ) */
ax = dyi*dzj - dzi*dyj ;
ay = dzi*dxj - dxi*dzj ;
az = dxi*dyj - dyi*dxj ;
*delx_zeta = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv[4],xv[5],xv[6],xv[7]) - SUM4(xv[0],xv[1],xv[2],xv[3])) ;
dyv = Real_t(0.25)*(SUM4(yv[4],yv[5],yv[6],yv[7]) - SUM4(yv[0],yv[1],yv[2],yv[3])) ;
dzv = Real_t(0.25)*(SUM4(zv[4],zv[5],zv[6],zv[7]) - SUM4(zv[0],zv[1],zv[2],zv[3])) ;
*delv_zeta = ax*dxv + ay*dyv + az*dzv ;
/* find delxi and delvi ( j cross k ) */
ax = dyj*dzk - dzj*dyk ;
ay = dzj*dxk - dxj*dzk ;
az = dxj*dyk - dyj*dxk ;
*delx_xi = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv[1],xv[2],xv[6],xv[5]) - SUM4(xv[0],xv[3],xv[7],xv[4])) ;
dyv = Real_t(0.25)*(SUM4(yv[1],yv[2],yv[6],yv[5]) - SUM4(yv[0],yv[3],yv[7],yv[4])) ;
dzv = Real_t(0.25)*(SUM4(zv[1],zv[2],zv[6],zv[5]) - SUM4(zv[0],zv[3],zv[7],zv[4])) ;
*delv_xi = ax*dxv + ay*dyv + az*dzv ;
/* find delxj and delvj ( k cross i ) */
ax = dyk*dzi - dzk*dyi ;
ay = dzk*dxi - dxk*dzi ;
az = dxk*dyi - dyk*dxi ;
*delx_eta = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(-0.25)*(SUM4(xv[0],xv[1],xv[5],xv[4]) - SUM4(xv[3],xv[2],xv[6],xv[7])) ;
dyv = Real_t(-0.25)*(SUM4(yv[0],yv[1],yv[5],yv[4]) - SUM4(yv[3],yv[2],yv[6],yv[7])) ;
dzv = Real_t(-0.25)*(SUM4(zv[0],zv[1],zv[5],zv[4]) - SUM4(zv[3],zv[2],zv[6],zv[7])) ;
*delv_eta = ax*dxv + ay*dyv + az*dzv ;
#undef SUM4
}
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(64,6) // 64-bit //YKT: For Volta(64,6) For Minsky(64,8)
#else
__launch_bounds__(64,16) // 32-bit
#endif
void CalcKinematicsAndMonotonicQGradient_kernel(
Index_t numElem, Index_t padded_numElem, const Real_t dt,
const Index_t* __restrict__ nodelist, const Real_t* __restrict__ volo, const Real_t* __restrict__ v,
const Real_t* __restrict__ x,
const Real_t* __restrict__ y,
const Real_t* __restrict__ z,
const Real_t* __restrict__ xd,
const Real_t* __restrict__ yd,
const Real_t* __restrict__ zd,
//TextureObj<Real_t> x,
//TextureObj<Real_t> y,
//TextureObj<Real_t> z,
//TextureObj<Real_t> xd,
//TextureObj<Real_t> yd,
//TextureObj<Real_t> zd,
//TextureObj<Real_t>* x,
//TextureObj<Real_t>* y,
//TextureObj<Real_t>* z,
//TextureObj<Real_t>* xd,
//TextureObj<Real_t>* yd,
//TextureObj<Real_t>* zd,
Real_t* __restrict__ vnew,
Real_t* __restrict__ delv,
Real_t* __restrict__ arealg,
Real_t* __restrict__ dxx,
Real_t* __restrict__ dyy,
Real_t* __restrict__ dzz,
Real_t* __restrict__ vdov,
Real_t* __restrict__ delx_zeta,
Real_t* __restrict__ delv_zeta,
Real_t* __restrict__ delx_xi,
Real_t* __restrict__ delv_xi,
Real_t* __restrict__ delx_eta,
Real_t* __restrict__ delv_eta,
Index_t* __restrict__ bad_vol,
const Index_t num_threads
)
{
Real_t B[3][8] ; /** shape function derivatives */
Index_t nodes[8] ;
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
Real_t xd_local[8] ;
Real_t yd_local[8] ;
Real_t zd_local[8] ;
Real_t D[6];
int k=blockDim.x*blockIdx.x+threadIdx.x;
if ( k < num_threads) {
Real_t volume ;
Real_t relativeVolume ;
// get nodal coordinates from global arrays and copy into local arrays.
//#pragma unroll
//for( Index_t lnode=0 ; lnode<8 ; ++lnode )
//{
// Index_t gnode = nodelist[k+lnode*padded_numElem];
// nodes[lnode] = gnode;
// x_local[lnode] = x[gnode];
// y_local[lnode] = y[gnode];
// z_local[lnode] = z[gnode];
//}
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist[k+lnode*padded_numElem];
nodes[lnode] = gnode;
}
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
x_local[lnode] = x[nodes[lnode]];
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
y_local[lnode] = y[nodes[lnode]];
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
z_local[lnode] = z[nodes[lnode]];
// volume calculations
volume = CalcElemVolume(x_local, y_local, z_local );
relativeVolume = volume / volo[k] ;
vnew[k] = relativeVolume ;
delv[k] = relativeVolume - v[k] ;
// set characteristic length
arealg[k] = CalcElemCharacteristicLength(x_local,y_local,z_local,volume);
// get nodal velocities from global array and copy into local arrays.
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodes[lnode];
xd_local[lnode] = xd[gnode];
yd_local[lnode] = yd[gnode];
zd_local[lnode] = zd[gnode];
}
Real_t dt2 = Real_t(0.5) * dt;
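    // Shift nodal positions back by half a timestep so the shape-function
    // derivatives and velocity gradient are evaluated at the time-centered
    // (mid-step) configuration; the shift is undone further below.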
#pragma unroll
for ( Index_t j=0 ; j<8 ; ++j )
{
x_local[j] -= dt2 * xd_local[j];
y_local[j] -= dt2 * yd_local[j];
z_local[j] -= dt2 * zd_local[j];
}
Real_t detJ;
CalcElemShapeFunctionDerivatives(x_local,y_local,z_local,B,&detJ );
CalcElemVelocityGradient(xd_local,yd_local,zd_local,B,detJ,D);
// ------------------------
// CALC LAGRANGE ELEM 2
// ------------------------
// calc strain rate and apply as constraint (only done in FB element)
Real_t vdovNew = D[0] + D[1] + D[2];
Real_t vdovthird = vdovNew/Real_t(3.0) ;
// make the rate of deformation tensor deviatoric
vdov[k] = vdovNew ;
dxx[k] = D[0] - vdovthird ;
dyy[k] = D[1] - vdovthird ;
dzz[k] = D[2] - vdovthird ;
// ------------------------
// CALC MONOTONIC Q GRADIENT
// ------------------------
Real_t vol = volo[k]*vnew[k];
// Undo x_local update
#pragma unroll
for ( Index_t j=0 ; j<8 ; ++j ) {
x_local[j] += dt2 * xd_local[j];
y_local[j] += dt2 * yd_local[j];
z_local[j] += dt2 * zd_local[j];
}
CalcMonoGradient(x_local,y_local,z_local,xd_local,yd_local,zd_local,
vol,
&delx_zeta[k],&delv_zeta[k],&delx_xi[k],
&delv_xi[k], &delx_eta[k], &delv_eta[k]);
//Check for bad volume
if (relativeVolume < 0)
*bad_vol = k;
}
}
static inline
void CalcKinematicsAndMonotonicQGradient(Domain *domain)
{
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
int num_threads = numElem;
const int block_size = 64;
int dimGrid = PAD_DIV(num_threads,block_size);
CalcKinematicsAndMonotonicQGradient_kernel<<<dimGrid,block_size>>>
( numElem,padded_numElem, domain->deltatime_h,
domain->nodelist.raw(),
domain->volo.raw(),
domain->v.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
domain->vnew->raw(),
domain->delv.raw(),
domain->arealg.raw(),
domain->dxx->raw(),
domain->dyy->raw(),
domain->dzz->raw(),
domain->vdov.raw(),
domain->delx_zeta->raw(),
domain->delv_zeta->raw(),
domain->delx_xi->raw(),
domain->delv_xi->raw(),
domain->delx_eta->raw(),
domain->delv_eta->raw(),
domain->bad_vol_h,
num_threads
);
//cudaDeviceSynchronize();
//cudaCheckError();
}
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(128,16)
#else
__launch_bounds__(128,16)
#endif
void CalcMonotonicQRegionForElems_kernel(
Real_t qlc_monoq,
Real_t qqc_monoq,
Real_t monoq_limiter_mult,
Real_t monoq_max_slope,
Real_t ptiny,
// the elementset length
Index_t elength,
Index_t* regElemlist,
// const Index_t* __restrict__ regElemlist,
Index_t *elemBC,
Index_t *lxim,
Index_t *lxip,
Index_t *letam,
Index_t *letap,
Index_t *lzetam,
Index_t *lzetap,
Real_t *delv_xi,
Real_t *delv_eta,
Real_t *delv_zeta,
Real_t *delx_xi,
Real_t *delx_eta,
Real_t *delx_zeta,
Real_t *vdov,Real_t *elemMass,Real_t *volo,Real_t *vnew,
Real_t *qq, Real_t *ql,
Real_t *q,
Real_t qstop,
Index_t* bad_q
)
{
int ielem=blockDim.x*blockIdx.x + threadIdx.x;
if (ielem<elength) {
Real_t qlin, qquad ;
Real_t phixi, phieta, phizeta ;
Index_t i = regElemlist[ielem];
Int_t bcMask = elemBC[i] ;
Real_t delvm, delvp ;
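    // For each logical direction, the limiter phi is half the sum of the
    // neighbor gradients normalized by this element's gradient, clamped by
    // monoq_limiter_mult times each neighbor value and restricted to
    // [0, monoq_max_slope]; the bcMask switches select the neighbor values
    // at symmetry/free/communication boundaries.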
/* phixi */
Real_t norm = Real_t(1.) / ( delv_xi[i] + ptiny ) ;
switch (bcMask & XI_M) {
case XI_M_COMM: /* needs comm data */
case 0: delvm = delv_xi[lxim[i]] ; break ;
case XI_M_SYMM: delvm = delv_xi[i] ; break ;
case XI_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & XI_P) {
case XI_P_COMM: /* needs comm data */
case 0: delvp = delv_xi[lxip[i]] ; break ;
case XI_P_SYMM: delvp = delv_xi[i] ; break ;
case XI_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phixi = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phixi ) phixi = delvm ;
if ( delvp < phixi ) phixi = delvp ;
if ( phixi < Real_t(0.)) phixi = Real_t(0.) ;
if ( phixi > monoq_max_slope) phixi = monoq_max_slope;
/* phieta */
norm = Real_t(1.) / ( delv_eta[i] + ptiny ) ;
switch (bcMask & ETA_M) {
case ETA_M_COMM: /* needs comm data */
case 0: delvm = delv_eta[letam[i]] ; break ;
case ETA_M_SYMM: delvm = delv_eta[i] ; break ;
case ETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ETA_P) {
case ETA_P_COMM: /* needs comm data */
case 0: delvp = delv_eta[letap[i]] ; break ;
case ETA_P_SYMM: delvp = delv_eta[i] ; break ;
case ETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phieta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phieta ) phieta = delvm ;
if ( delvp < phieta ) phieta = delvp ;
if ( phieta < Real_t(0.)) phieta = Real_t(0.) ;
if ( phieta > monoq_max_slope) phieta = monoq_max_slope;
/* phizeta */
norm = Real_t(1.) / ( delv_zeta[i] + ptiny ) ;
switch (bcMask & ZETA_M) {
case ZETA_M_COMM: /* needs comm data */
case 0: delvm = delv_zeta[lzetam[i]] ; break ;
case ZETA_M_SYMM: delvm = delv_zeta[i] ; break ;
case ZETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ZETA_P) {
case ZETA_P_COMM: /* needs comm data */
case 0: delvp = delv_zeta[lzetap[i]] ; break ;
case ZETA_P_SYMM: delvp = delv_zeta[i] ; break ;
case ZETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phizeta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phizeta ) phizeta = delvm ;
if ( delvp < phizeta ) phizeta = delvp ;
if ( phizeta < Real_t(0.)) phizeta = Real_t(0.);
if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope;
/* Remove length scale */
if ( vdov[i] > Real_t(0.) ) {
qlin = Real_t(0.) ;
qquad = Real_t(0.) ;
}
else {
Real_t delvxxi = delv_xi[i] * delx_xi[i] ;
Real_t delvxeta = delv_eta[i] * delx_eta[i] ;
Real_t delvxzeta = delv_zeta[i] * delx_zeta[i] ;
if ( delvxxi > Real_t(0.) ) delvxxi = Real_t(0.) ;
if ( delvxeta > Real_t(0.) ) delvxeta = Real_t(0.) ;
if ( delvxzeta > Real_t(0.) ) delvxzeta = Real_t(0.) ;
Real_t rho = elemMass[i] / (volo[i] * vnew[i]) ;
qlin = -qlc_monoq * rho *
( delvxxi * (Real_t(1.) - phixi) +
delvxeta * (Real_t(1.) - phieta) +
delvxzeta * (Real_t(1.) - phizeta) ) ;
qquad = qqc_monoq * rho *
( delvxxi*delvxxi * (Real_t(1.) - phixi*phixi) +
delvxeta*delvxeta * (Real_t(1.) - phieta*phieta) +
delvxzeta*delvxzeta * (Real_t(1.) - phizeta*phizeta) ) ;
}
qq[i] = qquad ;
ql[i] = qlin ;
// Don't allow excessive artificial viscosity
if (q[i] > qstop)
*(bad_q) = i;
}
}
static inline
void CalcMonotonicQRegionForElems(Domain *domain)
{
const Real_t ptiny = Real_t(1.e-36) ;
Real_t monoq_max_slope = domain->monoq_max_slope ;
Real_t monoq_limiter_mult = domain->monoq_limiter_mult ;
Real_t qlc_monoq = domain->qlc_monoq;
Real_t qqc_monoq = domain->qqc_monoq;
Index_t elength = domain->numElem;
Index_t dimBlock= 128;
Index_t dimGrid = PAD_DIV(elength,dimBlock);
CalcMonotonicQRegionForElems_kernel<<<dimGrid,dimBlock>>>
( qlc_monoq,qqc_monoq,monoq_limiter_mult,monoq_max_slope,ptiny,elength,
domain->regElemlist.raw(),domain->elemBC.raw(),
domain->lxim.raw(),domain->lxip.raw(),
domain->letam.raw(),domain->letap.raw(),
domain->lzetam.raw(),domain->lzetap.raw(),
domain->delv_xi->raw(),domain->delv_eta->raw(),domain->delv_zeta->raw(),
domain->delx_xi->raw(),domain->delx_eta->raw(),domain->delx_zeta->raw(),
domain->vdov.raw(),domain->elemMass.raw(),domain->volo.raw(),domain->vnew->raw(),
domain->qq.raw(),domain->ql.raw(),
domain->q.raw(),
domain->qstop,
domain->bad_q_h
);
//cudaDeviceSynchronize();
//cudaCheckError();
}
static
__device__ __forceinline__
void CalcPressureForElems_device(
Real_t& p_new, Real_t& bvc,
Real_t& pbvc, Real_t& e_old,
Real_t& compression, Real_t& vnewc,
Real_t pmin,
Real_t p_cut, Real_t eosvmax)
{
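  // Effectively a gamma-law (gamma = 5/3) EOS written in compression form:
  // p = (2/3)*(1 + compression)*e, then clamped: |p| < p_cut -> 0,
  // zeroed when vnewc >= eosvmax, and floored at pmin.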
Real_t c1s = Real_t(2.0)/Real_t(3.0);
Real_t p_temp = p_new;
bvc = c1s * (compression + Real_t(1.));
pbvc = c1s;
p_temp = bvc * e_old ;
if ( FABS(p_temp) < p_cut )
p_temp = Real_t(0.0) ;
if ( vnewc >= eosvmax ) /* impossible condition here? */
p_temp = Real_t(0.0) ;
if (p_temp < pmin)
p_temp = pmin ;
p_new = p_temp;
}
static
__device__ __forceinline__
void CalcSoundSpeedForElems_device(Real_t& vnewc, Real_t rho0, Real_t &enewc,
Real_t &pnewc, Real_t &pbvc,
Real_t &bvc, Real_t ss4o3, Index_t nz,
Real_t *ss, Index_t iz)
{
Real_t ssTmp = (pbvc * enewc + vnewc * vnewc *
bvc * pnewc) / rho0;
if (ssTmp <= Real_t(.1111111e-36)) {
ssTmp = Real_t(.3333333e-18);
}
else {
ssTmp = SQRT(ssTmp) ;
}
ss[iz] = ssTmp;
}
static
__device__
__forceinline__
void ApplyMaterialPropertiesForElems_device(
Real_t& eosvmin, Real_t& eosvmax,
Real_t* vnew, Real_t *v,
Real_t& vnewc, Index_t* bad_vol, Index_t zn)
{
vnewc = vnew[zn] ;
if (eosvmin != Real_t(0.)) {
if (vnewc < eosvmin)
vnewc = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vnewc > eosvmax)
vnewc = eosvmax ;
}
// Now check for valid volume
Real_t vc = v[zn];
if (eosvmin != Real_t(0.)) {
if (vc < eosvmin)
vc = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vc > eosvmax)
vc = eosvmax ;
}
if (vc <= 0.) {
*bad_vol = zn;
}
}
static
__device__
__forceinline__
void UpdateVolumesForElems_device(Index_t numElem, Real_t& v_cut,
Real_t *vnew,
Real_t *v,
int i)
{
Real_t tmpV ;
tmpV = vnew[i] ;
if ( FABS(tmpV - Real_t(1.0)) < v_cut )
tmpV = Real_t(1.0) ;
v[i] = tmpV ;
}
static
__device__
__forceinline__
void CalcEnergyForElems_device(Real_t& p_new, Real_t& e_new, Real_t& q_new,
Real_t& bvc, Real_t& pbvc,
Real_t& p_old, Real_t& e_old, Real_t& q_old,
Real_t& compression, Real_t& compHalfStep,
Real_t& vnewc, Real_t& work, Real_t& delvc, Real_t pmin,
Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin,
Real_t& qq, Real_t& ql,
Real_t& rho0,
Real_t& eosvmax,
Index_t length)
{
const Real_t sixth = Real_t(1.0) / Real_t(6.0) ;
Real_t pHalfStep;
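  // Predictor-corrector energy update: advance e to the half step, evaluate a
  // half-step pressure and artificial viscosity, correct e, re-evaluate the
  // pressure, then apply a final correction weighted by `sixth` before the
  // last pressure/viscosity pass.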
e_new = e_old - Real_t(0.5) * delvc * (p_old + q_old)
+ Real_t(0.5) * work;
if (e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc,
pmin, p_cut, eosvmax);
Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep) ;
if ( delvc > Real_t(0.) ) {
q_new = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc * e_new
+ vhalf * vhalf * bvc * pHalfStep ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc =Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_new = (ssc*ql + qq) ;
}
e_new = e_new + Real_t(0.5) * delvc
* ( Real_t(3.0)*(p_old + q_old)
- Real_t(4.0)*(pHalfStep + q_new)) ;
e_new += Real_t(0.5) * work;
if (FABS(e_new) < e_cut) {
e_new = Real_t(0.) ;
}
if ( e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax);
Real_t q_tilde ;
if (delvc > Real_t(0.)) {
q_tilde = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc * e_new
+ vnewc * vnewc * bvc * p_new ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_tilde = (ssc*ql + qq) ;
}
e_new = e_new - ( Real_t(7.0)*(p_old + q_old)
- Real_t(8.0)*(pHalfStep + q_new)
+ (p_new + q_tilde)) * delvc*sixth ;
if (FABS(e_new) < e_cut) {
e_new = Real_t(0.) ;
}
if ( e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax);
if ( delvc <= Real_t(0.) ) {
Real_t ssc = ( pbvc * e_new
+ vnewc * vnewc * bvc * p_new ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_new = (ssc*ql + qq) ;
if (FABS(q_new) < q_cut) q_new = Real_t(0.) ;
}
return ;
}
__device__ inline
Index_t giveMyRegion(const Index_t* regCSR,const Index_t i, const Index_t numReg)
{
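  // regCSR holds the running end offset of each region within regElemlist;
  // a linear scan returns the first region whose end offset exceeds i.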
for(Index_t reg = 0; reg < numReg-1; reg++)
if(i < regCSR[reg])
return reg;
return (numReg-1);
}
__global__
void ApplyMaterialPropertiesAndUpdateVolume_kernel(
Index_t length,
Real_t rho0,
Real_t e_cut,
Real_t emin,
Real_t* __restrict__ ql,
Real_t* __restrict__ qq,
Real_t* __restrict__ vnew,
Real_t* __restrict__ v,
Real_t pmin,
Real_t p_cut,
Real_t q_cut,
Real_t eosvmin,
Real_t eosvmax,
Index_t* __restrict__ regElemlist,
// const Index_t* __restrict__ regElemlist,
Real_t* __restrict__ e,
Real_t* __restrict__ delv,
Real_t* __restrict__ p,
Real_t* __restrict__ q,
Real_t ss4o3,
Real_t* __restrict__ ss,
Real_t v_cut,
Index_t* __restrict__ bad_vol,
const Int_t cost,
const Index_t* regCSR,
const Index_t* regReps,
const Index_t numReg
)
{
Real_t e_old, delvc, p_old, q_old, e_temp, delvc_temp, p_temp, q_temp;
Real_t compression, compHalfStep;
Real_t qq_old, ql_old, qq_temp, ql_temp, work;
Real_t p_new, e_new, q_new;
Real_t bvc, pbvc, vnewc;
Index_t i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
Index_t zidx = regElemlist[i] ;
ApplyMaterialPropertiesForElems_device
(eosvmin,eosvmax,vnew,v,vnewc,bad_vol,zidx);
/********************** Start EvalEOSForElems **************************/
// Here we need to find out what region this element belongs to and what is the rep value!
Index_t region = giveMyRegion(regCSR,i,numReg);
Index_t rep = regReps[region];
e_temp = e[zidx];
p_temp = p[zidx];
q_temp = q[zidx];
qq_temp = qq[zidx] ;
ql_temp = ql[zidx] ;
delvc_temp = delv[zidx];
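    // Re-run the EOS evaluation `rep` times on the same inputs; rep comes from
    // the per-region representative count (regReps), presumably LULESH's region
    // cost model, and only the final p_new/e_new/q_new are stored back.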
for(int r=0; r < rep; r++)
{
e_old = e_temp;
p_old = p_temp;
q_old = q_temp;
qq_old = qq_temp;
ql_old = ql_temp;
delvc = delvc_temp;
work = Real_t(0.);
Real_t vchalf ;
compression = Real_t(1.) / vnewc - Real_t(1.);
vchalf = vnewc - delvc * Real_t(.5);
compHalfStep = Real_t(1.) / vchalf - Real_t(1.);
if ( eosvmin != Real_t(0.) ) {
if (vnewc <= eosvmin) { /* impossible due to calling func? */
compHalfStep = compression ;
}
}
if ( eosvmax != Real_t(0.) ) {
if (vnewc >= eosvmax) { /* impossible due to calling func? */
p_old = Real_t(0.) ;
compression = Real_t(0.) ;
compHalfStep = Real_t(0.) ;
}
}
// qq_old = qq[zidx] ;
// ql_old = ql[zidx] ;
// work = Real_t(0.) ;
CalcEnergyForElems_device(p_new, e_new, q_new, bvc, pbvc,
p_old, e_old, q_old, compression, compHalfStep,
vnewc, work, delvc, pmin,
p_cut, e_cut, q_cut, emin,
qq_old, ql_old, rho0, eosvmax, length);
}//end for rep
p[zidx] = p_new ;
e[zidx] = e_new ;
q[zidx] = q_new ;
CalcSoundSpeedForElems_device
(vnewc,rho0,e_new,p_new,pbvc,bvc,ss4o3,length,ss,zidx);
/********************** End EvalEOSForElems **************************/
UpdateVolumesForElems_device(length,v_cut,vnew,v,zidx);
}
}
static inline
void ApplyMaterialPropertiesAndUpdateVolume(Domain *domain)
{
Index_t length = domain->numElem ;
if (length != 0) {
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(length,dimBlock);
ApplyMaterialPropertiesAndUpdateVolume_kernel<<<dimGrid,dimBlock>>>
(length,
domain->refdens,
domain->e_cut,
domain->emin,
domain->ql.raw(),
domain->qq.raw(),
domain->vnew->raw(),
domain->v.raw(),
domain->pmin,
domain->p_cut,
domain->q_cut,
domain->eosvmin,
domain->eosvmax,
domain->regElemlist.raw(),
domain->e.raw(),
domain->delv.raw(),
domain->p.raw(),
domain->q.raw(),
domain->ss4o3,
domain->ss.raw(),
domain->v_cut,
domain->bad_vol_h,
domain->cost,
domain->regCSR.raw(),
domain->regReps.raw(),
domain->numReg
);
//cudaDeviceSynchronize();
//cudaCheckError();
}
}
static inline
void LagrangeElements(Domain *domain)
{
int allElem = domain->numElem + /* local elem */
2*domain->sizeX*domain->sizeY + /* plane ghosts */
2*domain->sizeX*domain->sizeZ + /* row ghosts */
2*domain->sizeY*domain->sizeZ ; /* col ghosts */
domain->vnew = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dxx = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dyy = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dzz = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_xi = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_eta = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_zeta = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delv_xi = Allocator< Vector_d<Real_t> >::allocate(allElem);
domain->delv_eta = Allocator< Vector_d<Real_t> >::allocate(allElem);
domain->delv_zeta = Allocator< Vector_d<Real_t> >::allocate(allElem);
#if USE_MPI
CommRecv(*domain, MSG_MONOQ, 3,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true) ;
#endif
/*********************************************/
  /* Calc Kinematics and Monotonic Q Gradient */
/*********************************************/
CalcKinematicsAndMonotonicQGradient(domain);
#if USE_MPI
Domain_member fieldData[3] ;
// initialize pointers
domain->d_delv_xi = domain->delv_xi->raw();
domain->d_delv_eta = domain->delv_eta->raw();
domain->d_delv_zeta = domain->delv_zeta->raw();
fieldData[0] = &Domain::get_delv_xi ;
fieldData[1] = &Domain::get_delv_eta ;
fieldData[2] = &Domain::get_delv_zeta ;
CommSendGpu(*domain, MSG_MONOQ, 3, fieldData,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true, domain->streams[2]) ;
CommMonoQGpu(*domain, domain->streams[2]) ;
#endif
Allocator<Vector_d<Real_t> >::free(domain->dxx,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->dyy,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->dzz,domain->numElem);
/**********************************
   * Calc Monotonic Q Region
**********************************/
CalcMonotonicQRegionForElems(domain);
Allocator<Vector_d<Real_t> >::free(domain->delx_xi,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delx_eta,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delx_zeta,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_xi,allElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_eta,allElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_zeta,allElem);
// printf("\n --Start of ApplyMaterials! \n");
ApplyMaterialPropertiesAndUpdateVolume(domain) ;
// printf("\n --End of ApplyMaterials! \n");
Allocator<Vector_d<Real_t> >::free(domain->vnew,domain->numElem);
}
template<int block_size>
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(128,16)
#else
__launch_bounds__(128,16)
#endif
void CalcTimeConstraintsForElems_kernel(
Index_t length,
Real_t qqc2,
Real_t dvovmax,
Index_t *matElemlist,
Real_t *ss,
Real_t *vdov,
Real_t *arealg,
Real_t *dev_mindtcourant,
Real_t *dev_mindthydro)
{
int tid = threadIdx.x;
int i=blockDim.x*blockIdx.x + tid;
__shared__ volatile Real_t s_mindthydro[block_size];
__shared__ volatile Real_t s_mindtcourant[block_size];
Real_t mindthydro = Real_t(1.0e+20) ;
Real_t mindtcourant = Real_t(1.0e+20) ;
Real_t dthydro = mindthydro;
Real_t dtcourant = mindtcourant;
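  // Grid-stride loop: each thread keeps running minima of the hydro and
  // courant timestep limits over its strided subset of elements.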
while (i<length) {
Index_t indx = matElemlist[i] ;
Real_t vdov_tmp = vdov[indx];
// Computing dt_hydro
if (vdov_tmp != Real_t(0.)) {
Real_t dtdvov = dvovmax / (FABS(vdov_tmp)+Real_t(1.e-20)) ;
if ( dthydro > dtdvov ) {
dthydro = dtdvov ;
}
}
if (dthydro < mindthydro)
mindthydro = dthydro;
// Computing dt_courant
Real_t ss_tmp = ss[indx];
Real_t area_tmp = arealg[indx];
Real_t dtf = ss_tmp * ss_tmp ;
dtf += ((vdov_tmp < 0.) ? qqc2*area_tmp*area_tmp*vdov_tmp*vdov_tmp : 0.);
dtf = area_tmp / SQRT(dtf) ;
/* determine minimum timestep with its corresponding elem */
if (vdov_tmp != Real_t(0.) && dtf < dtcourant) {
dtcourant = dtf ;
}
if (dtcourant< mindtcourant)
mindtcourant= dtcourant;
i += gridDim.x*blockDim.x;
}
s_mindthydro[tid] = mindthydro;
s_mindtcourant[tid] = mindtcourant;
__syncthreads();
// Do shared memory reduction
if (block_size >= 1024) {
if (tid < 512) {
s_mindthydro[tid] = min( s_mindthydro[tid] , s_mindthydro[tid + 512]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 512]) ; }
__syncthreads(); }
if (block_size >= 512) {
if (tid < 256) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 256]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 256]) ; }
__syncthreads(); }
if (block_size >= 256) {
if (tid < 128) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 128]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 128]) ; }
__syncthreads(); }
if (block_size >= 128) {
if (tid < 64) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 64]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 64]) ; }
__syncthreads(); }
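  // Below 32 active threads the reduction proceeds without __syncthreads(),
  // relying on warp-synchronous execution over the volatile shared arrays.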
if (tid < 32) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 32]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 32]) ;
}
if (tid < 16) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 16]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 16]) ;
}
if (tid < 8) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 8]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 8]) ;
}
if (tid < 4) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 4]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 4]) ;
}
if (tid < 2) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 2]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 2]) ;
}
if (tid < 1) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 1]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 1]) ;
}
// Store in global memory
if (tid==0) {
dev_mindtcourant[blockIdx.x] = s_mindtcourant[0];
dev_mindthydro[blockIdx.x] = s_mindthydro[0];
}
}
template <int block_size>
__global__
void CalcMinDtOneBlock(Real_t* dev_mindthydro, Real_t* dev_mindtcourant, Real_t* dtcourant, Real_t* dthydro, Index_t shared_array_size)
{
volatile __shared__ Real_t s_data[block_size];
int tid = threadIdx.x;
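  // Launched with two blocks: block 0 reduces the per-block courant minima
  // into *dtcourant, block 1 reduces the hydro minima into *dthydro.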
if (blockIdx.x==0)
{
if (tid < shared_array_size)
s_data[tid] = dev_mindtcourant[tid];
else
s_data[tid] = 1.0e20;
__syncthreads();
if (block_size >= 1024) { if (tid < 512) { s_data[tid] = min(s_data[tid],s_data[tid + 512]); } __syncthreads(); }
if (block_size >= 512) { if (tid < 256) { s_data[tid] = min(s_data[tid],s_data[tid + 256]); } __syncthreads(); }
if (block_size >= 256) { if (tid < 128) { s_data[tid] = min(s_data[tid],s_data[tid + 128]); } __syncthreads(); }
if (block_size >= 128) { if (tid < 64) { s_data[tid] = min(s_data[tid],s_data[tid + 64]); } __syncthreads(); }
if (tid < 32) { s_data[tid] = min(s_data[tid],s_data[tid + 32]); }
if (tid < 16) { s_data[tid] = min(s_data[tid],s_data[tid + 16]); }
if (tid < 8) { s_data[tid] = min(s_data[tid],s_data[tid + 8]); }
if (tid < 4) { s_data[tid] = min(s_data[tid],s_data[tid + 4]); }
if (tid < 2) { s_data[tid] = min(s_data[tid],s_data[tid + 2]); }
if (tid < 1) { s_data[tid] = min(s_data[tid],s_data[tid + 1]); }
if (tid<1)
{
*(dtcourant)= s_data[0];
}
}
else if (blockIdx.x==1)
{
if (tid < shared_array_size)
s_data[tid] = dev_mindthydro[tid];
else
s_data[tid] = 1.0e20;
__syncthreads();
if (block_size >= 1024) { if (tid < 512) { s_data[tid] = min(s_data[tid],s_data[tid + 512]); } __syncthreads(); }
if (block_size >= 512) { if (tid < 256) { s_data[tid] = min(s_data[tid],s_data[tid + 256]); } __syncthreads(); }
if (block_size >= 256) { if (tid < 128) { s_data[tid] = min(s_data[tid],s_data[tid + 128]); } __syncthreads(); }
if (block_size >= 128) { if (tid < 64) { s_data[tid] = min(s_data[tid],s_data[tid + 64]); } __syncthreads(); }
if (tid < 32) { s_data[tid] = min(s_data[tid],s_data[tid + 32]); }
if (tid < 16) { s_data[tid] = min(s_data[tid],s_data[tid + 16]); }
if (tid < 8) { s_data[tid] = min(s_data[tid],s_data[tid + 8]); }
if (tid < 4) { s_data[tid] = min(s_data[tid],s_data[tid + 4]); }
if (tid < 2) { s_data[tid] = min(s_data[tid],s_data[tid + 2]); }
if (tid < 1) { s_data[tid] = min(s_data[tid],s_data[tid + 1]); }
if (tid<1)
{
*(dthydro) = s_data[0];
}
}
}
static inline
void CalcTimeConstraintsForElems(Domain* domain)
{
Real_t qqc = domain->qqc;
Real_t qqc2 = Real_t(64.0) * qqc * qqc ;
Real_t dvovmax = domain->dvovmax ;
const Index_t length = domain->numElem;
const int max_dimGrid = 1024;
const int dimBlock = 128;
int dimGrid=std::min(max_dimGrid,PAD_DIV(length,dimBlock));
cudaFuncSetCacheConfig(CalcTimeConstraintsForElems_kernel<dimBlock>, cudaFuncCachePreferShared);
Vector_d<Real_t>* dev_mindtcourant= Allocator< Vector_d<Real_t> >::allocate(dimGrid);
Vector_d<Real_t>* dev_mindthydro = Allocator< Vector_d<Real_t> >::allocate(dimGrid);
CalcTimeConstraintsForElems_kernel<dimBlock> <<<dimGrid,dimBlock>>>
(length,qqc2,dvovmax,
domain->matElemlist.raw(),domain->ss.raw(),domain->vdov.raw(),domain->arealg.raw(),
dev_mindtcourant->raw(),dev_mindthydro->raw());
    // TODO: if dimGrid < 1024, should launch fewer threads
CalcMinDtOneBlock<max_dimGrid> <<<2,max_dimGrid, max_dimGrid*sizeof(Real_t), domain->streams[1]>>>(dev_mindthydro->raw(),dev_mindtcourant->raw(),domain->dtcourant_h,domain->dthydro_h, dimGrid);
cudaEventRecord(domain->time_constraint_computed,domain->streams[1]);
Allocator<Vector_d<Real_t> >::free(dev_mindtcourant,dimGrid);
Allocator<Vector_d<Real_t> >::free(dev_mindthydro,dimGrid);
}
static inline
void LagrangeLeapFrog(Domain* domain)
{
/* calculate nodal forces, accelerations, velocities, positions, with
* applied boundary conditions and slide surface considerations */
LagrangeNodal(domain);
/* calculate element quantities (i.e. velocity gradient & q), and update
* material states */
LagrangeElements(domain);
CalcTimeConstraintsForElems(domain);
}
void printUsage(char* argv[])
{
printf("Usage: \n");
printf("Unstructured grid: %s -u <file.lmesh> \n", argv[0]) ;
printf("Structured grid: %s -s numEdgeElems \n", argv[0]) ;
printf("\nExamples:\n") ;
printf("%s -s 45\n", argv[0]) ;
printf("%s -u sedov15oct.lmesh\n", argv[0]) ;
}
#ifdef SAMI
#ifdef __cplusplus
extern "C" {
#endif
#include "silo.h"
#ifdef __cplusplus
}
#endif
#define MAX_LEN_SAMI_HEADER 10
#define SAMI_HDR_NUMBRICK 0
#define SAMI_HDR_NUMNODES 3
#define SAMI_HDR_NUMMATERIAL 4
#define SAMI_HDR_INDEX_START 6
#define SAMI_HDR_MESHDIM 7
#define MAX_ADJACENCY 14 /* must be 14 or greater */
void DumpSAMI(Domain *domain, char *name)
{
DBfile *fp ;
int headerLen = MAX_LEN_SAMI_HEADER ;
int headerInfo[MAX_LEN_SAMI_HEADER];
char varName[] = "brick_nd0";
char coordName[] = "x";
int version = 121 ;
int numElem = int(domain->numElem) ;
int numNode = int(domain->numNode) ;
int count ;
int *materialID ;
int *nodeConnect ;
double *nodeCoord ;
if ((fp = DBCreate(name, DB_CLOBBER, DB_LOCAL,
NULL, DB_PDB)) == NULL)
{
printf("Couldn't create file %s\n", name) ;
exit(1);
}
for (int i=0; i<MAX_LEN_SAMI_HEADER; ++i) {
headerInfo[i] = 0 ;
}
headerInfo[SAMI_HDR_NUMBRICK] = numElem ;
headerInfo[SAMI_HDR_NUMNODES] = numNode ;
headerInfo[SAMI_HDR_NUMMATERIAL] = 1 ;
headerInfo[SAMI_HDR_INDEX_START] = 1 ;
headerInfo[SAMI_HDR_MESHDIM] = 3 ;
DBWrite(fp, "mesh_data", headerInfo, &headerLen, 1, DB_INT) ;
count = 1 ;
DBWrite(fp, "version", &version, &count, 1, DB_INT) ;
nodeConnect = new int[numElem] ;
Vector_h<Index_t> nodelist_h = domain->nodelist;
for (Index_t i=0; i<8; ++i)
{
for (Index_t j=0; j<numElem; ++j) {
nodeConnect[j] = int(nodelist_h[i*domain->padded_numElem + j]) + 1 ;
}
varName[8] = '0' + i;
DBWrite(fp, varName, nodeConnect, &numElem, 1, DB_INT) ;
}
delete [] nodeConnect ;
nodeCoord = new double[numNode] ;
Vector_h<Real_t> x_h = domain->x;
Vector_h<Real_t> y_h = domain->y;
Vector_h<Real_t> z_h = domain->z;
for (Index_t i=0; i<3; ++i)
{
for (Index_t j=0; j<numNode; ++j) {
Real_t coordVal ;
switch(i) {
case 0: coordVal = double(x_h[j]) ; break ;
case 1: coordVal = double(y_h[j]) ; break ;
case 2: coordVal = double(z_h[j]) ; break ;
}
nodeCoord[j] = coordVal ;
}
coordName[0] = 'x' + i ;
DBWrite(fp, coordName, nodeCoord, &numNode, 1, DB_DOUBLE) ;
}
delete [] nodeCoord ;
materialID = new int[numElem] ;
for (Index_t i=0; i<numElem; ++i)
materialID[i] = 1 ;
DBWrite(fp, "brick_material", materialID, &numElem, 1, DB_INT) ;
delete [] materialID ;
DBClose(fp);
}
#endif
#ifdef SAMI
void DumpDomain(Domain *domain)
{
char meshName[64] ;
printf("Dumping SAMI file\n");
sprintf(meshName, "sedov_%d.sami", int(domain->cycle)) ;
DumpSAMI(domain, meshName) ;
}
#endif
void write_solution(Domain* locDom)
{
Vector_h<Real_t> x_h = locDom->x;
Vector_h<Real_t> y_h = locDom->y;
Vector_h<Real_t> z_h = locDom->z;
// printf("Writing solution to file xyz.asc\n");
std::stringstream filename;
filename << "xyz.asc";
FILE *fout = fopen(filename.str().c_str(),"wb");
for (Index_t i=0; i<locDom->numNode; i++) {
fprintf(fout,"%10d\n",i);
fprintf(fout,"%.10f\n",x_h[i]);
fprintf(fout,"%.10f\n",y_h[i]);
fprintf(fout,"%.10f\n",z_h[i]);
}
fclose(fout);
}
///////////////////////////////////////////////////////////////////////////
void InitMeshDecomp(Int_t numRanks, Int_t myRank,
Int_t *col, Int_t *row, Int_t *plane, Int_t *side)
{
Int_t testProcs;
Int_t dx, dy, dz;
Int_t myDom;
// Assume cube processor layout for now
testProcs = Int_t(cbrt(Real_t(numRanks))+0.5) ;
if (testProcs*testProcs*testProcs != numRanks) {
printf("Num processors must be a cube of an integer (1, 8, 27, ...)\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
if (sizeof(Real_t) != 4 && sizeof(Real_t) != 8) {
printf("MPI operations only support float and double right now...\n");
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
if (MAX_FIELDS_PER_MPI_COMM > CACHE_COHERENCE_PAD_REAL) {
printf("corner element comm buffers too small. Fix code.\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
dx = testProcs ;
dy = testProcs ;
dz = testProcs ;
// temporary test
if (dx*dy*dz != numRanks) {
printf("error -- must have as many domains as procs\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
Int_t remainder = dx*dy*dz % numRanks ;
if (myRank < remainder) {
myDom = myRank*( 1+ (dx*dy*dz / numRanks)) ;
}
else {
myDom = remainder*( 1+ (dx*dy*dz / numRanks)) +
(myRank - remainder)*(dx*dy*dz/numRanks) ;
}
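   /* With numRanks a perfect cube (checked above) the remainder is zero and
      myDom == myRank; the general form above also covers uneven block
      distributions. The linear domain id is decoded below into (col,row,plane)
      coordinates of the dx x dy x dz processor grid. */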
*col = myDom % dx ;
*row = (myDom / dx) % dy ;
*plane = myDom / (dx*dy) ;
*side = testProcs;
return;
}
void VerifyAndWriteFinalOutput(Real_t elapsed_time,
Domain& locDom,
Int_t its,
Int_t nx,
Int_t numRanks)
{
size_t free_mem, total_mem, used_mem;
cudaMemGetInfo(&free_mem, &total_mem);
used_mem= total_mem - free_mem;
#if LULESH_SHOW_PROGRESS == 0
printf(" Used Memory = %8.4f Mb\n", used_mem / (1024.*1024.) );
#endif
// GrindTime1 only takes a single domain into account, and is thus a good way to measure
// processor speed independent of MPI parallelism.
// GrindTime2 takes into account speedups from MPI parallelism
//YKT
size_t elem = nx*nx*nx;
size_t total_elem = elem*numRanks;
Real_t grindTime1 = ((elapsed_time*1e6)/its)/elem;
Real_t grindTime2 = ((elapsed_time*1e6)/its)/total_elem;
//Real_t grindTime1 = ((elapsed_time*1e6)/its)/(nx*nx*nx);
//Real_t grindTime2 = ((elapsed_time*1e6)/its)/(nx*nx*nx*numRanks);
// Copy Energy back to Host
Real_t e_zero;
Real_t* d_ezero_ptr = locDom.e.raw() + locDom.octantCorner; /* octant corner supposed to be 0 */
cudaMemcpy(&e_zero, d_ezero_ptr, sizeof(Real_t), cudaMemcpyDeviceToHost);
printf("Run completed: \n");
printf(" Problem size = %i \n", nx);
printf(" MPI tasks = %i \n", numRanks);
printf(" Iteration count = %i \n", its);
printf(" Final Origin Energy = %12.6e \n", e_zero);
Real_t MaxAbsDiff = Real_t(0.0);
Real_t TotalAbsDiff = Real_t(0.0);
Real_t MaxRelDiff = Real_t(0.0);
Real_t *e_all = new Real_t[nx * nx];
cudaMemcpy(e_all, locDom.e.raw(), nx * nx * sizeof(Real_t), cudaMemcpyDeviceToHost);
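   // Symmetry check: the Sedov problem is symmetric about the diagonal of
   // plane 0, so e(j,k) and e(k,j) should agree; the differences accumulated
   // below quantify any asymmetry introduced by round-off.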
for (Index_t j=0; j<nx; ++j) {
for (Index_t k=j+1; k<nx; ++k) {
Real_t AbsDiff = FABS(e_all[j*nx+k]-e_all[k*nx+j]);
TotalAbsDiff += AbsDiff;
if (MaxAbsDiff <AbsDiff) MaxAbsDiff = AbsDiff;
Real_t RelDiff = AbsDiff / e_all[k*nx+j];
if (MaxRelDiff <RelDiff) MaxRelDiff = RelDiff;
}
}
delete e_all;
// Quick symmetry check
printf(" Testing Plane 0 of Energy Array on rank 0:\n");
printf(" MaxAbsDiff = %12.6e\n", MaxAbsDiff );
printf(" TotalAbsDiff = %12.6e\n", TotalAbsDiff );
printf(" MaxRelDiff = %12.6e\n\n", MaxRelDiff );
// Timing information
printf("\nElapsed time = %10.2f (s)\n", elapsed_time);
printf("Grind time (us/z/c) = %10.8g (per dom) (%10.8g overall)\n", grindTime1, grindTime2);
printf("FOM = %10.8g (z/s)\n\n", 1000.0/grindTime2); // zones per second
// hfwen
//bool write_solution_flag=true;
bool write_solution_flag=false;
if (write_solution_flag) {
write_solution(&locDom);
}
return ;
}
int main(int argc, char *argv[])
{
if (argc < 3) {
printUsage(argv);
exit( LFileError );
}
if ( strcmp(argv[1],"-u") != 0 && strcmp(argv[1],"-s") != 0 )
{
printUsage(argv);
exit( LFileError ) ;
}
int num_iters = -1;
if (argc == 5) {
num_iters = atoi(argv[4]);
}
bool structured = ( strcmp(argv[1],"-s") == 0 );
Int_t numRanks ;
Int_t myRank ;
#if USE_MPI
Domain_member fieldData ;
MPI_Init(&argc, &argv) ;
MPI_Comm_size(MPI_COMM_WORLD, &numRanks) ;
MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
#else
numRanks = 1;
myRank = 0;
#endif
//YKT
//trap();
cuda_init(myRank);
/* assume cube subdomain geometry for now */
Index_t nx = atoi(argv[2]);
Domain *locDom ;
// Set up the mesh and decompose. Assumes regular cubes for now
Int_t col, row, plane, side;
InitMeshDecomp(numRanks, myRank, &col, &row, &plane, &side);
// TODO: change default nr to 11
Int_t nr = 11;
Int_t balance = 1;
Int_t cost = 1;
// TODO: modify this constructor to account for new fields
// TODO: setup communication buffers
locDom = NewDomain(argv, numRanks, col, row, plane, nx, side, structured, nr, balance, cost);
#if USE_MPI
// copy to the host for mpi transfer
locDom->h_nodalMass = locDom->nodalMass;
fieldData = &Domain::get_nodalMass;
// Initial domain boundary communication
CommRecv(*locDom, MSG_COMM_SBN, 1,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
true, false) ;
CommSend(*locDom, MSG_COMM_SBN, 1, &fieldData,
locDom->sizeX + 1, locDom->sizeY + 1, locDom->sizeZ + 1,
true, false) ;
CommSBN(*locDom, 1, &fieldData) ;
// copy back to the device
locDom->nodalMass = locDom->h_nodalMass;
// End initialization
MPI_Barrier(MPI_COMM_WORLD);
#endif
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
/* timestep to solution */
int its=0;
if (myRank == 0) {
if (structured)
printf("Running until t=%f, Problem size=%dx%dx%d\n",locDom->stoptime,nx,nx,nx);
else
printf("Running until t=%f, Problem size=%d \n",locDom->stoptime,locDom->numElem);
}
cudaProfilerStart();
#if USE_MPI
double start = MPI_Wtime();
#else
timeval start;
gettimeofday(&start, NULL) ;
#endif
while(locDom->time_h < locDom->stoptime)
{
// this has been moved after computation of volume forces to hide launch latencies
//TimeIncrement(locDom) ;
LagrangeLeapFrog(locDom) ;
checkErrors(locDom,its,myRank);
#if LULESH_SHOW_PROGRESS
if (myRank == 0 && its % 100 == 0)
printf("cycle = %d, time = %e, dt=%e\n", its+1, double(locDom->time_h), double(locDom->deltatime_h) ) ;
#endif
its++;
if (its == num_iters) break;
}
// make sure GPU finished its work
cudaDeviceSynchronize();
// Use reduced max elapsed time
double elapsed_time;
#if USE_MPI
elapsed_time = MPI_Wtime() - start;
#else
timeval end;
gettimeofday(&end, NULL) ;
elapsed_time = (double)(end.tv_sec - start.tv_sec) + ((double)(end.tv_usec - start.tv_usec))/1000000 ;
#endif
double elapsed_timeG;
#if USE_MPI
MPI_Reduce(&elapsed_time, &elapsed_timeG, 1, MPI_DOUBLE,
MPI_MAX, 0, MPI_COMM_WORLD);
#else
elapsed_timeG = elapsed_time;
#endif
cudaProfilerStop();
if (myRank == 0)
VerifyAndWriteFinalOutput(elapsed_timeG, *locDom, its, nx, numRanks);
#ifdef SAMI
DumpDomain(locDom) ;
#endif
cudaDeviceReset();
#if USE_MPI
MPI_Finalize() ;
#endif
return 0 ;
}
|
1c126dc333520bc48f80371723cdf34ae96d30b4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include "constants.cuh"
#include "mesh.cuh"
#include "material.cuh"
#include "sys.cuh"
#include "matrix_functions.cuh"
#include "mma.cuh"
#include "pgm_utility.cuh"
#include "parallel.cuh"
void mma(struct sparse *h, double *hs, struct mesh *mesh, struct material *material, int *ik, int *jk, double *me, double *ke, int *freedofs,
int*fixeddofs, struct sparse *f, int *edofmat) {
struct subsolv_pointers ssp;
struct mmasub_pointers msp;
struct kktcheck_pointers kktp;
struct mma_param mma_p;
double *xphys;
struct sys g;
struct sys gec8;
struct sys g2;
double kktnorm,residumax;
int kktcount;
int outit;
	char filename[32]; /* large enough for "img<iteration>.pgm" for any iteration count */
hipblasHandle_t cublas_handle;
hipsolverDnHandle_t cusolverH = NULL;
hipblasCreate(&cublas_handle);
hipsolverDnCreate(&cusolverH);
gec8_sys_init(&gec8);
mma_init(&mma_p, mesh);
xphys = (double*)malloc(mesh->nelx*mesh->nely * sizeof(double));
subsolv_malloc(mma_p.m,mma_p.n,&ssp);
mmasub_malloc(mma_p.m, mma_p.n, &msp);
kktcheck_malloc(mma_p.m, mma_p.n, &kktp);
//creating the image with the initial conditions
sprintf(filename, "img%d.pgm", 0);
write_image(filename, mma_p.n, mesh, mma_p.xval);
if (mma_p.outeriter < 0.5) {
hinf_compute_mma(xphys,&mma_p, h, hs, mesh, material, ik, jk, me, ke, freedofs, fixeddofs, f , &g, &gec8, &g2, edofmat, cublas_handle, cusolverH);
}
printf("INTIAL HINF NRM: %f\n", (mma_p.f0val / 100));
kktnorm = mma_p.kkttol + 10;
mma_p.f0val = mma_p.f0valsoglia + 50;
outit = 0;
kktcount = 0;
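	// Outer MMA loop: each pass solves the MMA subproblem, shifts the design
	// variable history (xold2/xold1/xval), re-evaluates the H-infinity
	// objective and its sensitivities, and checks the KKT residual for
	// convergence.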
while ((kktnorm>mma_p.kkttol)&&(outit<mma_p.maxoutit)&&(kktcount<3)) {
outit++;
mma_p.outeriter++;
mmasub(&mma_p, mma_p.f0val, mma_p.df0dx, mma_p.fval, mma_p.dfdx, mesh, &msp, &ssp);
for (int i = 0; i < mma_p.n; i++) {
mma_p.xold2[i] = mma_p.xold1[i]; //xold2 = xold1;
mma_p.xold1[i] = mma_p.xval[i]; //xold1 = xval;
mma_p.xval[i] = mma_p.xmma[i]; //xval = xmma;
}
hinf_compute_mma(xphys, &mma_p, h, hs, mesh, material, ik, jk, me, ke, freedofs, fixeddofs, f, &g, &gec8, &g2, edofmat,cublas_handle,cusolverH); //[f0val,df0dx,fval,dfdx] = HINF_COMPUTE_MMA(xval);
kktcheck(&mma_p, &kktp, mma_p.df0dx, mma_p.fval, mma_p.dfdx, &kktnorm, &residumax);
if (kktnorm < mma_p.kkttol) {
kktcount++;
}
else {
kktcount = 0;
}
//printf("%f %f %f %f\n", kktnorm, residumax, mma_p.fval[0], mma_p.f0val);
		//every iteration a new image is created
if (outit % 1==0 ){
sprintf(filename, "img%d.pgm", outit);
write_image(filename, mma_p.n, mesh, xphys);
}
if (outit % (int)(mma_p.maxoutit/100) == 0) {
printf("%d/100 completed\n", (int)((outit/mma_p.maxoutit)*100));
}
}
printf("FINAL HINF NRM: %f\n", (mma_p.f0val / 100));
hipblasDestroy(cublas_handle);
hipsolverDnDestroy(cusolverH);
subsolv_free(&ssp);
mmasub_free(&msp);
kktcheck_free(&kktp);
mma_param_free(&mma_p);
free_sys(&gec8);
free(xphys);
}
| 1c126dc333520bc48f80371723cdf34ae96d30b4.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include "constants.cuh"
#include "mesh.cuh"
#include "material.cuh"
#include "sys.cuh"
#include "matrix_functions.cuh"
#include "mma.cuh"
#include "pgm_utility.cuh"
#include "parallel.cuh"
void mma(struct sparse *h, double *hs, struct mesh *mesh, struct material *material, int *ik, int *jk, double *me, double *ke, int *freedofs,
int*fixeddofs, struct sparse *f, int *edofmat) {
struct subsolv_pointers ssp;
struct mmasub_pointers msp;
struct kktcheck_pointers kktp;
struct mma_param mma_p;
double *xphys;
struct sys g;
struct sys gec8;
struct sys g2;
double kktnorm,residumax;
int kktcount;
int outit;
	char filename[32]; /* large enough for "img<iteration>.pgm" for any iteration count */
cublasHandle_t cublas_handle;
cusolverDnHandle_t cusolverH = NULL;
cublasCreate(&cublas_handle);
cusolverDnCreate(&cusolverH);
gec8_sys_init(&gec8);
mma_init(&mma_p, mesh);
xphys = (double*)malloc(mesh->nelx*mesh->nely * sizeof(double));
subsolv_malloc(mma_p.m,mma_p.n,&ssp);
mmasub_malloc(mma_p.m, mma_p.n, &msp);
kktcheck_malloc(mma_p.m, mma_p.n, &kktp);
//creating the image with the initial conditions
sprintf(filename, "img%d.pgm", 0);
write_image(filename, mma_p.n, mesh, mma_p.xval);
if (mma_p.outeriter < 0.5) {
hinf_compute_mma(xphys,&mma_p, h, hs, mesh, material, ik, jk, me, ke, freedofs, fixeddofs, f , &g, &gec8, &g2, edofmat, cublas_handle, cusolverH);
}
printf("INTIAL HINF NRM: %f\n", (mma_p.f0val / 100));
kktnorm = mma_p.kkttol + 10;
mma_p.f0val = mma_p.f0valsoglia + 50;
outit = 0;
kktcount = 0;
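	// Outer MMA loop: each pass solves the MMA subproblem, shifts the design
	// variable history (xold2/xold1/xval), re-evaluates the H-infinity
	// objective and its sensitivities, and checks the KKT residual for
	// convergence.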
while ((kktnorm>mma_p.kkttol)&&(outit<mma_p.maxoutit)&&(kktcount<3)) {
outit++;
mma_p.outeriter++;
mmasub(&mma_p, mma_p.f0val, mma_p.df0dx, mma_p.fval, mma_p.dfdx, mesh, &msp, &ssp);
for (int i = 0; i < mma_p.n; i++) {
mma_p.xold2[i] = mma_p.xold1[i]; //xold2 = xold1;
mma_p.xold1[i] = mma_p.xval[i]; //xold1 = xval;
mma_p.xval[i] = mma_p.xmma[i]; //xval = xmma;
}
hinf_compute_mma(xphys, &mma_p, h, hs, mesh, material, ik, jk, me, ke, freedofs, fixeddofs, f, &g, &gec8, &g2, edofmat,cublas_handle,cusolverH); //[f0val,df0dx,fval,dfdx] = HINF_COMPUTE_MMA(xval);
kktcheck(&mma_p, &kktp, mma_p.df0dx, mma_p.fval, mma_p.dfdx, &kktnorm, &residumax);
if (kktnorm < mma_p.kkttol) {
kktcount++;
}
else {
kktcount = 0;
}
//printf("%f %f %f %f\n", kktnorm, residumax, mma_p.fval[0], mma_p.f0val);
		//every iteration a new image is created
if (outit % 1==0 ){
sprintf(filename, "img%d.pgm", outit);
write_image(filename, mma_p.n, mesh, xphys);
}
if (outit % (int)(mma_p.maxoutit/100) == 0) {
printf("%d/100 completed\n", (int)((outit/mma_p.maxoutit)*100));
}
}
printf("FINAL HINF NRM: %f\n", (mma_p.f0val / 100));
cublasDestroy(cublas_handle);
cusolverDnDestroy(cusolverH);
subsolv_free(&ssp);
mmasub_free(&msp);
kktcheck_free(&kktp);
mma_param_free(&mma_p);
free_sys(&gec8);
free(xphys);
}
|
d7ceb6702adfb41f622f1774d7b1358095178138.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//[liao6@tux385:~/workspace/autoPar/buildtree/tests/nonsmoke/functional/roseTests/ompLoweringTests]cat rose_jacobi-ompacc-opt2.cu
// Liao, 7/9/2014, add collapse() inside jacobi()
// Liao, 1/22/2015, test nested map() clauses supported by device data environment reuse.
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// Add timing support
#include <sys/time.h>
#include "libxomp.h"
#include "xomp_cuda_lib_inlined.cu"
double time_stamp()
{
struct timeval t;
double time;
gettimeofday(&t,(struct timezone *)((void *)0));
time = t . tv_sec + 1.0e-6 * t . tv_usec;
return time;
}
double time1;
double time2;
void driver();
void initialize();
void jacobi();
void error_check();
/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This c version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successive over-relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/
#define MSIZE 5120
int n;
int m;
int mits;
#define REAL float // flexible between float and double
// depending on MSIZE!!
float error_ref = 9.212767E-04;
float resid_ref = 2.355429E-08;
float tol;
float relax = 1.0;
float alpha = 0.0543;
float u[512][512];
float f[512][512];
float uold[512][512];
float dx;
float dy;
// value, reference value, and the number of significant digits to be ensured.
double diff_ratio(double val,double ref,int significant_digits)
{
significant_digits >= 1?((void )0) : __assert_fail("significant_digits>=1","jacobi-ompacc-opt2.c",67,__PRETTY_FUNCTION__);
double diff_ratio = fabs(val - ref) / fabs(ref);
// 1.0/(double(10^significant_digits)) ;
double upper_limit = pow(0.1,significant_digits);
printf("value :%E ref_value: %E diff_ratio: %E upper_limit: %E \n",val,ref,diff_ratio,upper_limit);
// ensure the number of the significant digits to be the same
diff_ratio < upper_limit?((void )0) : __assert_fail("diff_ratio < upper_limit","jacobi-ompacc-opt2.c",72,__PRETTY_FUNCTION__);
return diff_ratio;
}
int main()
{
xomp_acc_init();
// float toler;
/* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
scanf ("%d",&n);
scanf ("%d",&m);
printf("Input tol - error tolerance for iterative solver\n");
scanf("%f",&toler);
tol=(double)toler;
printf("Input mits - Maximum iterations for solver\n");
scanf("%d",&mits);
*/
n = 512;
m = 512;
tol = 0.0000000001;
mits = 5000;
#if 0 // Not yet support concurrent CPU and GPU threads
#ifdef _OPENMP
#endif
#endif
driver();
return 0;
}
/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialized.
*
* Working variables/arrays
* dx - grid spacing in x direction
* dy - grid spacing in y direction
*************************************************************/
void driver()
{
initialize();
time1 = time_stamp();
/* Solve Helmholtz equation */
jacobi();
time2 = time_stamp();
printf("------------------------\n");
printf("Execution time = %f\n",time2 - time1);
/* error_check (n,m,alpha,dx,dy,u,f)*/
error_check();
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
void initialize()
{
int i;
int j;
int xx;
int yy;
//double PI=3.1415926;
dx = (2.0 / (n - 1));
dy = (2.0 / (m - 1));
/* Initialize initial condition and RHS */
//#pragma omp parallel for private(xx,yy,j,i)
for (i = 0; i < n; i++)
for (j = 0; j < m; j++) {
xx = ((int )(- 1.0 + (dx * (i - 1))));
yy = ((int )(- 1.0 + (dy * (j - 1))));
u[i][j] = 0.0;
f[i][j] = (- 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy)));
}
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlet boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
__global__ void OUT__1__11053__(float omega,float ax,float ay,float b,int __final_total_iters__2__,int __i_interval__3__,float *_dev_per_block_error,float *_dev_u,float *_dev_f,float *_dev_uold)
{
int _p_i;
int _p_j;
float _p_error;
_p_error = 0;
float _p_resid;
int _p___collapsed_index__5__;
int _dev_lower;
int _dev_upper;
int _dev_loop_chunk_size;
int _dev_loop_sched_index;
int _dev_loop_stride;
int _dev_thread_num = getCUDABlockThreadCount(1);
int _dev_thread_id = getLoopIndexFromCUDAVariables(1);
XOMP_static_sched_init(0,__final_total_iters__2__ - 1,1,1,_dev_thread_num,_dev_thread_id,&_dev_loop_chunk_size,&_dev_loop_sched_index,&_dev_loop_stride);
while(XOMP_static_sched_next(&_dev_loop_sched_index,__final_total_iters__2__ - 1,1,_dev_loop_stride,_dev_loop_chunk_size,_dev_thread_num,_dev_thread_id,&_dev_lower,&_dev_upper))
for (_p___collapsed_index__5__ = _dev_lower; _p___collapsed_index__5__ <= _dev_upper; _p___collapsed_index__5__ += 1) {
_p_i = _p___collapsed_index__5__ / __i_interval__3__ * 1 + 1;
_p_j = _p___collapsed_index__5__ % __i_interval__3__ * 1 + 1;
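      // Recover (i,j) from the collapsed 1-D loop index: i = index / (#j
      // iterations), j = index % (#j iterations); the "+ 1" restores the
      // original lower bounds of the collapsed loops.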
_p_resid = (ax * (_dev_uold[(_p_i - 1) * 512 + _p_j] + _dev_uold[(_p_i + 1) * 512 + _p_j]) + ay * (_dev_uold[_p_i * 512 + (_p_j - 1)] + _dev_uold[_p_i * 512 + (_p_j + 1)]) + b * _dev_uold[_p_i * 512 + _p_j] - _dev_f[_p_i * 512 + _p_j]) / b;
_dev_u[_p_i * 512 + _p_j] = _dev_uold[_p_i * 512 + _p_j] - omega * _p_resid;
_p_error = _p_error + _p_resid * _p_resid;
}
xomp_inner_block_reduction_float(_p_error,_dev_per_block_error,6);
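  // Each block reduces its threads' partial sums of the squared residual into
  // _dev_per_block_error; the per-block values are combined on the host by
  // xomp_beyond_block_reduction_float after the kernel returns.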
}
// swap old and new arrays
__global__ void OUT__2__11053__(int __final_total_iters__8__,int __i_interval__9__,float *_dev_u,float *_dev_uold)
{
int _p___collapsed_index__11__;
int _p_i;
int _p_j;
int _dev_lower;
int _dev_upper;
int _dev_loop_chunk_size;
int _dev_loop_sched_index;
int _dev_loop_stride;
int _dev_thread_num = getCUDABlockThreadCount(1);
int _dev_thread_id = getLoopIndexFromCUDAVariables(1);
XOMP_static_sched_init(0,__final_total_iters__8__ - 1,1,1,_dev_thread_num,_dev_thread_id,&_dev_loop_chunk_size,&_dev_loop_sched_index,&_dev_loop_stride);
while(XOMP_static_sched_next(&_dev_loop_sched_index,__final_total_iters__8__ - 1,1,_dev_loop_stride,_dev_loop_chunk_size,_dev_thread_num,_dev_thread_id,&_dev_lower,&_dev_upper))
for (_p___collapsed_index__11__ = _dev_lower; _p___collapsed_index__11__ <= _dev_upper; _p___collapsed_index__11__ += 1) {
_p_i = _p___collapsed_index__11__ / __i_interval__9__ * 1 + 0;
_p_j = _p___collapsed_index__11__ % __i_interval__9__ * 1 + 0;
_dev_uold[_p_i * 512 + _p_j] = _dev_u[_p_i * 512 + _p_j];
}
}
void jacobi()
{
float time, cumulative_time = 0.f;
float omega;
int k;
float error;
float ax;
float ay;
float b;
// double error_local;
// float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
// float te1,te2;
// float second;
omega = relax;
/*
* Initialize coefficients */
/* X-direction coef */
ax = (1.0 / (dx * dx));
/* Y-direction coef */
ay = (1.0 / (dy * dy));
/* Central coeff */
b = (- 2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha);
error = (10.0 * tol);
k = 0;
// An optimization on top of naive coding: promoting data handling outside the while loop
// data properties may change since the scope is bigger:
/* Translated from #pragma omp target data ... */
{
xomp_deviceDataEnvironmentEnter(0);
// float *_dev_u;
int _dev_u_size[2] = {n, m};
int _dev_u_offset[2] = {0, 0};
int _dev_u_Dim[2] = {512, 512};
// _dev_u =
(float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)u,2,sizeof(float ),_dev_u_size,_dev_u_offset,_dev_u_Dim,1,1));
//float *_dev_f;
int _dev_f_size[2] = {n, m};
int _dev_f_offset[2] = {0, 0};
int _dev_f_Dim[2] = {512, 512};
// _dev_f =
((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)f,2,sizeof(float ),_dev_f_size,_dev_f_offset,_dev_f_Dim,1,0)));
//float *_dev_uold;
int _dev_uold_size[2] = {n, m};
int _dev_uold_offset[2] = {0, 0};
int _dev_uold_Dim[2] = {512, 512};
//_dev_uold =
((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)uold,2,sizeof(float ),_dev_uold_size,_dev_uold_offset,_dev_uold_Dim,0,0)));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//while(k <= mits && error > tol){
while(k < 10) {
int __i_total_iters__0__ = (n - 1 - 1 - 1 + 1) % 1 == 0?(n - 1 - 1 - 1 + 1) / 1 : (n - 1 - 1 - 1 + 1) / 1 + 1;
int __j_total_iters__1__ = (m - 1 - 1 - 1 + 1) % 1 == 0?(m - 1 - 1 - 1 + 1) / 1 : (m - 1 - 1 - 1 + 1) / 1 + 1;
int __final_total_iters__2__ = 1 * __i_total_iters__0__ * __j_total_iters__1__;
int __i_interval__3__ = __j_total_iters__1__ * 1;
//int __j_interval__4__ = 1;
//int __collapsed_index__5__;
int __i_total_iters__6__ = (n - 1 - 0 + 1) % 1 == 0?(n - 1 - 0 + 1) / 1 : (n - 1 - 0 + 1) / 1 + 1;
int __j_total_iters__7__ = (m - 1 - 0 + 1) % 1 == 0?(m - 1 - 0 + 1) / 1 : (m - 1 - 0 + 1) / 1 + 1;
int __final_total_iters__8__ = 1 * __i_total_iters__6__ * __j_total_iters__7__;
int __i_interval__9__ = __j_total_iters__7__ * 1;
//int __j_interval__10__ = 1;
//int __collapsed_index__11__;
error = 0.0;
/* Copy new solution into old */
//#pragma omp parallel
// {
{
xomp_deviceDataEnvironmentEnter(0);
float *_dev_u;
int _dev_u_size[2] = {n, m};
int _dev_u_offset[2] = {0, 0};
int _dev_u_Dim[2] = {512, 512};
_dev_u = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)u,2,sizeof(float ),_dev_u_size,_dev_u_offset,_dev_u_Dim,1,0)));
float *_dev_uold;
int _dev_uold_size[2] = {n, m};
int _dev_uold_offset[2] = {0, 0};
int _dev_uold_Dim[2] = {512, 512};
_dev_uold = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)uold,2,sizeof(float ),_dev_uold_size,_dev_uold_offset,_dev_uold_Dim,0,1)));
/* Launch CUDA kernel ... */
int _threads_per_block_ = xomp_get_maxThreadsPerBlock(0);
int _num_blocks_ = xomp_get_max1DBlock(0,__final_total_iters__8__ - 1 - 0 + 1);
hipLaunchKernelGGL(( OUT__2__11053__), dim3(_num_blocks_),dim3(_threads_per_block_), 0, 0, __final_total_iters__8__,__i_interval__9__,_dev_u,_dev_uold);
xomp_deviceDataEnvironmentExit(0);
}
// real jacobi kernel calculation
{
xomp_deviceDataEnvironmentEnter(0);
float *_dev_u;
int _dev_u_size[2] = {n, m};
int _dev_u_offset[2] = {0, 0};
int _dev_u_Dim[2] = {512, 512};
_dev_u = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)u,2,sizeof(float ),_dev_u_size,_dev_u_offset,_dev_u_Dim,0,1)));
float *_dev_f;
int _dev_f_size[2] = {n, m};
int _dev_f_offset[2] = {0, 0};
int _dev_f_Dim[2] = {512, 512};
_dev_f = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)f,2,sizeof(float ),_dev_f_size,_dev_f_offset,_dev_f_Dim,1,0)));
float *_dev_uold;
int _dev_uold_size[2] = {n, m};
int _dev_uold_offset[2] = {0, 0};
int _dev_uold_Dim[2] = {512, 512};
_dev_uold = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)uold,2,sizeof(float ),_dev_uold_size,_dev_uold_offset,_dev_uold_Dim,1,0)));
/* Launch CUDA kernel ... */
int _threads_per_block_ = xomp_get_maxThreadsPerBlock(0);
int _num_blocks_ = xomp_get_max1DBlock(0,__final_total_iters__2__ - 1 - 0 + 1);
float *_dev_per_block_error = (float *)(xomp_deviceMalloc(_num_blocks_ * sizeof(float )));
hipEventRecord(start, 0);
if (k==0)
printf("Kernel launch configuration: 1-D blocks=%d threads-per-block=%d \n", _num_blocks_, _threads_per_block_);
hipLaunchKernelGGL(( OUT__1__11053__), dim3(_num_blocks_),dim3(_threads_per_block_),(_threads_per_block_ * sizeof(float )), 0, omega,ax,ay,b,__final_total_iters__2__,__i_interval__3__,_dev_per_block_error,_dev_u,_dev_f,_dev_uold);
error = xomp_beyond_block_reduction_float(_dev_per_block_error,_num_blocks_,6);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
cumulative_time = cumulative_time + time;
xomp_freeDevice(_dev_per_block_error);
xomp_deviceDataEnvironmentExit(0);
}
// }
/* omp end parallel */
k = k + 1;
/* Error check */
if (k % 500 == 0)
printf("Finished %d iteration with error =%f\n",k,error);
error = (sqrt(error) / (n * m));
/* End iteration loop */
}
xomp_deviceDataEnvironmentExit(0);
}
printf("array size :%d\n", MSIZE);
printf("jacobi kernel + reduction time , average over %d times: %3.5f ms \n", k, cumulative_time / k);
printf("Total Number of Iterations:%d\n",k);
printf("Residual:%E\n",error);
printf("Residual_ref :%E\n",resid_ref);
printf("Diff ref=%E\n",(fabs((error - resid_ref))));
// fabs((error - resid_ref)) < 1E-13?((void )0) : __assert_fail("fabs(error-resid_ref) < 1E-13","jacobi-ompacc-opt2.c",247,__PRETTY_FUNCTION__);
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check()
{
int i;
int j;
float xx;
float yy;
float temp;
float error;
dx = (2.0 / (n - 1));
dy = (2.0 / (m - 1));
error = 0.0;
//#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
for (i = 0; i < n; i++)
for (j = 0; j < m; j++) {
xx = (- 1.0 + (dx * (i - 1)));
yy = (- 1.0 + (dy * (j - 1)));
temp = (u[i][j] - (1.0 - (xx * xx)) * (1.0 - (yy * yy)));
error = error + temp * temp;
}
error = (sqrt(error) / (n * m));
printf("Solution Error :%E \n",error);
printf("Solution Error Ref :%E \n",error_ref);
printf("Diff ref=%E\n",(fabs((error - error_ref))));
// turn off this for now , we only iterate 10 times for performance modeling data collection
// fabs((error - error_ref)) < 1E-13?((void )0) : __assert_fail("fabs(error-error_ref) < 1E-13","jacobi-ompacc-opt2.c",278,__PRETTY_FUNCTION__);
}
| d7ceb6702adfb41f622f1774d7b1358095178138.cu | //[liao6@tux385:~/workspace/autoPar/buildtree/tests/nonsmoke/functional/roseTests/ompLoweringTests]cat rose_jacobi-ompacc-opt2.cu
// Liao, 7/9/2014, add collapse() inside jacobi()
// Liao, 1/22/2015, test nested map() clauses supported by device data environment reuse.
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// Add timing support
#include <sys/time.h>
#include "libxomp.h"
#include "xomp_cuda_lib_inlined.cu"
double time_stamp()
{
struct timeval t;
double time;
gettimeofday(&t,(struct timezone *)((void *)0));
time = t . tv_sec + 1.0e-6 * t . tv_usec;
return time;
}
double time1;
double time2;
void driver();
void initialize();
void jacobi();
void error_check();
/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This c version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successive over-relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/
#define MSIZE 5120
int n;
int m;
int mits;
#define REAL float // flexible between float and double
// depending on MSIZE!!
float error_ref = 9.212767E-04;
float resid_ref = 2.355429E-08;
float tol;
float relax = 1.0;
float alpha = 0.0543;
float u[512][512];
float f[512][512];
float uold[512][512];
float dx;
float dy;
// value, reference value, and the number of significant digits to be ensured.
double diff_ratio(double val,double ref,int significant_digits)
{
significant_digits >= 1?((void )0) : __assert_fail("significant_digits>=1","jacobi-ompacc-opt2.c",67,__PRETTY_FUNCTION__);
double diff_ratio = fabs(val - ref) / fabs(ref);
// 1.0/(double(10^significant_digits)) ;
double upper_limit = pow(0.1,significant_digits);
printf("value :%E ref_value: %E diff_ratio: %E upper_limit: %E \n",val,ref,diff_ratio,upper_limit);
// ensure the number of the significant digits to be the same
diff_ratio < upper_limit?((void )0) : __assert_fail("diff_ratio < upper_limit","jacobi-ompacc-opt2.c",72,__PRETTY_FUNCTION__);
return diff_ratio;
}
int main()
{
xomp_acc_init();
// float toler;
/* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
scanf ("%d",&n);
scanf ("%d",&m);
printf("Input tol - error tolerance for iterative solver\n");
scanf("%f",&toler);
tol=(double)toler;
printf("Input mits - Maximum iterations for solver\n");
scanf("%d",&mits);
*/
n = 512;
m = 512;
tol = 0.0000000001;
mits = 5000;
#if 0 // Not yet support concurrent CPU and GPU threads
#ifdef _OPENMP
#endif
#endif
driver();
return 0;
}
/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialized.
*
* Working variables/arrays
* dx - grid spacing in x direction
* dy - grid spacing in y direction
*************************************************************/
void driver()
{
initialize();
time1 = time_stamp();
/* Solve Helmholtz equation */
jacobi();
time2 = time_stamp();
printf("------------------------\n");
printf("Execution time = %f\n",time2 - time1);
/* error_check (n,m,alpha,dx,dy,u,f)*/
error_check();
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
void initialize()
{
int i;
int j;
int xx;
int yy;
//double PI=3.1415926;
dx = (2.0 / (n - 1));
dy = (2.0 / (m - 1));
/* Initialize initial condition and RHS */
//#pragma omp parallel for private(xx,yy,j,i)
for (i = 0; i < n; i++)
for (j = 0; j < m; j++) {
xx = ((int )(- 1.0 + (dx * (i - 1))));
yy = ((int )(- 1.0 + (dy * (j - 1))));
u[i][j] = 0.0;
f[i][j] = (- 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy)));
}
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlet boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
__global__ void OUT__1__11053__(float omega,float ax,float ay,float b,int __final_total_iters__2__,int __i_interval__3__,float *_dev_per_block_error,float *_dev_u,float *_dev_f,float *_dev_uold)
{
int _p_i;
int _p_j;
float _p_error;
_p_error = 0;
float _p_resid;
int _p___collapsed_index__5__;
int _dev_lower;
int _dev_upper;
int _dev_loop_chunk_size;
int _dev_loop_sched_index;
int _dev_loop_stride;
int _dev_thread_num = getCUDABlockThreadCount(1);
int _dev_thread_id = getLoopIndexFromCUDAVariables(1);
XOMP_static_sched_init(0,__final_total_iters__2__ - 1,1,1,_dev_thread_num,_dev_thread_id,&_dev_loop_chunk_size,&_dev_loop_sched_index,&_dev_loop_stride);
while(XOMP_static_sched_next(&_dev_loop_sched_index,__final_total_iters__2__ - 1,1,_dev_loop_stride,_dev_loop_chunk_size,_dev_thread_num,_dev_thread_id,&_dev_lower,&_dev_upper))
for (_p___collapsed_index__5__ = _dev_lower; _p___collapsed_index__5__ <= _dev_upper; _p___collapsed_index__5__ += 1) {
_p_i = _p___collapsed_index__5__ / __i_interval__3__ * 1 + 1;
_p_j = _p___collapsed_index__5__ % __i_interval__3__ * 1 + 1;
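      // Recover (i,j) from the collapsed 1-D loop index: i = index / (#j
      // iterations), j = index % (#j iterations); the "+ 1" restores the
      // original lower bounds of the collapsed loops.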
_p_resid = (ax * (_dev_uold[(_p_i - 1) * 512 + _p_j] + _dev_uold[(_p_i + 1) * 512 + _p_j]) + ay * (_dev_uold[_p_i * 512 + (_p_j - 1)] + _dev_uold[_p_i * 512 + (_p_j + 1)]) + b * _dev_uold[_p_i * 512 + _p_j] - _dev_f[_p_i * 512 + _p_j]) / b;
_dev_u[_p_i * 512 + _p_j] = _dev_uold[_p_i * 512 + _p_j] - omega * _p_resid;
_p_error = _p_error + _p_resid * _p_resid;
}
xomp_inner_block_reduction_float(_p_error,_dev_per_block_error,6);
}
// swap old and new arrays
__global__ void OUT__2__11053__(int __final_total_iters__8__,int __i_interval__9__,float *_dev_u,float *_dev_uold)
{
int _p___collapsed_index__11__;
int _p_i;
int _p_j;
int _dev_lower;
int _dev_upper;
int _dev_loop_chunk_size;
int _dev_loop_sched_index;
int _dev_loop_stride;
int _dev_thread_num = getCUDABlockThreadCount(1);
int _dev_thread_id = getLoopIndexFromCUDAVariables(1);
XOMP_static_sched_init(0,__final_total_iters__8__ - 1,1,1,_dev_thread_num,_dev_thread_id,&_dev_loop_chunk_size,&_dev_loop_sched_index,&_dev_loop_stride);
while(XOMP_static_sched_next(&_dev_loop_sched_index,__final_total_iters__8__ - 1,1,_dev_loop_stride,_dev_loop_chunk_size,_dev_thread_num,_dev_thread_id,&_dev_lower,&_dev_upper))
for (_p___collapsed_index__11__ = _dev_lower; _p___collapsed_index__11__ <= _dev_upper; _p___collapsed_index__11__ += 1) {
_p_i = _p___collapsed_index__11__ / __i_interval__9__ * 1 + 0;
_p_j = _p___collapsed_index__11__ % __i_interval__9__ * 1 + 0;
_dev_uold[_p_i * 512 + _p_j] = _dev_u[_p_i * 512 + _p_j];
}
}
void jacobi()
{
float time, cumulative_time = 0.f;
float omega;
int k;
float error;
float ax;
float ay;
float b;
// double error_local;
// float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
// float te1,te2;
// float second;
omega = relax;
/*
* Initialize coefficients */
/* X-direction coef */
ax = (1.0 / (dx * dx));
/* Y-direction coef */
ay = (1.0 / (dy * dy));
/* Central coeff */
b = (- 2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha);
error = (10.0 * tol);
k = 0;
// An optimization on top of naive coding: promoting data handling outside the while loop
// data properties may change since the scope is bigger:
/* Translated from #pragma omp target data ... */
{
xomp_deviceDataEnvironmentEnter(0);
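    // The device data environment is entered once, outside the while loop, so
    // u, f and uold stay resident on the GPU across iterations; the nested
    // Enter/Prepare/Exit calls inside the loop then reuse these mappings
    // instead of re-transferring the arrays on every pass.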
// float *_dev_u;
int _dev_u_size[2] = {n, m};
int _dev_u_offset[2] = {0, 0};
int _dev_u_Dim[2] = {512, 512};
// _dev_u =
(float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)u,2,sizeof(float ),_dev_u_size,_dev_u_offset,_dev_u_Dim,1,1));
//float *_dev_f;
int _dev_f_size[2] = {n, m};
int _dev_f_offset[2] = {0, 0};
int _dev_f_Dim[2] = {512, 512};
// _dev_f =
((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)f,2,sizeof(float ),_dev_f_size,_dev_f_offset,_dev_f_Dim,1,0)));
//float *_dev_uold;
int _dev_uold_size[2] = {n, m};
int _dev_uold_offset[2] = {0, 0};
int _dev_uold_Dim[2] = {512, 512};
//_dev_uold =
((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)uold,2,sizeof(float ),_dev_uold_size,_dev_uold_offset,_dev_uold_Dim,0,0)));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//while(k <= mits && error > tol){
while(k < 10) {
int __i_total_iters__0__ = (n - 1 - 1 - 1 + 1) % 1 == 0?(n - 1 - 1 - 1 + 1) / 1 : (n - 1 - 1 - 1 + 1) / 1 + 1;
int __j_total_iters__1__ = (m - 1 - 1 - 1 + 1) % 1 == 0?(m - 1 - 1 - 1 + 1) / 1 : (m - 1 - 1 - 1 + 1) / 1 + 1;
int __final_total_iters__2__ = 1 * __i_total_iters__0__ * __j_total_iters__1__;
int __i_interval__3__ = __j_total_iters__1__ * 1;
//int __j_interval__4__ = 1;
//int __collapsed_index__5__;
int __i_total_iters__6__ = (n - 1 - 0 + 1) % 1 == 0?(n - 1 - 0 + 1) / 1 : (n - 1 - 0 + 1) / 1 + 1;
int __j_total_iters__7__ = (m - 1 - 0 + 1) % 1 == 0?(m - 1 - 0 + 1) / 1 : (m - 1 - 0 + 1) / 1 + 1;
int __final_total_iters__8__ = 1 * __i_total_iters__6__ * __j_total_iters__7__;
int __i_interval__9__ = __j_total_iters__7__ * 1;
//int __j_interval__10__ = 1;
//int __collapsed_index__11__;
error = 0.0;
/* Copy new solution into old */
//#pragma omp parallel
// {
{
xomp_deviceDataEnvironmentEnter(0);
float *_dev_u;
int _dev_u_size[2] = {n, m};
int _dev_u_offset[2] = {0, 0};
int _dev_u_Dim[2] = {512, 512};
_dev_u = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)u,2,sizeof(float ),_dev_u_size,_dev_u_offset,_dev_u_Dim,1,0)));
float *_dev_uold;
int _dev_uold_size[2] = {n, m};
int _dev_uold_offset[2] = {0, 0};
int _dev_uold_Dim[2] = {512, 512};
_dev_uold = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)uold,2,sizeof(float ),_dev_uold_size,_dev_uold_offset,_dev_uold_Dim,0,1)));
/* Launch CUDA kernel ... */
int _threads_per_block_ = xomp_get_maxThreadsPerBlock(0);
int _num_blocks_ = xomp_get_max1DBlock(0,__final_total_iters__8__ - 1 - 0 + 1);
OUT__2__11053__<<<_num_blocks_,_threads_per_block_>>>(__final_total_iters__8__,__i_interval__9__,_dev_u,_dev_uold);
xomp_deviceDataEnvironmentExit(0);
}
// real jacobi kernel calculation
{
xomp_deviceDataEnvironmentEnter(0);
float *_dev_u;
int _dev_u_size[2] = {n, m};
int _dev_u_offset[2] = {0, 0};
int _dev_u_Dim[2] = {512, 512};
_dev_u = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)u,2,sizeof(float ),_dev_u_size,_dev_u_offset,_dev_u_Dim,0,1)));
float *_dev_f;
int _dev_f_size[2] = {n, m};
int _dev_f_offset[2] = {0, 0};
int _dev_f_Dim[2] = {512, 512};
_dev_f = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)f,2,sizeof(float ),_dev_f_size,_dev_f_offset,_dev_f_Dim,1,0)));
float *_dev_uold;
int _dev_uold_size[2] = {n, m};
int _dev_uold_offset[2] = {0, 0};
int _dev_uold_Dim[2] = {512, 512};
_dev_uold = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)uold,2,sizeof(float ),_dev_uold_size,_dev_uold_offset,_dev_uold_Dim,1,0)));
/* Launch CUDA kernel ... */
int _threads_per_block_ = xomp_get_maxThreadsPerBlock(0);
int _num_blocks_ = xomp_get_max1DBlock(0,__final_total_iters__2__ - 1 - 0 + 1);
float *_dev_per_block_error = (float *)(xomp_deviceMalloc(_num_blocks_ * sizeof(float )));
cudaEventRecord(start, 0);
if (k==0)
printf("Kernel launch configuration: 1-D blocks=%d threads-per-block=%d \n", _num_blocks_, _threads_per_block_);
OUT__1__11053__<<<_num_blocks_,_threads_per_block_,(_threads_per_block_ * sizeof(float ))>>>(omega,ax,ay,b,__final_total_iters__2__,__i_interval__3__,_dev_per_block_error,_dev_u,_dev_f,_dev_uold);
error = xomp_beyond_block_reduction_float(_dev_per_block_error,_num_blocks_,6);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cumulative_time = cumulative_time + time;
xomp_freeDevice(_dev_per_block_error);
xomp_deviceDataEnvironmentExit(0);
}
// }
/* omp end parallel */
k = k + 1;
/* Error check */
if (k % 500 == 0)
printf("Finished %d iteration with error =%f\n",k,error);
error = (sqrt(error) / (n * m));
/* End iteration loop */
}
xomp_deviceDataEnvironmentExit(0);
}
printf("array size :%d\n", MSIZE);
printf("jacobi kernel + reduction time , average over %d times: %3.5f ms \n", k, cumulative_time / k);
printf("Total Number of Iterations:%d\n",k);
printf("Residual:%E\n",error);
printf("Residual_ref :%E\n",resid_ref);
printf("Diff ref=%E\n",(fabs((error - resid_ref))));
// fabs((error - resid_ref)) < 1E-13?((void )0) : __assert_fail("fabs(error-resid_ref) < 1E-13","jacobi-ompacc-opt2.c",247,__PRETTY_FUNCTION__);
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check()
{
int i;
int j;
float xx;
float yy;
float temp;
float error;
dx = (2.0 / (n - 1));
dy = (2.0 / (m - 1));
error = 0.0;
//#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
for (i = 0; i < n; i++)
for (j = 0; j < m; j++) {
xx = (- 1.0 + (dx * (i - 1)));
yy = (- 1.0 + (dy * (j - 1)));
temp = (u[i][j] - (1.0 - (xx * xx)) * (1.0 - (yy * yy)));
error = error + temp * temp;
}
error = (sqrt(error) / (n * m));
printf("Solution Error :%E \n",error);
printf("Solution Error Ref :%E \n",error_ref);
printf("Diff ref=%E\n",(fabs((error - error_ref))));
// turn off this for now , we only iterate 10 times for performance modeling data collection
// fabs((error - error_ref)) < 1E-13?((void )0) : __assert_fail("fabs(error-error_ref) < 1E-13","jacobi-ompacc-opt2.c",278,__PRETTY_FUNCTION__);
}
|
4e4159060687e95d755f73db562727ae751a2148.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef TENSOR_CUH_
#define TENSOR_CUH_
#include <assert.h>
#include <cstdlib>
#include <numeric>
#include <functional>
#include "basics/session.hpp"
#include "hip/hip_runtime.h"
#include "utils/helper_cuda.h"
#include "stdio.h"
/*
4D Tensor
*/
template<class Dtype>
class Tensor {
public:
__host__ __device__ inline unsigned GetIdx(const int* idx) const {
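    // Row-major flattening: for dims {d0,d1,d2,d3} and idx {i0,i1,i2,i3} this
    // computes ((i0*d1 + i1)*d2 + i2)*d3 + i3.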
unsigned out_idx = 0;
// for (int i = 3; i >= 0; i--)
for(int i = 0; i < 4; i++)
out_idx = out_idx*dims_[i] + idx[i];
return out_idx;
}
__host__ __device__ const size_t* SetDims(size_t * dims) {
assert(dims[0] == dims_[0]);
size_t new_len = dims[0]*dims[1]*dims[2]*dims[3];
assert(new_len == len_);
dims_[0] = dims[0];
dims_[1] = dims[1];
dims_[2] = dims[2];
dims_[3] = dims[3];
    return dims_;
  }
__host__ __device__ const size_t* GetDims() const {
return dims_;
}
__host__ __device__ Dtype* GetDataPtr() const {
return data_array_;
}
__host__ __device__ void SetDataPtr(Dtype* data_array_ptr) {
data_array_ = data_array_ptr;
}
__host__ __device__ inline Dtype& at(const int i0, const int i1, const int i2, const int i3) {
int idx[4] = {i0, i1, i2, i3};
return at(idx);
}
__host__ __device__ inline Dtype& at(const int* idx) {
assert(isValidIdx(idx));
return data_array_[GetIdx(idx)];
}
__host__ __device__ inline const Dtype atPadding(const int i0, const int i1, const int i2, const int i3) {
int idx[4] = {i0, i1, i2, i3};
return atPadding(idx);
}
__host__ __device__ inline const Dtype atPadding(int* idx, Dtype default_val = 0.0) const {
if (!isValidIdx(idx)) return default_val;
return data_array_[GetIdx(idx)];
}
__host__ __device__ inline bool isValidIdx(const int i0, const int i1, const int i2, const int i3) {
int idx[4] = {i0, i1, i2, i3};
return isValidIdx(idx);
}
__host__ __device__ inline bool isValidIdx(const int* idx) const {
for(int i = 0; i < 4; i++) {
if(idx[i] < 0 || idx[i] >= dims_[i]) return false;
}
return true;
}
__host__ __device__ size_t size() const {
return len_;
}
__host__ __device__ void ResetVals(Dtype default_val = 0.0) {
for (size_t i = 0; i < len_; ++i) {
data_array_[i] = default_val;
}
}
__host__ static Tensor<Dtype>* CreateTensorGPU(size_t* dims, bool allocate_memory = true);
__host__ static Tensor<Dtype>* CreateTensorCPU(size_t* dims, bool allocate_memory = true);
__host__ static Tensor<Dtype> * TensorGPUtoCPU(Tensor<Dtype> * tensor_gpu);
__host__ static Tensor<Dtype> * TensorCPUtoGPU(Tensor<Dtype> * tensor_cpu);
__host__ static void DataArrayCPUtoGPU(Tensor<Dtype>*, Tensor<Dtype>*);
__host__ static void AllocateDataArrayGPU(Tensor<Dtype> * tensor_gpu);
__host__ static void AllocateDataArrayCPU(Tensor<Dtype> * tensor_cpu);
__host__ static void ReshapeTensorGPU(Tensor<Dtype> * tensor_gpu, size_t * dims);
__host__ static void ReshapeTensorCPU(Tensor<Dtype> * tensor_cpu, size_t * dims);
__host__ static void GetTensorGPUDims(Tensor<Dtype> * tensor_gpu, size_t * dims);
__host__ __device__ ~Tensor() {
if(data_array_ != NULL) {
delete [] data_array_;
}
}
private:
__host__ __device__ Tensor(size_t dims[4]): data_array_(NULL) {
len_ = dims[0] * dims[1] * dims[2] * dims[3];
dims_[0] = dims[0];
dims_[1] = dims[1];
dims_[2] = dims[2];
dims_[3] = dims[3];
}
Dtype* data_array_;
size_t dims_[4];
size_t len_;
};
template<class Dtype>
__host__ void Tensor<Dtype>::GetTensorGPUDims(Tensor<Dtype> * tensor_gpu, size_t * dims) {
hipMemcpy(dims, tensor_gpu->dims_, sizeof(size_t)*4, hipMemcpyDeviceToHost);
}
template<class Dtype>
__host__ void Tensor<Dtype>::ReshapeTensorCPU(Tensor<Dtype> * tensor_cpu, size_t * dims) {
tensor_cpu->SetDims(dims);
}
template<class Dtype>
__host__ void Tensor<Dtype>::ReshapeTensorGPU(Tensor<Dtype> * tensor_gpu, size_t * dims) {
hipMemcpy(tensor_gpu->dims_, dims, sizeof(size_t)*4, hipMemcpyHostToDevice);
}
// Create CPU/GPU Tensor
template<class Dtype>
__host__ Tensor<Dtype>* Tensor<Dtype>::CreateTensorCPU(size_t* dims, bool allocate_memory) {
Tensor<Dtype> * tensor_cpu = new Tensor(dims);
if (allocate_memory) {
AllocateDataArrayCPU(tensor_cpu);
}
return tensor_cpu;
}
template<class Dtype>
__host__ Tensor<Dtype>* Tensor<Dtype>::CreateTensorGPU(size_t* dims, bool allocate_memory) {
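  // A host-side Tensor is constructed first and bit-copied to the device so
  // that dims_ and len_ are initialized in device memory; the data array is
  // allocated separately and its device pointer patched in afterwards.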
Tensor<Dtype> tensor_cpu(dims);
Tensor<Dtype>* tensor_gpu;
hipMalloc((void**)&tensor_gpu, sizeof(Tensor<Dtype>));
hipMemcpy(tensor_gpu, &tensor_cpu, sizeof(Tensor<Dtype>), hipMemcpyHostToDevice);
if (allocate_memory) {
AllocateDataArrayGPU(tensor_gpu);
}
return tensor_gpu;
}
template<class Dtype>
__host__ Tensor<Dtype> * Tensor<Dtype>::TensorGPUtoCPU(Tensor<Dtype> * tensor_gpu) {
Tensor<Dtype> * tensor_cpu = (Tensor<Dtype> *)malloc(sizeof(Tensor<Dtype>));
hipMemcpy(tensor_cpu, tensor_gpu, sizeof(Tensor<Dtype>), hipMemcpyDeviceToHost);
Dtype * data_array_ = (Dtype*) malloc(tensor_cpu->size()*sizeof(Dtype));
hipMemcpy(data_array_, tensor_cpu->GetDataPtr(), tensor_cpu->size() * sizeof(Dtype), hipMemcpyDeviceToHost);
tensor_cpu->SetDataPtr(data_array_);
return tensor_cpu;
}
template<class Dtype>
__host__ Tensor<Dtype> * Tensor<Dtype>::TensorCPUtoGPU(Tensor<Dtype> * tensor_cpu) {
Tensor<Dtype> * tensor_gpu;
hipMalloc((void **)&tensor_gpu, sizeof(Tensor<Dtype>));
hipMemcpy(tensor_gpu, tensor_cpu, sizeof(Tensor<Dtype>), hipMemcpyHostToDevice);
Dtype* data_array;
hipMalloc((void**) &data_array, sizeof(Dtype)*tensor_cpu->size());
hipMemcpy(data_array, tensor_cpu->GetDataPtr(), sizeof(Dtype)*tensor_cpu->size(), hipMemcpyHostToDevice);
hipMemcpy(&tensor_gpu->data_array_, &data_array, sizeof(Dtype*), hipMemcpyHostToDevice);
return tensor_gpu;
}
template<class Dtype>
__host__ void Tensor<Dtype>::DataArrayCPUtoGPU(Tensor<Dtype> *tensor_cpu, Tensor<Dtype> *tensor_gpu) {
Dtype* data_array;
hipMemcpy(&data_array, &tensor_gpu->data_array_, sizeof(Dtype*), hipMemcpyDeviceToHost);
hipMemcpy(data_array, tensor_cpu->data_array_, sizeof(Dtype)*tensor_cpu->size(), hipMemcpyHostToDevice);
}
// Allocate Memory
template<class Dtype>
__host__ void Tensor<Dtype>::AllocateDataArrayGPU(Tensor<Dtype> * tensor_gpu) {
  size_t len;  // read len_ back from the device-side object (no heap allocation to leak)
  hipMemcpy(&len, &tensor_gpu->len_, sizeof(size_t), hipMemcpyDeviceToHost);
  Dtype* data_array_gpu;
  hipMalloc((void**)&(data_array_gpu), sizeof(Dtype)*len);
hipMemcpy(&(tensor_gpu->data_array_), &data_array_gpu, sizeof(Dtype*), hipMemcpyHostToDevice);
}
template<class Dtype>
__host__ void Tensor<Dtype>::AllocateDataArrayCPU(Tensor<Dtype> * tensor_cpu) {
if (tensor_cpu->data_array_ == NULL) {
tensor_cpu->data_array_ = new Dtype[tensor_cpu->len_];
}
}
#endif // TENSOR_CUH_
| 4e4159060687e95d755f73db562727ae751a2148.cu | #ifndef TENSOR_CUH_
#define TENSOR_CUH_
#include <assert.h>
#include <cstdlib>
#include <numeric>
#include <functional>
#include "basics/session.hpp"
#include "cuda_runtime.h"
#include "utils/helper_cuda.h"
#include "stdio.h"
/*
4D Tensor
*/
template<class Dtype>
class Tensor {
public:
__host__ __device__ inline unsigned GetIdx(const int* idx) const {
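    // Row-major flattening: for dims {d0,d1,d2,d3} and idx {i0,i1,i2,i3} this
    // computes ((i0*d1 + i1)*d2 + i2)*d3 + i3.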
unsigned out_idx = 0;
// for (int i = 3; i >= 0; i--)
for(int i = 0; i < 4; i++)
out_idx = out_idx*dims_[i] + idx[i];
return out_idx;
}
__host__ __device__ const size_t* SetDims(size_t * dims) {
assert(dims[0] == dims_[0]);
size_t new_len = dims[0]*dims[1]*dims[2]*dims[3];
assert(new_len == len_);
dims_[0] = dims[0];
dims_[1] = dims[1];
dims_[2] = dims[2];
dims_[3] = dims[3];
    return dims_;
  }
__host__ __device__ const size_t* GetDims() const {
return dims_;
}
__host__ __device__ Dtype* GetDataPtr() const {
return data_array_;
}
__host__ __device__ void SetDataPtr(Dtype* data_array_ptr) {
data_array_ = data_array_ptr;
}
__host__ __device__ inline Dtype& at(const int i0, const int i1, const int i2, const int i3) {
int idx[4] = {i0, i1, i2, i3};
return at(idx);
}
__host__ __device__ inline Dtype& at(const int* idx) {
assert(isValidIdx(idx));
return data_array_[GetIdx(idx)];
}
__host__ __device__ inline const Dtype atPadding(const int i0, const int i1, const int i2, const int i3) {
int idx[4] = {i0, i1, i2, i3};
return atPadding(idx);
}
__host__ __device__ inline const Dtype atPadding(int* idx, Dtype default_val = 0.0) const {
if (!isValidIdx(idx)) return default_val;
return data_array_[GetIdx(idx)];
}
__host__ __device__ inline bool isValidIdx(const int i0, const int i1, const int i2, const int i3) {
int idx[4] = {i0, i1, i2, i3};
return isValidIdx(idx);
}
__host__ __device__ inline bool isValidIdx(const int* idx) const {
for(int i = 0; i < 4; i++) {
if(idx[i] < 0 || idx[i] >= dims_[i]) return false;
}
return true;
}
__host__ __device__ size_t size() const {
return len_;
}
__host__ __device__ void ResetVals(Dtype default_val = 0.0) {
for (size_t i = 0; i < len_; ++i) {
data_array_[i] = default_val;
}
}
__host__ static Tensor<Dtype>* CreateTensorGPU(size_t* dims, bool allocate_memory = true);
__host__ static Tensor<Dtype>* CreateTensorCPU(size_t* dims, bool allocate_memory = true);
__host__ static Tensor<Dtype> * TensorGPUtoCPU(Tensor<Dtype> * tensor_gpu);
__host__ static Tensor<Dtype> * TensorCPUtoGPU(Tensor<Dtype> * tensor_cpu);
__host__ static void DataArrayCPUtoGPU(Tensor<Dtype>*, Tensor<Dtype>*);
__host__ static void AllocateDataArrayGPU(Tensor<Dtype> * tensor_gpu);
__host__ static void AllocateDataArrayCPU(Tensor<Dtype> * tensor_cpu);
__host__ static void ReshapeTensorGPU(Tensor<Dtype> * tensor_gpu, size_t * dims);
__host__ static void ReshapeTensorCPU(Tensor<Dtype> * tensor_cpu, size_t * dims);
__host__ static void GetTensorGPUDims(Tensor<Dtype> * tensor_gpu, size_t * dims);
__host__ __device__ ~Tensor() {
if(data_array_ != NULL) {
delete [] data_array_;
}
}
private:
__host__ __device__ Tensor(size_t dims[4]): data_array_(NULL) {
len_ = dims[0] * dims[1] * dims[2] * dims[3];
dims_[0] = dims[0];
dims_[1] = dims[1];
dims_[2] = dims[2];
dims_[3] = dims[3];
}
Dtype* data_array_;
size_t dims_[4];
size_t len_;
};
template<class Dtype>
__host__ void Tensor<Dtype>::GetTensorGPUDims(Tensor<Dtype> * tensor_gpu, size_t * dims) {
cudaMemcpy(dims, tensor_gpu->dims_, sizeof(size_t)*4, cudaMemcpyDeviceToHost);
}
template<class Dtype>
__host__ void Tensor<Dtype>::ReshapeTensorCPU(Tensor<Dtype> * tensor_cpu, size_t * dims) {
tensor_cpu->SetDims(dims);
}
template<class Dtype>
__host__ void Tensor<Dtype>::ReshapeTensorGPU(Tensor<Dtype> * tensor_gpu, size_t * dims) {
cudaMemcpy(tensor_gpu->dims_, dims, sizeof(size_t)*4, cudaMemcpyHostToDevice);
}
// Create CPU/GPU Tensor
template<class Dtype>
__host__ Tensor<Dtype>* Tensor<Dtype>::CreateTensorCPU(size_t* dims, bool allocate_memory) {
Tensor<Dtype> * tensor_cpu = new Tensor(dims);
if (allocate_memory) {
AllocateDataArrayCPU(tensor_cpu);
}
return tensor_cpu;
}
template<class Dtype>
__host__ Tensor<Dtype>* Tensor<Dtype>::CreateTensorGPU(size_t* dims, bool allocate_memory) {
Tensor<Dtype> tensor_cpu(dims);
Tensor<Dtype>* tensor_gpu;
cudaMalloc((void**)&tensor_gpu, sizeof(Tensor<Dtype>));
cudaMemcpy(tensor_gpu, &tensor_cpu, sizeof(Tensor<Dtype>), cudaMemcpyHostToDevice);
if (allocate_memory) {
AllocateDataArrayGPU(tensor_gpu);
}
return tensor_gpu;
}
template<class Dtype>
__host__ Tensor<Dtype> * Tensor<Dtype>::TensorGPUtoCPU(Tensor<Dtype> * tensor_gpu) {
Tensor<Dtype> * tensor_cpu = (Tensor<Dtype> *)malloc(sizeof(Tensor<Dtype>));
cudaMemcpy(tensor_cpu, tensor_gpu, sizeof(Tensor<Dtype>), cudaMemcpyDeviceToHost);
Dtype * data_array_ = (Dtype*) malloc(tensor_cpu->size()*sizeof(Dtype));
cudaMemcpy(data_array_, tensor_cpu->GetDataPtr(), tensor_cpu->size() * sizeof(Dtype), cudaMemcpyDeviceToHost);
tensor_cpu->SetDataPtr(data_array_);
return tensor_cpu;
}
template<class Dtype>
__host__ Tensor<Dtype> * Tensor<Dtype>::TensorCPUtoGPU(Tensor<Dtype> * tensor_cpu) {
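  // Deep copy: the Tensor object is copied to the device, the data array is
  // copied into a fresh device allocation, and the device-side data_array_
  // pointer is patched to reference that allocation.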
Tensor<Dtype> * tensor_gpu;
cudaMalloc((void **)&tensor_gpu, sizeof(Tensor<Dtype>));
cudaMemcpy(tensor_gpu, tensor_cpu, sizeof(Tensor<Dtype>), cudaMemcpyHostToDevice);
Dtype* data_array;
cudaMalloc((void**) &data_array, sizeof(Dtype)*tensor_cpu->size());
cudaMemcpy(data_array, tensor_cpu->GetDataPtr(), sizeof(Dtype)*tensor_cpu->size(), cudaMemcpyHostToDevice);
cudaMemcpy(&tensor_gpu->data_array_, &data_array, sizeof(Dtype*), cudaMemcpyHostToDevice);
return tensor_gpu;
}
template<class Dtype>
__host__ void Tensor<Dtype>::DataArrayCPUtoGPU(Tensor<Dtype> *tensor_cpu, Tensor<Dtype> *tensor_gpu) {
Dtype* data_array;
cudaMemcpy(&data_array, &tensor_gpu->data_array_, sizeof(Dtype*), cudaMemcpyDeviceToHost);
cudaMemcpy(data_array, tensor_cpu->data_array_, sizeof(Dtype)*tensor_cpu->size(), cudaMemcpyHostToDevice);
}
// Allocate Memory
template<class Dtype>
__host__ void Tensor<Dtype>::AllocateDataArrayGPU(Tensor<Dtype> * tensor_gpu) {
  size_t len;  // read len_ back from the device-side object (no heap allocation to leak)
  cudaMemcpy(&len, &tensor_gpu->len_, sizeof(size_t), cudaMemcpyDeviceToHost);
  Dtype* data_array_gpu;
  cudaMalloc((void**)&(data_array_gpu), sizeof(Dtype)*len);
cudaMemcpy(&(tensor_gpu->data_array_), &data_array_gpu, sizeof(Dtype*), cudaMemcpyHostToDevice);
}
template<class Dtype>
__host__ void Tensor<Dtype>::AllocateDataArrayCPU(Tensor<Dtype> * tensor_cpu) {
if (tensor_cpu->data_array_ == NULL) {
tensor_cpu->data_array_ = new Dtype[tensor_cpu->len_];
}
}
#endif // TENSOR_CUH_
|
1b5c41170ecc471cb8c3bcbc1ae4494506fc2e8d.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include <gltf-loader.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define MATERIAL_SORT
#define CACHE_BOUNCE // Determines whether to cache the first bounce or do stochastic sampling each iteration
// #define THIN_LENS_CAMERA
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
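    // Hash the iteration number, pixel index and bounce depth together so each
    // path segment gets a distinct, deterministic RNG seed.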
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
static ShadeableIntersection* dev_cache_intersections = NULL;
static PathSegment* dev_cache_paths = NULL;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);
hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
hipMalloc(&dev_cache_intersections, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_cache_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
hipMalloc(&dev_cache_paths, pixelcount * sizeof(PathSegment));
checkCUDAError("pathtraceInit");
}
__global__ void cpyCoordVals(float* tar, float* src, int offset, int src_length) {
int src_index = blockIdx.x * blockDim.x + threadIdx.x;
if (src_index >= src_length) {
return;
}
int tar_index = src_index / 3;
if (src_index % 3 == offset) {
tar[tar_index] = src[src_index];
}
}
__global__ void warpUV(int uv_size, float* dev_uv) {
int uv_index = blockIdx.x * blockDim.x + threadIdx.x;
if (uv_index >= uv_size) {
return;
}
float uv_val = dev_uv[uv_index];
if (uv_val > 1 || uv_val < 0) {
dev_uv[uv_index] = uv_val - glm::floor(uv_val);
}
}
void meshInit(Scene* scene) {
for (int i = 0; i < scene->geoms.size(); ++i) {
Geom& temp_geo_ref = scene->geoms[i];
if (temp_geo_ref.type == GeomType::MESH) {
cout << "Init mesh geo:" << endl;
// string filename = "C:\\JiaruiYan\\MasterDegreeProjects\\CIS565\\Proj3\\tinygltf_test\\gltf_test\\scene\\Box.gltf";
// string filename = "C:\\JiaruiYan\\MasterDegreeProjects\\CIS565\\Proj3\\Project3-CUDA-Path-Tracer\\read_models\\duck\\Duck.gltf";
string filename = scene->mesh_filename;
std::vector<example::Mesh<float> > meshes;
std::vector<example::Material> materials;
std::vector<example::Texture> textures;
bool ret = LoadGLTF(filename, 1.0, &meshes, &materials, &textures);
if (!ret) {
std::cerr << "Failed to load [ " << filename << " ]" << std::endl;
}
example::Mesh<float>& model_ref = meshes[0];
temp_geo_ref.indices_num = model_ref.faces.size();
float *dev_x_coord, *dev_y_coord, *dev_z_coord;
hipMalloc(&temp_geo_ref.dev_mesh_positions, model_ref.vertices.size() * sizeof(float));
hipMemcpy(temp_geo_ref.dev_mesh_positions, model_ref.vertices.data(), model_ref.vertices.size() * sizeof(float), hipMemcpyHostToDevice);
hipMalloc(&dev_x_coord, model_ref.vertices.size() * sizeof(float) / 3);
hipMalloc(&dev_y_coord, model_ref.vertices.size() * sizeof(float) / 3);
hipMalloc(&dev_z_coord, model_ref.vertices.size() * sizeof(float) / 3);
const int blockSize1d = 128;
dim3 numblocksCpyCoord = (model_ref.vertices.size() + blockSize1d - 1) / blockSize1d;
hipLaunchKernelGGL(( cpyCoordVals) , dim3(numblocksCpyCoord), dim3(blockSize1d), 0, 0, dev_x_coord, temp_geo_ref.dev_mesh_positions, 0, model_ref.vertices.size());
hipLaunchKernelGGL(( cpyCoordVals) , dim3(numblocksCpyCoord), dim3(blockSize1d), 0, 0, dev_y_coord, temp_geo_ref.dev_mesh_positions, 1, model_ref.vertices.size());
hipLaunchKernelGGL(( cpyCoordVals) , dim3(numblocksCpyCoord), dim3(blockSize1d), 0, 0, dev_z_coord, temp_geo_ref.dev_mesh_positions, 2, model_ref.vertices.size());
float* dev_max_x = thrust::max_element(thrust::device, dev_x_coord, dev_x_coord + model_ref.vertices.size() / 3);
float* dev_min_x = thrust::min_element(thrust::device, dev_x_coord, dev_x_coord + model_ref.vertices.size() / 3);
float host_max_x, host_min_x;
hipMemcpy(&host_max_x, dev_max_x, sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&host_min_x, dev_min_x, sizeof(float), hipMemcpyDeviceToHost);
float* dev_max_y = thrust::max_element(thrust::device, dev_y_coord, dev_y_coord + model_ref.vertices.size() / 3);
float* dev_min_y = thrust::min_element(thrust::device, dev_y_coord, dev_y_coord + model_ref.vertices.size() / 3);
float host_max_y, host_min_y;
hipMemcpy(&host_max_y, dev_max_y, sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&host_min_y, dev_min_y, sizeof(float), hipMemcpyDeviceToHost);
float* dev_max_z = thrust::max_element(thrust::device, dev_z_coord, dev_z_coord + model_ref.vertices.size() / 3);
float* dev_min_z = thrust::min_element(thrust::device, dev_z_coord, dev_z_coord + model_ref.vertices.size() / 3);
float host_max_z, host_min_z;
hipMemcpy(&host_max_z, dev_max_z, sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&host_min_z, dev_min_z, sizeof(float), hipMemcpyDeviceToHost);
cout << "upper corner:(" << host_max_x << ", " << host_max_y << ", " << host_max_z << ")" << endl;
cout << "downward corner:(" << host_min_x << ", " << host_min_y << ", " << host_min_z << ")" << endl;
temp_geo_ref.bounding_box_down_corner[0] = host_min_x - 0.1f;
temp_geo_ref.bounding_box_down_corner[1] = host_min_y - 0.1f;
temp_geo_ref.bounding_box_down_corner[2] = host_min_z - 0.1f;
temp_geo_ref.bounding_box_upper_corner[0] = host_max_x + 0.1f;
temp_geo_ref.bounding_box_upper_corner[1] = host_max_y + 0.1f;
temp_geo_ref.bounding_box_upper_corner[2] = host_max_z + 0.1f;
hipMalloc(&temp_geo_ref.dev_mesh_indices, model_ref.faces.size() * sizeof(unsigned int));
hipMemcpy(temp_geo_ref.dev_mesh_indices, model_ref.faces.data(), model_ref.faces.size() * sizeof(unsigned int), hipMemcpyHostToDevice);
hipMalloc(&temp_geo_ref.dev_mesh_normals, model_ref.facevarying_normals.size() * sizeof(float));
hipMemcpy(temp_geo_ref.dev_mesh_normals, model_ref.facevarying_normals.data(), model_ref.facevarying_normals.size() * sizeof(float), hipMemcpyHostToDevice);
/*
cout << "uvs:" << endl;
for (int i = 0; i < meshes[0].facevarying_uvs.size() / 2; i = i + 1) {
cout << "uv[" << i << "]:(" << meshes[0].facevarying_uvs[i * 2] << ", " << meshes[0].facevarying_uvs[i * 2 + 1] << ")" << endl;
}
*/
if (textures.size() != 0) {
// dim3 numblocksUVWarp = (model_ref.facevarying_uvs.size() + blockSize1d - 1) / blockSize1d;
hipMalloc(&temp_geo_ref.dev_uvs, model_ref.facevarying_uvs.size() * sizeof(float));
hipMemcpy(temp_geo_ref.dev_uvs, model_ref.facevarying_uvs.data(), model_ref.facevarying_uvs.size() * sizeof(float), hipMemcpyHostToDevice);
// warpUV <<<numblocksUVWarp, blockSize1d>>> (model_ref.facevarying_uvs.size(), temp_geo_ref.dev_uvs);
/* NOTE: UV is facevarying instead of vertices varying.
for (int i = 0; i < model_ref.faces.size() / 3; ++i) {
int idx1 = model_ref.faces[i * 3];
int idx2 = model_ref.faces[i * 3 + 1];
int idx3 = model_ref.faces[i * 3 + 2];
cout << "Triangle(" << idx1 << ", " << idx2 << ", " << idx3 << ")" << endl;
cout << "UV" << idx1 << ":(" << model_ref.facevarying_uvs[idx1 * 2] << ", " << model_ref.facevarying_uvs[idx1 * 2 + 1] << ")" << endl;
cout << "UV" << idx2 << ":(" << model_ref.facevarying_uvs[idx2 * 2] << ", " << model_ref.facevarying_uvs[idx2 * 2 + 1] << ")" << endl;
cout << "UV" << idx3 << ":(" << model_ref.facevarying_uvs[idx3 * 2] << ", " << model_ref.facevarying_uvs[idx3 * 2 + 1] << ")" << endl;
// cout << "UV:(" << model_ref.facevarying_uvs[i * 2] << ", " << model_ref.facevarying_uvs[i * 2 + 1] << ")" << endl;
}
*/
int image_size = textures[0].components * textures[0].width * textures[0].height * sizeof(unsigned char);
hipMalloc(&temp_geo_ref.dev_texture, image_size * sizeof(unsigned char));
hipMemcpy(temp_geo_ref.dev_texture, textures[0].image, image_size * sizeof(unsigned char), hipMemcpyHostToDevice);
temp_geo_ref.hasTexture = true;
temp_geo_ref.texture_width = textures[0].width;
temp_geo_ref.texture_height = textures[0].height;
}
else {
temp_geo_ref.hasTexture = false;
}
hipFree(dev_x_coord);
hipFree(dev_y_coord);
hipFree(dev_z_coord);
}
}
checkCUDAError("Mesh init");
}
void pathtraceFree(Scene* scene) {
hipFree(dev_image); // no-op if dev_image is null
hipFree(dev_paths);
hipFree(dev_geoms);
hipFree(dev_materials);
hipFree(dev_intersections);
// TODO: clean up any extra device memory you created
hipFree(dev_cache_intersections);
hipFree(dev_cache_paths);
checkCUDAError("pathtraceFree");
}
void meshFree(Scene* scene) {
for (int i = 0; scene != nullptr && i < scene->geoms.size(); ++i) {
Geom& temp_geo_ref = scene->geoms[i];
if (temp_geo_ref.type == GeomType::MESH) {
if (temp_geo_ref.dev_mesh_indices != nullptr) {
hipFree(temp_geo_ref.dev_mesh_indices);
}
if (temp_geo_ref.dev_mesh_normals != nullptr) {
hipFree(temp_geo_ref.dev_mesh_normals);
}
if (temp_geo_ref.dev_mesh_positions != nullptr) {
hipFree(temp_geo_ref.dev_mesh_positions);
}
if (temp_geo_ref.hasTexture) {
if (temp_geo_ref.dev_uvs != nullptr) {
hipFree(temp_geo_ref.dev_uvs);
}
if (temp_geo_ref.dev_texture != nullptr) {
hipFree(temp_geo_ref.dev_texture);
}
}
}
}
checkCUDAError("GeoFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
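// ConcentricSampleDisk maps a uniform point on [0,1)^2 to a uniform point on the unit disk
// (PBRT-style concentric mapping); it is used below to sample the thin-lens aperture.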
__device__ glm::vec2 ConcentricSampleDisk(const glm::vec2 u) {
// Map uniform random numbers to [-1, 1]
glm::vec2 uOffset = 2.f * u - glm::vec2(1, 1);
// Handle degeneracy at the origin
if (uOffset.x == 0 && uOffset.y == 0) {
return glm::vec2(0.f);
}
// Apply concentric mapping to point
float theta, r;
if (fabs(uOffset.x) > fabs(uOffset.y)) {
r = uOffset.x;
theta = 0.25 * 3.1415926 * (uOffset.y / uOffset.x);
}
else {
r = uOffset.y;
theta = 0.5 * 3.1415926 - 0.25 * 3.1415926 * (uOffset.x / uOffset.y);
}
return r * glm::vec2(cosf(theta), sinf(theta));
}
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
// TODO: implement antialiasing by jittering the ray
#ifdef CACHE_BOUNCE
// If we cache the first bounce, the camera ray is not jittered, so the cached intersection stays valid across iterations.
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
#else
// We jitter rays for antialiasing when there is no first-bounce cache.
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f + u01(rng) - 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f + u01(rng) - 0.5f)
);
#endif // CACHE_BOUNCE
bool thin_len_cam = false;
#ifdef THIN_LENS_CAMERA
thin_len_cam = true;
#endif // THIN_LENS_CAMERA
float lensRadius = 0.1f;
float focalDistance = cam.focal_length;
if (thin_len_cam) {
// Sample point on lens
glm::vec2 pLens = lensRadius * ConcentricSampleDisk(glm::vec2(u01(rng), u01(rng)));
// Compute point on plane of focus
float ft = focalDistance / glm::dot(segment.ray.direction, cam.view);
glm::vec3 pFocus = ft * segment.ray.direction + segment.ray.origin;
// Update ray for effect of lens
segment.ray.origin = cam.position + cam.up * pLens[1] + cam.right * pLens[0];
// segment.ray.origin = cam.position;
segment.ray.direction = glm::normalize(pFocus - segment.ray.origin);
}
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
pathSegments[path_index].ori_id = path_index;
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
glm::vec2 tmp_uv;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == MESH)
{
t = meshIntersectionTest(geom, pathSegment.ray, tmp_uv, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
intersections[path_index].outside = outside;
intersections[path_index].hit_type = geoms[hit_geom_index].type;
intersections[path_index].hasTexture = geoms[hit_geom_index].hasTexture;
intersections[path_index].geomId = hit_geom_index;
if (geoms[hit_geom_index].type == MESH && geoms[hit_geom_index].hasTexture) {
intersections[path_index].uv = tmp_uv;
}
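// Build an orthonormal tangent/bitangent frame around the hit normal (PBRT CoordinateSystem
// construction) and store it with the intersection for later use by the shading code.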
if (abs(normal.x) > abs(normal.y)) {
intersections[path_index].tangent = glm::vec3(-normal.z, 0.f, normal.x) / sqrt(normal.x * normal.x + normal.z * normal.z);
}
else {
intersections[path_index].tangent = glm::vec3(0.f, normal.z, -normal.y) / sqrt(normal.y * normal.y + normal.z * normal.z);
}
intersections[path_index].bitangent = glm::cross(normal, intersections[path_index].tangent);
}
}
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeFakeMaterial (
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
// LOOK: this is how you use thrust's RNG! Please look at
// makeSeededRandomEngine as well.
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
// TODO: replace this! you should be able to start with basically a one-liner
else {
float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f));
pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f;
pathSegments[idx].color *= u01(rng); // apply some noise because why not
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
} else {
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}
}
}
__device__ int directlight_shadowtest(Ray tempRay, Geom* geoms, int geoms_size) {
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
glm::vec2 tmp_uv;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom& geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, tempRay, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, tempRay, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == MESH)
{
t = meshIntersectionTest(geom, tempRay, tmp_uv, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
return hit_geom_index;
}
__global__ void directlight_shade_bounce(int iter, int num_paths, ShadeableIntersection* shadeableIntersections, PathSegment* pathSegments, Geom* geoms
, int geoms_size, Material* materials, glm::vec3* image) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths) {
// int ori_idx = pathSegments[idx].ori_id;
// ShadeableIntersection intersection = shadeableIntersections[ori_idx];
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) {
// If the intersection exists
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
// thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
image[pathSegments[idx].pixelIndex] += pathSegments[idx].color;
}
else {
// direct shading:
const ShadeableIntersection temp_intersect = shadeableIntersections[idx];
glm::vec3 intersect_pos = temp_intersect.t * pathSegments[idx].ray.direction + pathSegments[idx].ray.origin;
thrust::uniform_real_distribution<float> u01(0, 1);
// Randomly select a light source
// Max 10 lights in the scene for direct light;
int light_num = 0;
int light_idxs[10];
for (int i = 0; i < geoms_size; ++i) {
Geom& temp_geo_ref = geoms[i];
Material& temp_mat_ref = materials[temp_geo_ref.materialid];
if (temp_mat_ref.emittance > 0.f) {
// This is a light
light_idxs[light_num] = i;
++light_num;
}
}
int random_light_idx = glm::min((int)(u01(rng) * light_num), light_num - 1);
Geom& light = geoms[light_idxs[random_light_idx]];
// Get an intersection on the surface of its shape
glm::vec4 pObj(u01(rng) - 0.5f, -0.5f, u01(rng) - 0.5f, 1.f);
float area = light.scale.x * light.scale.z;
glm::vec4 local_normal(0.f, -1.f, 0.f, 0.f);
glm::vec3 light_intersect = multiplyMV(light.transform, pObj);
glm::vec3 light_normal = glm::normalize(multiplyMV(light.invTranspose, local_normal));
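// pdf of this uniformly sampled light point: squared distance to the shading point over the light's surface area.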
float pdf = glm::length2(light_intersect - intersect_pos) / area;
// Check if the resultant PDF is zero and the
// resultant Intersection are the same point in space, and return black if this is the case.
if (pdf == 0.f || glm::l2Norm(light_intersect - intersect_pos) < FLT_EPSILON) {
pathSegments[idx].color = glm::vec3(0.f);
}
else {
// Set wi to the normalized vector from the reference Intersection's
// point to the Shape's intersection point.
glm::vec3 wi = glm::normalize(light_intersect - intersect_pos);
// Return the light emitted along wi from our intersection point.
Material& light_material = materials[light.materialid];
glm::vec3 light_L = glm::dot(light_normal, -wi) > 0.f ? (light_material.color * light_material.emittance) : glm::vec3(0.f);
// printf("light_normal:(%f, %f, %f)\nwi:(%f, %f, %f)\n\n", light_normal.x, light_normal.y, light_normal.z, wi.x, wi.y, wi.z);
/*
if (glm::l2Norm(light_L) != 0.f) {
printf("light_L != 0\n");
}
*/
// Shadow test
Ray tempRay;
tempRay.direction = wi;
tempRay.origin = intersect_pos + 0.01f * temp_intersect.surfaceNormal;
int hit_geom_index = directlight_shadowtest(tempRay, geoms, geoms_size);
if (hit_geom_index == -1 || hit_geom_index != light_idxs[random_light_idx]) {
pathSegments[idx].color = glm::vec3(0.f);
}
else {
// Evaluate the remaining component of the LTE
// Texture color:
if (temp_intersect.hit_type == MESH && temp_intersect.hasTexture) {
Geom& temp_geo_ref = geoms[temp_intersect.geomId];
float temp_u = temp_intersect.uv[0];
float temp_v = temp_intersect.uv[1];
/*
if (temp_intersect.uv[0] > 1 || temp_intersect.uv[0] < 0) {
temp_u = temp_intersect.uv[0] - glm::floor(temp_intersect.uv[0]);
if (temp_u == 0) {
}
}
if (temp_intersect.uv[1] > 1 || temp_intersect.uv[1] < 0) {
temp_v = temp_intersect.uv[1] - glm::floor(temp_intersect.uv[1]);
}
*/
int coordU = (int)(temp_u * (temp_geo_ref.texture_width));
int coordV = (int)(temp_v * (temp_geo_ref.texture_height));
if (coordU >= 512) {
printf("coordU >= 512: %d\n", coordU);
coordU %= 512;
}
if (coordV >= 512) {
printf("coordV >= 512: %d\n", coordV);
coordV %= 512;
}
int pixel_idx = coordV * temp_geo_ref.texture_width + coordU;
// int pixel_idx = coordU * temp_geo_ref.texture_width + coordV;
unsigned int colR = (unsigned int) temp_geo_ref.dev_texture[pixel_idx * 4];
unsigned int colG = (unsigned int) temp_geo_ref.dev_texture[pixel_idx * 4 + 1];
unsigned int colB = (unsigned int) temp_geo_ref.dev_texture[pixel_idx * 4 + 2];
materialColor[0] = (float)colR / 255.f;
materialColor[1] = (float)colG / 255.f;
materialColor[2] = (float)colB / 255.f;
// printf("UV:(%f, %f)\n", temp_intersect.uv[0], temp_intersect.uv[1]);
// printf("UVCoord:(%d, %d)\n", coordU, coordV);
/*
if (colR != 225 || colG != 191 || colB != 0) {
printf("(%d, %d, %d)\n", colR, colG, colB);
}*/
// printf("(%d, %d)\n", temp_geo_ref.texture_width, temp_geo_ref.texture_height);
}
glm::vec3 f = materialColor / PI;
pathSegments[idx].color = f * light_L * glm::abs(glm::dot(wi, temp_intersect.surfaceNormal)) / (pdf / light_num);
pathSegments[idx].remainingBounces = 0;
image[pathSegments[idx].pixelIndex] += pathSegments[idx].color;
// if (glm::l2Norm(pathSegments[idx].color) != 0.f) {
// printf("light_num:%d\n", light_num);
// printf("pathSegments[idx].color:(%f, %f, %f)\n", pathSegments[idx].color.x, pathSegments[idx].color.y, pathSegments[idx].color.z);
// }
// printf("temp_intersect.surfaceNormal:(%f, %f, %f)\nwi:(%f, %f, %f)\n\n", temp_intersect.surfaceNormal.x, temp_intersect.surfaceNormal.y, temp_intersect.surfaceNormal.z, wi.x, wi.y, wi.z);
// printf("light_L:(%f, %f, %f)\n", light_L.x, light_L.y, light_L.z);
// printf("pathSegments[idx].color:(%f, %f, %f)\n", pathSegments[idx].color.x, pathSegments[idx].color.y, pathSegments[idx].color.z);
}
}
}
}
else {
// If there was no intersection, color the ray black.
pathSegments[idx].color = glm::vec3(0.f);
pathSegments[idx].remainingBounces = 0;
image[pathSegments[idx].pixelIndex] += pathSegments[idx].color;
}
}
}
__global__ void shade_image(int num_paths, PathSegment* pathSegments, Geom* geoms, int geoms_size, glm::vec3* image) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths) {
int mesh_idx = 0;
for (int i = 0; i < geoms_size; ++i) {
if (geoms[i].hasTexture) {
mesh_idx = i;
}
}
unsigned int colR = (unsigned int) geoms[mesh_idx].dev_texture[pathSegments[idx].pixelIndex * 4];
unsigned int colG = (unsigned int) geoms[mesh_idx].dev_texture[pathSegments[idx].pixelIndex * 4 + 1];
unsigned int colB = (unsigned int) geoms[mesh_idx].dev_texture[pathSegments[idx].pixelIndex * 4 + 2];
image[pathSegments[idx].pixelIndex] += glm::vec3((float)colR / 255.f, (float)colG / 255.f, (float)colB / 255.f);
}
}
__global__ void shade_bounce(int iter, int num_paths, ShadeableIntersection* shadeableIntersections, PathSegment* pathSegments, Geom* geoms, Material* materials, glm::vec3* image) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths) {
// int ori_idx = pathSegments[idx].ori_id;
// ShadeableIntersection intersection = shadeableIntersections[ori_idx];
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) {
// If the intersection exists
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
// thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
image[pathSegments[idx].pixelIndex] += pathSegments[idx].color;
}
else {
// BSDF accumulate:
// const ShadeableIntersection temp_intersect = shadeableIntersections[ori_idx];
const ShadeableIntersection temp_intersect = shadeableIntersections[idx];
glm::vec3 intersect_pos = temp_intersect.t * pathSegments[idx].ray.direction + pathSegments[idx].ray.origin;
scatterRay(pathSegments[idx], intersect_pos, temp_intersect.surfaceNormal, temp_intersect.outside, temp_intersect, geoms, material, rng);
}
}
else {
// If there was no intersection, color the ray black.
pathSegments[idx].color = glm::vec3(0.f);
pathSegments[idx].remainingBounces = 0;
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
struct zero_bounce
{
__host__ __device__
bool operator()(const PathSegment x)
{
return x.remainingBounces == 0;
}
};
struct mat_sort
{
__host__ __device__
bool operator()(const ShadeableIntersection& i1, const ShadeableIntersection& i2) {
return i1.materialId > i2.materialId;
}
};
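// These functors feed the thrust calls in the tracing loop, e.g.
// thrust::remove_if(thrust::device, dev_paths, dev_paths + num_paths, zero_bounce()) compacts away
// finished paths, and thrust::stable_sort_by_key(thrust::device, dev_intersections,
// dev_intersections + num_paths, dev_paths, mat_sort()) groups work by material.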
__global__ void print_remain_bounces(int nPaths, PathSegment* iterationPaths) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths) {
if (index == 0) {
printf("remainbounces:%d\n", iterationPaths[index].remainingBounces);
}
}
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
bool iterationComplete = false;
// std::cout << "iter:" << iter << std::endl;
// TODO: perform one iteration of path tracing
#ifdef CACHE_BOUNCE
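// Iteration 1 traces camera rays normally and caches the (deterministic) first-bounce paths and
// intersections; every later iteration restores them from the cache, shades that first bounce
// immediately, compacts terminated paths, and then enters the bounce loop below at depth 1.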
if (iter == 1) {
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
}
else {
// Generate ray from cached first intersection
hipMemcpy(dev_intersections, dev_cache_intersections, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
hipMemcpy(dev_paths, dev_cache_paths, pixelcount * sizeof(PathSegment), hipMemcpyDeviceToDevice);
dim3 numblocksPathSegmentTracing = (pixelcount + blockSize1d - 1) / blockSize1d;
// print_remain_bounces << <numblocksPathSegmentTracing, blockSize1d >> > (pixelcount, dev_cache_paths);
shade_bounce << <numblocksPathSegmentTracing, blockSize1d >> > (iter, pixelcount, dev_intersections, dev_paths, dev_geoms, dev_materials, dev_image);
// print_remain_bounces << <numblocksPathSegmentTracing, blockSize1d >> > (pixelcount, dev_paths);
PathSegment* new_end = thrust::remove_if(thrust::device, dev_paths, dev_paths + pixelcount, zero_bounce());
if (new_end == dev_paths) {
iterationComplete = true;
}
else {
num_paths = new_end - dev_paths;
}
depth++;
}
#else
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
#endif // CACHE_BOUNCE
// std::cout << "traceDepth:" << traceDepth << std::endl;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
while (!iterationComplete) {
std::cout << "Depth:" << depth << std::endl;
std::cout << "Num of path:" << num_paths << std::endl << std::endl;
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
checkCUDAError("trace one bounce");
hipDeviceSynchronize();
depth++;
#ifdef MATERIAL_SORT
// TODO: Sort rays by material
thrust::stable_sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, mat_sort());
#endif // MATERIAL_SORT
#ifdef CACHE_BOUNCE
// Cache the first intersection
if (iter == 1 && depth == 1) {
hipMemcpy(dev_cache_intersections, dev_intersections, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
hipMemcpy(dev_cache_paths, dev_paths, pixelcount * sizeof(PathSegment), hipMemcpyDeviceToDevice);
// print_remain_bounces << <numblocksPathSegmentTracing, blockSize1d >> > (pixelcount, dev_paths);
}
#endif // CACHE_BOUNCE
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
hipLaunchKernelGGL(( shade_bounce) , dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0, iter, num_paths, dev_intersections, dev_paths, dev_geoms, dev_materials, dev_image);
// TODO: should be based off stream compaction results.
// Stream compact away all of the terminated paths.
PathSegment* new_end = thrust::remove_if(thrust::device, dev_paths, dev_paths + num_paths, zero_bounce());
// std::cout << "num_path before remove:" << num_paths << std::endl;
// print_remain_bounces << <numblocksPathSegmentTracing, blockSize1d >> > (num_paths, dev_paths);
if (new_end == dev_paths) {
iterationComplete = true;
}
else {
num_paths = new_end - dev_paths;
}
// std::cout << "num_path after remove:" << num_paths << std::endl;
// iterationComplete = true;
}
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
void directlight_pathtrace(uchar4* pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera& cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
bool iterationComplete = false;
// TODO: perform one iteration of path tracing
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
// std::cout << "traceDepth:" << traceDepth << std::endl;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
while (!iterationComplete) {
// std::cout << "Num of path:" << num_paths << std::endl;
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
hipLaunchKernelGGL(( computeIntersections) , dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
checkCUDAError("trace one bounce");
hipDeviceSynchronize();
depth++;
#ifdef MATERIAL_SORT
// TODO: Sort rays by material
thrust::stable_sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, mat_sort());
#endif // MATERIAL_SORT
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
hipLaunchKernelGGL(( directlight_shade_bounce) , dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
iter,
num_paths,
dev_intersections,
dev_paths, dev_geoms,
hst_scene->geoms.size(),
dev_materials,
dev_image);
// dim3 numblocksImageShade = (512 * 512 + blockSize1d - 1) / blockSize1d;
// shade_image <<<numblocksImageShade, blockSize1d>>> (num_paths, dev_paths, dev_geoms, hst_scene->geoms.size(), dev_image);
// TODO: should be based off stream compaction results.
// Stream compact away all of the terminated paths.
PathSegment* new_end = thrust::remove_if(thrust::device, dev_paths, dev_paths + num_paths, zero_bounce());
// std::cout << "num_path before remove:" << num_paths << std::endl;
// print_remain_bounces << <numblocksPathSegmentTracing, blockSize1d >> > (num_paths, dev_paths);
if (new_end == dev_paths) {
iterationComplete = true;
}
else {
num_paths = new_end - dev_paths;
}
// std::cout << "num_path after remove:" << num_paths << std::endl;
iterationComplete = true;
}
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO << <blocksPerGrid2d, blockSize2d >> > (pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("direct light pathtrace");
}
| 1b5c41170ecc471cb8c3bcbc1ae4494506fc2e8d.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include <gltf-loader.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define MATERIAL_SORT
#define CACHE_BOUNCE // Determine whether to cache the first bounce or to do stochastic (jittered) sampling every iteration
// #define THIN_LENS_CAMERA
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
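// Hash the iteration number, pixel index, and bounce depth together so that every
// (pixel, bounce, iteration) combination draws from an independent thrust RNG stream.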
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
static ShadeableIntersection* dev_cache_intersections = NULL;
static PathSegment* dev_cache_paths = NULL;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
cudaMalloc(&dev_cache_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_cache_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
cudaMalloc(&dev_cache_paths, pixelcount * sizeof(PathSegment));
checkCUDAError("pathtraceInit");
}
__global__ void cpyCoordVals(float* tar, float* src, int offset, int src_length) {
int src_index = blockIdx.x * blockDim.x + threadIdx.x;
if (src_index >= src_length) {
return;
}
int tar_index = src_index / 3;
if (src_index % 3 == offset) {
tar[tar_index] = src[src_index];
}
}
__global__ void warpUV(int uv_size, float* dev_uv) {
int uv_index = blockIdx.x * blockDim.x + threadIdx.x;
if (uv_index >= uv_size) {
return;
}
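// Wrap out-of-range UVs into [0, 1) (repeat-style texture addressing). Note that the launch of
// this kernel in meshInit is currently commented out.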
float uv_val = dev_uv[uv_index];
if (uv_val > 1 || uv_val < 0) {
dev_uv[uv_index] = uv_val - glm::floor(uv_val);
}
}
void meshInit(Scene* scene) {
for (int i = 0; i < scene->geoms.size(); ++i) {
Geom& temp_geo_ref = scene->geoms[i];
if (temp_geo_ref.type == GeomType::MESH) {
cout << "Init mesh geo:" << endl;
// string filename = "C:\\JiaruiYan\\MasterDegreeProjects\\CIS565\\Proj3\\tinygltf_test\\gltf_test\\scene\\Box.gltf";
// string filename = "C:\\JiaruiYan\\MasterDegreeProjects\\CIS565\\Proj3\\Project3-CUDA-Path-Tracer\\read_models\\duck\\Duck.gltf";
string filename = scene->mesh_filename;
std::vector<example::Mesh<float> > meshes;
std::vector<example::Material> materials;
std::vector<example::Texture> textures;
bool ret = LoadGLTF(filename, 1.0, &meshes, &materials, &textures);
if (!ret) {
std::cerr << "Failed to load [ " << filename << " ]" << std::endl;
}
example::Mesh<float>& model_ref = meshes[0];
temp_geo_ref.indices_num = model_ref.faces.size();
float *dev_x_coord, *dev_y_coord, *dev_z_coord;
cudaMalloc(&temp_geo_ref.dev_mesh_positions, model_ref.vertices.size() * sizeof(float));
cudaMemcpy(temp_geo_ref.dev_mesh_positions, model_ref.vertices.data(), model_ref.vertices.size() * sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc(&dev_x_coord, model_ref.vertices.size() * sizeof(float) / 3);
cudaMalloc(&dev_y_coord, model_ref.vertices.size() * sizeof(float) / 3);
cudaMalloc(&dev_z_coord, model_ref.vertices.size() * sizeof(float) / 3);
const int blockSize1d = 128;
dim3 numblocksCpyCoord = (model_ref.vertices.size() + blockSize1d - 1) / blockSize1d;
cpyCoordVals <<<numblocksCpyCoord, blockSize1d>>> (dev_x_coord, temp_geo_ref.dev_mesh_positions, 0, model_ref.vertices.size());
cpyCoordVals <<<numblocksCpyCoord, blockSize1d>>> (dev_y_coord, temp_geo_ref.dev_mesh_positions, 1, model_ref.vertices.size());
cpyCoordVals <<<numblocksCpyCoord, blockSize1d>>> (dev_z_coord, temp_geo_ref.dev_mesh_positions, 2, model_ref.vertices.size());
float* dev_max_x = thrust::max_element(thrust::device, dev_x_coord, dev_x_coord + model_ref.vertices.size() / 3);
float* dev_min_x = thrust::min_element(thrust::device, dev_x_coord, dev_x_coord + model_ref.vertices.size() / 3);
float host_max_x, host_min_x;
cudaMemcpy(&host_max_x, dev_max_x, sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&host_min_x, dev_min_x, sizeof(float), cudaMemcpyDeviceToHost);
float* dev_max_y = thrust::max_element(thrust::device, dev_y_coord, dev_y_coord + model_ref.vertices.size() / 3);
float* dev_min_y = thrust::min_element(thrust::device, dev_y_coord, dev_y_coord + model_ref.vertices.size() / 3);
float host_max_y, host_min_y;
cudaMemcpy(&host_max_y, dev_max_y, sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&host_min_y, dev_min_y, sizeof(float), cudaMemcpyDeviceToHost);
float* dev_max_z = thrust::max_element(thrust::device, dev_z_coord, dev_z_coord + model_ref.vertices.size() / 3);
float* dev_min_z = thrust::min_element(thrust::device, dev_z_coord, dev_z_coord + model_ref.vertices.size() / 3);
float host_max_z, host_min_z;
cudaMemcpy(&host_max_z, dev_max_z, sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&host_min_z, dev_min_z, sizeof(float), cudaMemcpyDeviceToHost);
cout << "upper corner:(" << host_max_x << ", " << host_max_y << ", " << host_max_z << ")" << endl;
cout << "downward corner:(" << host_min_x << ", " << host_min_y << ", " << host_min_z << ")" << endl;
temp_geo_ref.bounding_box_down_corner[0] = host_min_x - 0.1f;
temp_geo_ref.bounding_box_down_corner[1] = host_min_y - 0.1f;
temp_geo_ref.bounding_box_down_corner[2] = host_min_z - 0.1f;
temp_geo_ref.bounding_box_upper_corner[0] = host_max_x + 0.1f;
temp_geo_ref.bounding_box_upper_corner[1] = host_max_y + 0.1f;
temp_geo_ref.bounding_box_upper_corner[2] = host_max_z + 0.1f;
cudaMalloc(&temp_geo_ref.dev_mesh_indices, model_ref.faces.size() * sizeof(unsigned int));
cudaMemcpy(temp_geo_ref.dev_mesh_indices, model_ref.faces.data(), model_ref.faces.size() * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMalloc(&temp_geo_ref.dev_mesh_normals, model_ref.facevarying_normals.size() * sizeof(float));
cudaMemcpy(temp_geo_ref.dev_mesh_normals, model_ref.facevarying_normals.data(), model_ref.facevarying_normals.size() * sizeof(float), cudaMemcpyHostToDevice);
/*
cout << "uvs:" << endl;
for (int i = 0; i < meshes[0].facevarying_uvs.size() / 2; i = i + 1) {
cout << "uv[" << i << "]:(" << meshes[0].facevarying_uvs[i * 2] << ", " << meshes[0].facevarying_uvs[i * 2 + 1] << ")" << endl;
}
*/
if (textures.size() != 0) {
// dim3 numblocksUVWarp = (model_ref.facevarying_uvs.size() + blockSize1d - 1) / blockSize1d;
cudaMalloc(&temp_geo_ref.dev_uvs, model_ref.facevarying_uvs.size() * sizeof(float));
cudaMemcpy(temp_geo_ref.dev_uvs, model_ref.facevarying_uvs.data(), model_ref.facevarying_uvs.size() * sizeof(float), cudaMemcpyHostToDevice);
// warpUV <<<numblocksUVWarp, blockSize1d>>> (model_ref.facevarying_uvs.size(), temp_geo_ref.dev_uvs);
/* NOTE: UV is facevarying instead of vertices varying.
for (int i = 0; i < model_ref.faces.size() / 3; ++i) {
int idx1 = model_ref.faces[i * 3];
int idx2 = model_ref.faces[i * 3 + 1];
int idx3 = model_ref.faces[i * 3 + 2];
cout << "Triangle(" << idx1 << ", " << idx2 << ", " << idx3 << ")" << endl;
cout << "UV" << idx1 << ":(" << model_ref.facevarying_uvs[idx1 * 2] << ", " << model_ref.facevarying_uvs[idx1 * 2 + 1] << ")" << endl;
cout << "UV" << idx2 << ":(" << model_ref.facevarying_uvs[idx2 * 2] << ", " << model_ref.facevarying_uvs[idx2 * 2 + 1] << ")" << endl;
cout << "UV" << idx3 << ":(" << model_ref.facevarying_uvs[idx3 * 2] << ", " << model_ref.facevarying_uvs[idx3 * 2 + 1] << ")" << endl;
// cout << "UV:(" << model_ref.facevarying_uvs[i * 2] << ", " << model_ref.facevarying_uvs[i * 2 + 1] << ")" << endl;
}
*/
int image_size = textures[0].components * textures[0].width * textures[0].height * sizeof(unsigned char);
cudaMalloc(&temp_geo_ref.dev_texture, image_size * sizeof(unsigned char));
cudaMemcpy(temp_geo_ref.dev_texture, textures[0].image, image_size * sizeof(unsigned char), cudaMemcpyHostToDevice);
temp_geo_ref.hasTexture = true;
temp_geo_ref.texture_width = textures[0].width;
temp_geo_ref.texture_height = textures[0].height;
}
else {
temp_geo_ref.hasTexture = false;
}
cudaFree(dev_x_coord);
cudaFree(dev_y_coord);
cudaFree(dev_z_coord);
}
}
checkCUDAError("Mesh init");
}
void pathtraceFree(Scene* scene) {
cudaFree(dev_image); // no-op if dev_image is null
cudaFree(dev_paths);
cudaFree(dev_geoms);
cudaFree(dev_materials);
cudaFree(dev_intersections);
// TODO: clean up any extra device memory you created
cudaFree(dev_cache_intersections);
cudaFree(dev_cache_paths);
checkCUDAError("pathtraceFree");
}
void meshFree(Scene* scene) {
for (int i = 0; scene != nullptr && i < scene->geoms.size(); ++i) {
Geom& temp_geo_ref = scene->geoms[i];
if (temp_geo_ref.type == GeomType::MESH) {
if (temp_geo_ref.dev_mesh_indices != nullptr) {
cudaFree(temp_geo_ref.dev_mesh_indices);
}
if (temp_geo_ref.dev_mesh_normals != nullptr) {
cudaFree(temp_geo_ref.dev_mesh_normals);
}
if (temp_geo_ref.dev_mesh_positions != nullptr) {
cudaFree(temp_geo_ref.dev_mesh_positions);
}
if (temp_geo_ref.hasTexture) {
if (temp_geo_ref.dev_uvs != nullptr) {
cudaFree(temp_geo_ref.dev_uvs);
}
if (temp_geo_ref.dev_texture != nullptr) {
cudaFree(temp_geo_ref.dev_texture);
}
}
}
}
checkCUDAError("GeoFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__device__ glm::vec2 ConcentricSampleDisk(const glm::vec2 u) {
// Map uniform random numbers to [-1, 1]
glm::vec2 uOffset = 2.f * u - glm::vec2(1, 1);
// Handle degeneracy at the origin
if (uOffset.x == 0 && uOffset.y == 0) {
return glm::vec2(0.f);
}
// Apply concentric mapping to point
float theta, r;
if (fabs(uOffset.x) > fabs(uOffset.y)) {
r = uOffset.x;
theta = 0.25 * 3.1415926 * (uOffset.y / uOffset.x);
}
else {
r = uOffset.y;
theta = 0.5 * 3.1415926 - 0.25 * 3.1415926 * (uOffset.x / uOffset.y);
}
return r * glm::vec2(cosf(theta), sinf(theta));
}
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
// TODO: implement antialiasing by jittering the ray
#ifdef CACHE_BOUNCE
// If we cache the first bounce, the camera ray is not jittered, so the cached intersection stays valid across iterations.
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
#else
// We jitter rays for antialiasing when there is no first-bounce cache.
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f + u01(rng) - 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f + u01(rng) - 0.5f)
);
#endif // CACHE_BOUNCE
bool thin_len_cam = false;
#ifdef THIN_LENS_CAMERA
thin_len_cam = true;
#endif // THIN_LENS_CAMERA
float lensRadius = 0.1f;
float focalDistance = cam.focal_length;
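// Thin-lens depth of field: jitter the ray origin over an aperture disk of radius lensRadius and
// re-aim the ray at the point where the original ray meets the focal plane, so geometry near
// focalDistance stays in focus.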
if (thin_len_cam) {
// Sample point on lens
glm::vec2 pLens = lensRadius * ConcentricSampleDisk(glm::vec2(u01(rng), u01(rng)));
// Compute point on plane of focus
float ft = focalDistance / glm::dot(segment.ray.direction, cam.view);
glm::vec3 pFocus = ft * segment.ray.direction + segment.ray.origin;
// Update ray for effect of lens
segment.ray.origin = cam.position + cam.up * pLens[1] + cam.right * pLens[0];
// segment.ray.origin = cam.position;
segment.ray.direction = glm::normalize(pFocus - segment.ray.origin);
}
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
pathSegments[path_index].ori_id = path_index;
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
glm::vec2 tmp_uv;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == MESH)
{
t = meshIntersectionTest(geom, pathSegment.ray, tmp_uv, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
intersections[path_index].outside = outside;
intersections[path_index].hit_type = geoms[hit_geom_index].type;
intersections[path_index].hasTexture = geoms[hit_geom_index].hasTexture;
intersections[path_index].geomId = hit_geom_index;
if (geoms[hit_geom_index].type == MESH && geoms[hit_geom_index].hasTexture) {
intersections[path_index].uv = tmp_uv;
}
if (abs(normal.x) > abs(normal.y)) {
intersections[path_index].tangent = glm::vec3(-normal.z, 0.f, normal.x) / sqrt(normal.x * normal.x + normal.z * normal.z);
}
else {
intersections[path_index].tangent = glm::vec3(0.f, normal.z, -normal.y) / sqrt(normal.y * normal.y + normal.z * normal.z);
}
intersections[path_index].bitangent = glm::cross(normal, intersections[path_index].tangent);
}
}
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeFakeMaterial (
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
// LOOK: this is how you use thrust's RNG! Please look at
// makeSeededRandomEngine as well.
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
// TODO: replace this! you should be able to start with basically a one-liner
else {
float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f));
pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f;
pathSegments[idx].color *= u01(rng); // apply some noise because why not
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
} else {
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}
}
}
__device__ int directlight_shadowtest(Ray tempRay, Geom* geoms, int geoms_size) {
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
glm::vec2 tmp_uv;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom& geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, tempRay, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, tempRay, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == MESH)
{
t = meshIntersectionTest(geom, tempRay, tmp_uv, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
return hit_geom_index;
}
__global__ void directlight_shade_bounce(int iter, int num_paths, ShadeableIntersection* shadeableIntersections, PathSegment* pathSegments, Geom* geoms
, int geoms_size, Material* materials, glm::vec3* image) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths) {
// int ori_idx = pathSegments[idx].ori_id;
// ShadeableIntersection intersection = shadeableIntersections[ori_idx];
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) {
// If the intersection exists
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
// thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
image[pathSegments[idx].pixelIndex] += pathSegments[idx].color;
}
else {
// direct shading:
const ShadeableIntersection temp_intersect = shadeableIntersections[idx];
glm::vec3 intersect_pos = temp_intersect.t * pathSegments[idx].ray.direction + pathSegments[idx].ray.origin;
thrust::uniform_real_distribution<float> u01(0, 1);
// Randomly select a light source
// Assumes at most 10 lights in the scene for direct lighting
int light_num = 0;
int light_idxs[10];
for (int i = 0; i < geoms_size; ++i) {
Geom& temp_geo_ref = geoms[i];
Material& temp_mat_ref = materials[temp_geo_ref.materialid];
if (temp_mat_ref.emittance > 0.f) {
// This is a light
light_idxs[light_num] = i;
++light_num;
}
}
int random_light_idx = glm::min((int)(u01(rng) * light_num), light_num - 1);
Geom& light = geoms[light_idxs[random_light_idx]];
// Get an intersection on the surface of its shape
glm::vec4 pObj(u01(rng) - 0.5f, -0.5f, u01(rng) - 0.5f, 1.f);
float area = light.scale.x * light.scale.z;
glm::vec4 local_normal(0.f, -1.f, 0.f, 0.f);
glm::vec3 light_intersect = multiplyMV(light.transform, pObj);
glm::vec3 light_normal = glm::normalize(multiplyMV(light.invTranspose, local_normal));
float pdf = glm::length2(light_intersect - intersect_pos) / area;
// Return black if the resulting PDF is zero or the sampled light point
// coincides with the shading point.
if (pdf == 0.f || glm::l2Norm(light_intersect - intersect_pos) < FLT_EPSILON) {
pathSegments[idx].color = glm::vec3(0.f);
}
else {
// Set ωi to the normalized vector from the reference Intersection's
// point to the Shape's intersection point.
glm::vec3 wi = glm::normalize(light_intersect - intersect_pos);
// Return the light emitted along ωi from our intersection point.
Material& light_material = materials[light.materialid];
glm::vec3 light_L = glm::dot(light_normal, -wi) > 0.f ? (light_material.color * light_material.emittance) : glm::vec3(0.f);
// printf("light_normal:(%f, %f, %f)\nwi:(%f, %f, %f)\n\n", light_normal.x, light_normal.y, light_normal.z, wi.x, wi.y, wi.z);
/*
if (glm::l2Norm(light_L) != 0.f) {
printf("light_L != 0\n");
}
*/
// Shadow test
Ray tempRay;
tempRay.direction = wi;
tempRay.origin = intersect_pos + 0.01f * temp_intersect.surfaceNormal;
int hit_geom_index = directlight_shadowtest(tempRay, geoms, geoms_size);
if (hit_geom_index == -1 || hit_geom_index != light_idxs[random_light_idx]) {
pathSegments[idx].color = glm::vec3(0.f);
}
else {
// Evaluate the remaining component of the LTE
// Texture color:
if (temp_intersect.hit_type == MESH && temp_intersect.hasTexture) {
Geom& temp_geo_ref = geoms[temp_intersect.geomId];
float temp_u = temp_intersect.uv[0];
float temp_v = temp_intersect.uv[1];
/*
if (temp_intersect.uv[0] > 1 || temp_intersect.uv[0] < 0) {
temp_u = temp_intersect.uv[0] - glm::floor(temp_intersect.uv[0]);
if (temp_u == 0) {
}
}
if (temp_intersect.uv[1] > 1 || temp_intersect.uv[1] < 0) {
temp_v = temp_intersect.uv[1] - glm::floor(temp_intersect.uv[1]);
}
*/
int coordU = (int)(temp_u * (temp_geo_ref.texture_width));
int coordV = (int)(temp_v * (temp_geo_ref.texture_height));
if (coordU >= 512) {
printf("coordU >= 512: %d\n", coordU);
coordU %= 512;
}
if (coordV >= 512) {
printf("coordV >= 512: %d\n", coordV);
coordV %= 512;
}
int pixel_idx = coordV * temp_geo_ref.texture_width + coordU;
// int pixel_idx = coordU * temp_geo_ref.texture_width + coordV;
unsigned int colR = (unsigned int) temp_geo_ref.dev_texture[pixel_idx * 4];
unsigned int colG = (unsigned int) temp_geo_ref.dev_texture[pixel_idx * 4 + 1];
unsigned int colB = (unsigned int) temp_geo_ref.dev_texture[pixel_idx * 4 + 2];
materialColor[0] = (float)colR / 255.f;
materialColor[1] = (float)colG / 255.f;
materialColor[2] = (float)colB / 255.f;
// printf("UV:(%f, %f)\n", temp_intersect.uv[0], temp_intersect.uv[1]);
// printf("UVCoord:(%d, %d)\n", coordU, coordV);
/*
if (colR != 225 || colG != 191 || colB != 0) {
printf("(%d, %d, %d)\n", colR, colG, colB);
}*/
// printf("(%d, %d)\n", temp_geo_ref.texture_width, temp_geo_ref.texture_height);
}
glm::vec3 f = materialColor / PI;
pathSegments[idx].color = f * light_L * glm::abs(glm::dot(wi, temp_intersect.surfaceNormal)) / (pdf / light_num);
pathSegments[idx].remainingBounces = 0;
image[pathSegments[idx].pixelIndex] += pathSegments[idx].color;
// if (glm::l2Norm(pathSegments[idx].color) != 0.f) {
// printf("light_num:%d\n", light_num);
// printf("pathSegments[idx].color:(%f, %f, %f)\n", pathSegments[idx].color.x, pathSegments[idx].color.y, pathSegments[idx].color.z);
// }
// printf("temp_intersect.surfaceNormal:(%f, %f, %f)\nwi:(%f, %f, %f)\n\n", temp_intersect.surfaceNormal.x, temp_intersect.surfaceNormal.y, temp_intersect.surfaceNormal.z, wi.x, wi.y, wi.z);
// printf("light_L:(%f, %f, %f)\n", light_L.x, light_L.y, light_L.z);
// printf("pathSegments[idx].color:(%f, %f, %f)\n", pathSegments[idx].color.x, pathSegments[idx].color.y, pathSegments[idx].color.z);
}
}
}
}
else {
// If there was no intersection, color the ray black.
pathSegments[idx].color = glm::vec3(0.f);
pathSegments[idx].remainingBounces = 0;
image[pathSegments[idx].pixelIndex] += pathSegments[idx].color;
}
}
}
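// Illustrative sketch (not part of the original file): the single-sample direct-light
// estimator evaluated above, written out on its own. With a Lambertian BRDF f = albedo / PI,
// light radiance L, the cosine term at the shading point, and an area-sampling pdf, choosing
// one of light_num lights uniformly scales the estimate by light_num (i.e. divides the pdf):
//   Lo ~= f * L * |cos(theta)| * light_num / pdf
// directLightEstimateSketch is a hypothetical helper name introduced only for illustration.
__device__ glm::vec3 directLightEstimateSketch(const glm::vec3& albedo, const glm::vec3& lightL,
                                               float cosTheta, float pdf, int lightCount) {
  glm::vec3 f = albedo / PI;  // Lambertian BRDF
  return f * lightL * glm::abs(cosTheta) / (pdf / (float)lightCount);
}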
__global__ void shade_image(int num_paths, PathSegment* pathSegments, Geom* geoms, int geoms_size, glm::vec3* image) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths) {
int mesh_idx = 0;
for (int i = 0; i < geoms_size; ++i) {
if (geoms[i].hasTexture) {
mesh_idx = i;
}
}
unsigned int colR = (unsigned int) geoms[mesh_idx].dev_texture[pathSegments[idx].pixelIndex * 4];
unsigned int colG = (unsigned int) geoms[mesh_idx].dev_texture[pathSegments[idx].pixelIndex * 4 + 1];
unsigned int colB = (unsigned int) geoms[mesh_idx].dev_texture[pathSegments[idx].pixelIndex * 4 + 2];
image[pathSegments[idx].pixelIndex] += glm::vec3((float)colR / 255.f, (float)colG / 255.f, (float)colB / 255.f);
}
}
__global__ void shade_bounce(int iter, int num_paths, ShadeableIntersection* shadeableIntersections, PathSegment* pathSegments, Geom* geoms, Material* materials, glm::vec3* image) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths) {
// int ori_idx = pathSegments[idx].ori_id;
// ShadeableIntersection intersection = shadeableIntersections[ori_idx];
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) {
// If the intersection exists
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
// thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
image[pathSegments[idx].pixelIndex] += pathSegments[idx].color;
}
else {
// BSDF accumulate:
// const ShadeableIntersection temp_intersect = shadeableIntersections[ori_idx];
const ShadeableIntersection temp_intersect = shadeableIntersections[idx];
glm::vec3 intersect_pos = temp_intersect.t * pathSegments[idx].ray.direction + pathSegments[idx].ray.origin;
scatterRay(pathSegments[idx], intersect_pos, temp_intersect.surfaceNormal, temp_intersect.outside, temp_intersect, geoms, material, rng);
}
}
else {
// If there was no intersection, color the ray black.
pathSegments[idx].color = glm::vec3(0.f);
pathSegments[idx].remainingBounces = 0;
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
struct zero_bounce
{
__host__ __device__
bool operator()(const PathSegment x)
{
return x.remainingBounces == 0;
}
};
struct mat_sort
{
__host__ __device__
bool operator()(const ShadeableIntersection& i1, const ShadeableIntersection& i2) {
return i1.materialId > i2.materialId;
}
};
__global__ void print_remain_bounces(int nPaths, PathSegment* iterationPaths) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths) {
if (index == 0) {
printf("remainbounces:%d\n", iterationPaths[index].remainingBounces);
}
}
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
bool iterationComplete = false;
// std::cout << "iter:" << iter << std::endl;
// TODO: perform one iteration of path tracing
#ifdef CACHE_BOUNCE
if (iter == 1) {
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
}
else {
// Generate ray from cached first intersection
cudaMemcpy(dev_intersections, dev_cache_intersections, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_paths, dev_cache_paths, pixelcount * sizeof(PathSegment), cudaMemcpyDeviceToDevice);
dim3 numblocksPathSegmentTracing = (pixelcount + blockSize1d - 1) / blockSize1d;
// print_remain_bounces << <numblocksPathSegmentTracing, blockSize1d >> > (pixelcount, dev_cache_paths);
shade_bounce << <numblocksPathSegmentTracing, blockSize1d >> > (iter, pixelcount, dev_intersections, dev_paths, dev_geoms, dev_materials, dev_image);
// print_remain_bounces << <numblocksPathSegmentTracing, blockSize1d >> > (pixelcount, dev_paths);
PathSegment* new_end = thrust::remove_if(thrust::device, dev_paths, dev_paths + pixelcount, zero_bounce());
if (new_end == dev_paths) {
iterationComplete = true;
}
else {
num_paths = new_end - dev_paths;
}
depth++;
}
#else
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
#endif // CACHE_BOUNCE
// std::cout << "traceDepth:" << traceDepth << std::endl;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
while (!iterationComplete) {
std::cout << "Depth:" << depth << std::endl;
std::cout << "Num of path:" << num_paths << std::endl << std::endl;
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
checkCUDAError("trace one bounce");
cudaDeviceSynchronize();
depth++;
#ifdef MATERIAL_SORT
// TODO: Sort rays by material
thrust::stable_sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, mat_sort());
#endif //
#ifdef CACHE_BOUNCE
// Cache the first intersection
if (iter == 1 && depth == 1) {
cudaMemcpy(dev_cache_intersections, dev_intersections, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_cache_paths, dev_paths, pixelcount * sizeof(PathSegment), cudaMemcpyDeviceToDevice);
// print_remain_bounces << <numblocksPathSegmentTracing, blockSize1d >> > (pixelcount, dev_paths);
}
#endif // CACHE_BOUNCE
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
shade_bounce <<<numblocksPathSegmentTracing, blockSize1d>>> (iter, num_paths, dev_intersections, dev_paths, dev_geoms, dev_materials, dev_image);
// TODO: should be based off stream compaction results.
// Stream compact away all of the terminated paths.
PathSegment* new_end = thrust::remove_if(thrust::device, dev_paths, dev_paths + num_paths, zero_bounce());
// std::cout << "num_path before remove:" << num_paths << std::endl;
// print_remain_bounces << <numblocksPathSegmentTracing, blockSize1d >> > (num_paths, dev_paths);
if (new_end == dev_paths) {
iterationComplete = true;
}
else {
num_paths = new_end - dev_paths;
}
// std::cout << "num_path after remove:" << num_paths << std::endl;
// iterationComplete = true;
}
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
void directlight_pathtrace(uchar4* pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera& cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
bool iterationComplete = false;
// TODO: perform one iteration of path tracing
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
// std::cout << "traceDepth:" << traceDepth << std::endl;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
while (!iterationComplete) {
// std::cout << "Num of path:" << num_paths << std::endl;
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
computeIntersections <<<numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
checkCUDAError("trace one bounce");
cudaDeviceSynchronize();
depth++;
#ifdef MATERIAL_SORT
// TODO: Sort rays by material
thrust::stable_sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, mat_sort());
#endif //
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
directlight_shade_bounce <<<numblocksPathSegmentTracing, blockSize1d>>> (
iter,
num_paths,
dev_intersections,
dev_paths, dev_geoms,
hst_scene->geoms.size(),
dev_materials,
dev_image);
// dim3 numblocksImageShade = (512 * 512 + blockSize1d - 1) / blockSize1d;
// shade_image <<<numblocksImageShade, blockSize1d>>> (num_paths, dev_paths, dev_geoms, hst_scene->geoms.size(), dev_image);
// TODO: should be based off stream compaction results.
// Stream compact away all of the terminated paths.
PathSegment* new_end = thrust::remove_if(thrust::device, dev_paths, dev_paths + num_paths, zero_bounce());
// std::cout << "num_path before remove:" << num_paths << std::endl;
// print_remain_bounces << <numblocksPathSegmentTracing, blockSize1d >> > (num_paths, dev_paths);
if (new_end == dev_paths) {
iterationComplete = true;
}
else {
num_paths = new_end - dev_paths;
}
// std::cout << "num_path after remove:" << num_paths << std::endl;
iterationComplete = true;
}
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO << <blocksPerGrid2d, blockSize2d >> > (pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("direct light pathtrace");
}
|
0022ed72a918f6e1b3e0e1600a046d8ba4d3b17f.hip | // !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include <helper_math.h>
// Provide Debugging Functionality
#include "cuPrintf.hip"
#if __CUDA_ARCH__ < 200 //Compute capability 1.x architectures
#define CUPRINTF cuPrintf
#else //Compute capability 2.x architectures
#define CUPRINTF(fmt, ...) printf("[%d, %d]:\t" fmt, \
blockIdx.y*gridDim.x+blockIdx.x,\
threadIdx.z*blockDim.x*blockDim.y+threadIdx.y*blockDim.x+threadIdx.x,\
__VA_ARGS__)
#endif
#define ROWS 256 // num of parallel subfilters
#define DEB 0 // compare cpu and gpu results
#define TIMING 1 // measure the kernel execution time
__constant__ float2 NSEC[ROWS];
__constant__ float2 DSEC[ROWS];
// Parallel IIR: CPU
void cpu_pariir(float *x, float *y, float *ns, float *dsec, float c, int len);
// Check the results from CPU and GPU
void check(float *cpu, float *gpu, int len, int tot_chn);
template <int blockSize>
__global__ void GpuParIIR (float *x, int len, float c, float *y)
{
extern __shared__ float sm[];
float *sp = &sm[ROWS];
int tid = threadIdx.x;
//int id = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
// equivalently: lane_id = tid & 0x1f
int lane_id = tid % 32; // warp size is 32 on compute capability 3.5+ devices
int warp_id = tid / 32;
int ii, jj, kk;
float2 u = make_float2(0.0f);
float unew;
float y0;
// block size : ROWS
// each thread fetch input x to shared memory
for(ii=0; ii<len; ii+=ROWS)
{
sm[tid] = x[tid + ii];
__syncthreads();
// go through each x in shared memory
for(jj=0; jj<ROWS; jj++)
{
unew = sm[jj] - dot(u, DSEC[tid]);
u = make_float2(unew, u.x);
y0 = dot(u, NSEC[tid]);
// sum v across current block
#pragma unroll
for(kk=1; kk<32; kk<<=1)
{
y0 += __shfl_xor(y0, kk, 32);
}
if(lane_id == 0)
{
sp[warp_id] = y0;
}
__syncthreads();
if(blockSize == 256 && warp_id == 0)
{
if(lane_id < 8)
{
float warp_sum = sp[lane_id];
warp_sum += __shfl_xor(warp_sum, 1, 32); // ? 32
warp_sum += __shfl_xor(warp_sum, 2, 32); // ? 32
warp_sum += __shfl_xor(warp_sum, 4, 32); // ? 32
if(lane_id == 0){
// channel starting position: blockIdx.x * len
uint gid = __mul24(blockIdx.x , len) + ii + jj;
y[gid] = warp_sum + sm[jj] * c;
}
}
}
}
}
}
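// Illustrative sketch (not part of the original file): the __shfl_xor loop in the kernel
// above is a 32-lane "butterfly" reduction. Each step XORs the lane id with a growing
// mask, so after log2(32) = 5 steps every lane of the warp holds the full sum without
// touching shared memory. warpReduceSumSketch is a hypothetical name used only here.
__inline__ __device__ float warpReduceSumSketch(float v)
{
    for (int mask = 1; mask < 32; mask <<= 1)
        v += __shfl_xor(v, mask, 32);
    return v;
}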
int main(int argc, char *argv[])
{
if(argc != 2){
printf("Missing the length of input!\nUsage: ./parIIR Len\n");
exit(EXIT_FAILURE);
}
int i, j;
int channels = 64;
int len = atoi(argv[1]); // signal length
size_t bytes = sizeof(float) * len;
// input
float *x= (float*) malloc(bytes);
for (i=0; i<len; i++){
x[i] = 0.1f;
}
// output: multi-channel from GPU
float *gpu_y= (float*) malloc(bytes * channels);
// cpu output:
float *cpu_y= (float*) malloc(bytes);
float c = 3.0;
// coefficients
float *nsec, *dsec;
nsec = (float*) malloc(sizeof(float) * 2 * ROWS); // numerator
dsec = (float*) malloc(sizeof(float) * 3 * ROWS); // denominator
for(i=0; i<ROWS; i++){
for(j=0; j<3; j++){
dsec[i*3 + j] = 0.00002f;
}
}
for(i=0; i<ROWS; i++){
for(j=0; j<2; j++){
nsec[i*2 + j] = 0.00005f;
}
}
// compute the cpu results
cpu_pariir(x, cpu_y, nsec, dsec, c, len);
int warpsize = 32;
int warpnum = ROWS/warpsize;
// vectorize the coefficients
float2 *vns, *vds;
vns = (float2*) malloc(sizeof(float2) * ROWS);
vds = (float2*) malloc(sizeof(float2) * ROWS);
for(i=0; i<ROWS; i++){
vds[i] = make_float2(0.00002f);
vns[i] = make_float2(0.00005f);
}
// timer
hipEvent_t start, stop;
// device memory
float *d_x;
hipMalloc((void **)&d_x, bytes);
float *d_y;
hipMalloc((void **)&d_y, bytes * channels);
// copy data to constant memory
hipMemcpyToSymbol(NSEC, vns, sizeof(float2)*ROWS, 0,
hipMemcpyHostToDevice);
hipMemcpyToSymbol(DSEC, vds, sizeof(float2)*ROWS, 0,
hipMemcpyHostToDevice);
hipMemcpy(d_x, x, bytes, hipMemcpyHostToDevice);
#if TIMING
hipEventCreate(&start);
hipEventCreate(&stop);
// start timer
hipEventRecord(start, 0);
#endif
// kernel
hipLaunchKernelGGL(( GpuParIIR <ROWS>)
, dim3(channels), dim3(ROWS), sizeof(float) * (ROWS + warpnum) , 0, d_x, len, c, d_y);
#if TIMING
// end timer
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float et;
hipEventElapsedTime(&et, start, stop);
printf ("ElapsedTime = %f (s)\n", et/1000.f);
#endif
hipMemcpy(gpu_y, d_y, bytes * channels, hipMemcpyDeviceToHost);
check(cpu_y, gpu_y, len, channels);
// release
hipFree(d_x);
hipFree(d_y);
free(x);
free(gpu_y);
free(cpu_y);
free(dsec);
free(nsec);
free(vds);
free(vns);
}
void cpu_pariir(float *x, float *y, float *ns, float *dsec, float c, int len)
{
int i, j;
float out;
float unew;
float *ds = (float*) malloc(sizeof(float) * ROWS * 2);
// internal state
float *u = (float*) malloc(sizeof(float) * ROWS * 2);
memset(u, 0 , sizeof(float) * ROWS * 2);
for(i=0; i<ROWS; i++)
{
ds[i * 2] = dsec[3 * i + 1];
ds[i * 2 + 1] = dsec[3 * i + 2];
}
for(i=0; i<len; i++)
{
out = c * x[i];
for(j=0; j<ROWS; j++)
{
unew = x[i] - (ds[j*2] * u[j*2] + ds[j*2+1] * u[j*2+1]);
u[j*2+1] = u[j * 2];
u[j*2] = unew;
out = out + (u[j*2] * ns[j*2] + u[j*2 + 1] * ns[j*2 + 1]);
}
y[i] = out;
}
free(ds);
free(u);
}
void check(float *cpu, float *gpu, int len, int tot_chn)
{
int i;
int chn;
uint start;
int success = 1;
for(chn=0; chn<tot_chn; chn++)
{
start = chn * len;
for(i=0; i<len; i++)
{
if(cpu[i] - gpu[i + start] > 0.0001 || gpu[i + start] - cpu[i] > 0.0001) // flag mismatches in either direction
{
puts("Failed!");
success = 0;
break;
}
}
}
if(success)
puts("Passed!");
#if DEB
for(i=0; i<len; i++)
{
printf("[%d]\t cpu=%f \t gpu=%f\n", i, cpu[i], gpu[i]);
}
#endif
}
| 0022ed72a918f6e1b3e0e1600a046d8ba4d3b17f.cu | // System includes
#include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <cuda_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include <helper_math.h>
// Provide Debugging Functionality
#include "cuPrintf.cu"
#if __CUDA_ARCH__ < 200 //Compute capability 1.x architectures
#define CUPRINTF cuPrintf
#else //Compute capability 2.x architectures
#define CUPRINTF(fmt, ...) printf("[%d, %d]:\t" fmt, \
blockIdx.y*gridDim.x+blockIdx.x,\
threadIdx.z*blockDim.x*blockDim.y+threadIdx.y*blockDim.x+threadIdx.x,\
__VA_ARGS__)
#endif
#define ROWS 256 // num of parallel subfilters
#define DEB 0 // compare cpu and gpu results
#define TIMING 1 // measure the kernel execution time
__constant__ float2 NSEC[ROWS];
__constant__ float2 DSEC[ROWS];
// Parallel IIR: CPU
void cpu_pariir(float *x, float *y, float *ns, float *dsec, float c, int len);
// Check the results from CPU and GPU
void check(float *cpu, float *gpu, int len, int tot_chn);
template <int blockSize>
__global__ void GpuParIIR (float *x, int len, float c, float *y)
{
extern __shared__ float sm[];
float *sp = &sm[ROWS];
int tid = threadIdx.x;
//int id = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
// equivalently: lane_id = tid & 0x1f
int lane_id = tid % 32; // warp size is 32 on compute capability 3.5+ devices
int warp_id = tid / 32;
int ii, jj, kk;
float2 u = make_float2(0.0f);
float unew;
float y0;
// block size : ROWS
// each thread fetch input x to shared memory
for(ii=0; ii<len; ii+=ROWS)
{
sm[tid] = x[tid + ii];
__syncthreads();
// go through each x in shared memory
for(jj=0; jj<ROWS; jj++)
{
unew = sm[jj] - dot(u, DSEC[tid]);
u = make_float2(unew, u.x);
y0 = dot(u, NSEC[tid]);
// sum v across current block
#pragma unroll
for(kk=1; kk<32; kk<<=1)
{
y0 += __shfl_xor(y0, kk, 32);
}
if(lane_id == 0)
{
sp[warp_id] = y0;
}
__syncthreads();
if(blockSize == 256 && warp_id == 0)
{
if(lane_id < 8)
{
float warp_sum = sp[lane_id];
warp_sum += __shfl_xor(warp_sum, 1, 32); // ? 32
warp_sum += __shfl_xor(warp_sum, 2, 32); // ? 32
warp_sum += __shfl_xor(warp_sum, 4, 32); // ? 32
if(lane_id == 0){
// channel starting position: blockIdx.x * len
uint gid = __mul24(blockIdx.x , len) + ii + jj;
y[gid] = warp_sum + sm[jj] * c;
}
}
}
}
}
}
int main(int argc, char *argv[])
{
if(argc != 2){
printf("Missing the length of input!\nUsage: ./parIIR Len\n");
exit(EXIT_FAILURE);
}
int i, j;
int channels = 64;
int len = atoi(argv[1]); // signal length
size_t bytes = sizeof(float) * len;
// input
float *x= (float*) malloc(bytes);
for (i=0; i<len; i++){
x[i] = 0.1f;
}
// output: multi-channel from GPU
float *gpu_y= (float*) malloc(bytes * channels);
// cpu output:
float *cpu_y= (float*) malloc(bytes);
float c = 3.0;
// coefficients
float *nsec, *dsec;
nsec = (float*) malloc(sizeof(float) * 2 * ROWS); // numerator
dsec = (float*) malloc(sizeof(float) * 3 * ROWS); // denominator
for(i=0; i<ROWS; i++){
for(j=0; j<3; j++){
dsec[i*3 + j] = 0.00002f;
}
}
for(i=0; i<ROWS; i++){
for(j=0; j<2; j++){
nsec[i*2 + j] = 0.00005f;
}
}
// compute the cpu results
cpu_pariir(x, cpu_y, nsec, dsec, c, len);
int warpsize = 32;
int warpnum = ROWS/warpsize;
// vectorize the coefficients
float2 *vns, *vds;
vns = (float2*) malloc(sizeof(float2) * ROWS);
vds = (float2*) malloc(sizeof(float2) * ROWS);
for(i=0; i<ROWS; i++){
vds[i] = make_float2(0.00002f);
vns[i] = make_float2(0.00005f);
}
// timer
cudaEvent_t start, stop;
// device memory
float *d_x;
cudaMalloc((void **)&d_x, bytes);
float *d_y;
cudaMalloc((void **)&d_y, bytes * channels);
// copy data to constant memory
cudaMemcpyToSymbol(NSEC, vns, sizeof(float2)*ROWS, 0,
cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(DSEC, vds, sizeof(float2)*ROWS, 0,
cudaMemcpyHostToDevice);
cudaMemcpy(d_x, x, bytes, cudaMemcpyHostToDevice);
#if TIMING
cudaEventCreate(&start);
cudaEventCreate(&stop);
// start timer
cudaEventRecord(start, 0);
#endif
// kernel
GpuParIIR <ROWS>
<<< channels, ROWS, sizeof(float) * (ROWS + warpnum) >>> (d_x, len, c, d_y);
#if TIMING
// end timer
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float et;
cudaEventElapsedTime(&et, start, stop);
printf ("ElapsedTime = %f (s)\n", et/1000.f);
#endif
cudaMemcpy(gpu_y, d_y, bytes * channels, cudaMemcpyDeviceToHost);
check(cpu_y, gpu_y, len, channels);
// release
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(gpu_y);
free(cpu_y);
free(dsec);
free(nsec);
free(vds);
free(vns);
}
void cpu_pariir(float *x, float *y, float *ns, float *dsec, float c, int len)
{
int i, j;
float out;
float unew;
float *ds = (float*) malloc(sizeof(float) * ROWS * 2);
// internal state
float *u = (float*) malloc(sizeof(float) * ROWS * 2);
memset(u, 0 , sizeof(float) * ROWS * 2);
for(i=0; i<ROWS; i++)
{
ds[i * 2] = dsec[3 * i + 1];
ds[i * 2 + 1] = dsec[3 * i + 2];
}
for(i=0; i<len; i++)
{
out = c * x[i];
for(j=0; j<ROWS; j++)
{
unew = x[i] - (ds[j*2] * u[j*2] + ds[j*2+1] * u[j*2+1]);
u[j*2+1] = u[j * 2];
u[j*2] = unew;
out = out + (u[j*2] * ns[j*2] + u[j*2 + 1] * ns[j*2 + 1]);
}
y[i] = out;
}
free(ds);
free(u);
}
void check(float *cpu, float *gpu, int len, int tot_chn)
{
int i;
int chn;
uint start;
int success = 1;
for(chn=0; chn<tot_chn; chn++)
{
start = chn * len;
for(i=0; i<len; i++)
{
if(cpu[i] - gpu[i + start] > 0.0001 || gpu[i + start] - cpu[i] > 0.0001) // flag mismatches in either direction
{
puts("Failed!");
success = 0;
break;
}
}
}
if(success)
puts("Passed!");
#if DEB
for(i=0; i<len; i++)
{
printf("[%d]\t cpu=%f \t gpu=%f\n", i, cpu[i], gpu[i]);
}
#endif
}
|
ba498b54500becebdcfc6e8ff9b14db34149dd78.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/hpc_helpers.hpp"
template <
typename index_t,
typename value_t,
index_t num_iters=256> __global__
void square_root_kernel(
value_t * Data,
index_t length) {
const index_t thid = blockDim.x*blockIdx.x+threadIdx.x;
for (index_t i = thid; i < length; i += blockDim.x*gridDim.x){
value_t value = Data[i];
value_t root = value;
# pragma unroll (32)
for (index_t iters = 0; iters < num_iters && value; iters++)
root = 0.5*(root+value/root);
Data[i] = root;
}
}
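// Illustrative sketch (not part of the original file): the unrolled loop above is Newton's
// method (Heron's iteration) for sqrt(a). Starting from x0 = a it repeats
//   x_{k+1} = 0.5 * (x_k + a / x_k),
// which converges quadratically to sqrt(a); the "&& value" guard skips the update when
// a == 0 so there is no division by zero. A host-side reference of the same recurrence
// (heron_sqrt_sketch is a hypothetical name):
template <typename value_t>
value_t heron_sqrt_sketch(value_t a, int num_iters = 256) {
    value_t root = a;
    for (int k = 0; k < num_iters && a; k++)
        root = value_t(0.5) * (root + a / root);
    return root;
}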
int main () {
typedef float value_t;
typedef uint64_t index_t;
const index_t length = 1UL << 30;
value_t * data = nullptr, * Data = nullptr;
hipHostMalloc(&data, sizeof(value_t)*length); CUERR
hipMalloc (&Data, sizeof(value_t)*length); CUERR
for (index_t index = 0; index < length; index++)
data[index] = index;
TIMERSTART(overall)
TIMERSTART(host_to_device)
hipMemcpy(Data, data, sizeof(value_t)*length,
hipMemcpyHostToDevice); CUERR
TIMERSTOP(host_to_device)
TIMERSTART(square_root_kernel)
hipLaunchKernelGGL(( square_root_kernel), dim3(1024), dim3(1024), 0, 0, Data, length); CUERR
TIMERSTOP(square_root_kernel)
TIMERSTART(device_to_host)
hipMemcpy(data, Data, sizeof(value_t)*length,
hipMemcpyDeviceToHost); CUERR
TIMERSTOP(device_to_host)
TIMERSTOP(overall)
for (index_t index = 0; index < 10; index++)
std::cout << index << " " << data[index] << std::endl;
hipHostFree(data); CUERR
hipFree(Data); CUERR
}
| ba498b54500becebdcfc6e8ff9b14db34149dd78.cu | #include "../include/hpc_helpers.hpp"
template <
typename index_t,
typename value_t,
index_t num_iters=256> __global__
void square_root_kernel(
value_t * Data,
index_t length) {
const index_t thid = blockDim.x*blockIdx.x+threadIdx.x;
for (index_t i = thid; i < length; i += blockDim.x*gridDim.x){
value_t value = Data[i];
value_t root = value;
# pragma unroll (32)
for (index_t iters = 0; iters < num_iters && value; iters++)
root = 0.5*(root+value/root);
Data[i] = root;
}
}
int main () {
typedef float value_t;
typedef uint64_t index_t;
const index_t length = 1UL << 30;
value_t * data = nullptr, * Data = nullptr;
cudaMallocHost(&data, sizeof(value_t)*length); CUERR
cudaMalloc (&Data, sizeof(value_t)*length); CUERR
for (index_t index = 0; index < length; index++)
data[index] = index;
TIMERSTART(overall)
TIMERSTART(host_to_device)
cudaMemcpy(Data, data, sizeof(value_t)*length,
cudaMemcpyHostToDevice); CUERR
TIMERSTOP(host_to_device)
TIMERSTART(square_root_kernel)
square_root_kernel<<<1024, 1024>>>(Data, length); CUERR
TIMERSTOP(square_root_kernel)
TIMERSTART(device_to_host)
cudaMemcpy(data, Data, sizeof(value_t)*length,
cudaMemcpyDeviceToHost); CUERR
TIMERSTOP(device_to_host)
TIMERSTOP(overall)
for (index_t index = 0; index < 10; index++)
std::cout << index << " " << data[index] << std::endl;
cudaFreeHost(data); CUERR
cudaFree(Data); CUERR
}
|
abbadcddb87110bf7916dd8c279a1888679050a8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <float.h>
#include <stdint.h>
#include <limits>
#include "BufferCompaction.h"
#include "ExtensionFunctions.hpp"
#include "GpuRtConstants.h"
#include "HyperLogLogRank.h"
extern "C" __device__ int32_t pos_start_impl(const int32_t* row_index_resume) {
return blockIdx.x * blockDim.x + threadIdx.x;
}
extern "C" __device__ int32_t group_buff_idx_impl() {
return pos_start_impl(NULL);
}
extern "C" __device__ int32_t pos_step_impl() {
return blockDim.x * gridDim.x;
}
extern "C" __device__ int8_t thread_warp_idx(const int8_t warp_sz) {
return threadIdx.x % warp_sz;
}
extern "C" __device__ const int64_t* init_shared_mem_nop(
const int64_t* groups_buffer,
const int32_t groups_buffer_size) {
return groups_buffer;
}
extern "C" __device__ void write_back_nop(int64_t* dest, int64_t* src, const int32_t sz) {
}
extern "C" __device__ const int64_t* init_shared_mem(const int64_t* groups_buffer,
const int32_t groups_buffer_size) {
extern __shared__ int64_t fast_bins[];
if (threadIdx.x == 0) {
memcpy(fast_bins, groups_buffer, groups_buffer_size);
}
__syncthreads();
return fast_bins;
}
/**
* Dynamically allocates shared memory per block.
* The amount of shared memory allocated is defined at kernel launch time.
* Returns a pointer to the beginning of allocated shared memory
*/
extern "C" __device__ int64_t* alloc_shared_mem_dynamic() {
extern __shared__ int64_t groups_buffer_smem[];
return groups_buffer_smem;
}
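// Illustrative sketch (not part of the original file): the "extern __shared__" array above
// is sized at launch time, not inside the kernel. A launch that gives each block
// groups_buffer_size int64 slots would pass the byte count as the dynamic-shared-memory
// launch parameter, e.g. (query_kernel, grid, block and stream are hypothetical here):
//
//   hipLaunchKernelGGL(query_kernel, grid, block,
//                      groups_buffer_size * sizeof(int64_t) /* dynamic smem bytes */,
//                      stream, ...kernel arguments...);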
/**
* Set the allocated shared memory elements to be equal to the 'identity_element'.
* groups_buffer_size: number of 64-bit elements in shared memory per thread-block
* NOTE: groups_buffer_size is in units of 64-bit elements.
*/
extern "C" __device__ void set_shared_mem_to_identity(
int64_t* groups_buffer_smem,
const int32_t groups_buffer_size,
const int64_t identity_element = 0) {
#pragma unroll
for (int i = threadIdx.x; i < groups_buffer_size; i += blockDim.x) {
groups_buffer_smem[i] = identity_element;
}
__syncthreads();
}
/**
* Initialize dynamic shared memory:
* 1. Allocates dynamic shared memory
* 2. Set every allocated element to be equal to the 'identity element', by default zero.
*/
extern "C" __device__ const int64_t* init_shared_mem_dynamic(
const int64_t* groups_buffer,
const int32_t groups_buffer_size) {
int64_t* groups_buffer_smem = alloc_shared_mem_dynamic();
set_shared_mem_to_identity(groups_buffer_smem, groups_buffer_size);
return groups_buffer_smem;
}
extern "C" __device__ void write_back(int64_t* dest, int64_t* src, const int32_t sz) {
__syncthreads();
if (threadIdx.x == 0) {
memcpy(dest, src, sz);
}
}
extern "C" __device__ void write_back_smem_nop(int64_t* dest,
int64_t* src,
const int32_t sz) {}
extern "C" __device__ void agg_from_smem_to_gmem_nop(int64_t* gmem_dest,
int64_t* smem_src,
const int32_t num_elements) {}
/**
* Aggregate the result stored into shared memory back into global memory.
* It also writes back the stored binId, if any, back into global memory.
* Memory layout assumption: each 64-bit shared memory unit of data is as follows:
* [0..31: the stored bin ID, to be written back][32..63: the count result, to be
* aggregated]
*/
extern "C" __device__ void agg_from_smem_to_gmem_binId_count(int64_t* gmem_dest,
int64_t* smem_src,
const int32_t num_elements) {
__syncthreads();
#pragma unroll
for (int i = threadIdx.x; i < num_elements; i += blockDim.x) {
int32_t bin_id = *reinterpret_cast<int32_t*>(smem_src + i);
int32_t count_result = *(reinterpret_cast<int32_t*>(smem_src + i) + 1);
if (count_result) { // non-zero count
atomicAdd(reinterpret_cast<unsigned int*>(gmem_dest + i) + 1,
static_cast<int32_t>(count_result));
// writing back the binId, only if count_result is non-zero
*reinterpret_cast<unsigned int*>(gmem_dest + i) = static_cast<int32_t>(bin_id);
}
}
}
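// Illustrative sketch (not part of the original file): how the function above interprets
// each 64-bit shared-memory slot. The low 32 bits (the first int32 on a little-endian GPU)
// hold the bin id to be written back and the high 32 bits hold the partial count to be
// aggregated. The accessors below only restate that layout; the names are hypothetical.
__device__ inline int32_t smem_slot_bin_id_sketch(const int64_t* slot) {
  return *reinterpret_cast<const int32_t*>(slot);        // bits 0..31
}
__device__ inline int32_t smem_slot_count_sketch(const int64_t* slot) {
  return *(reinterpret_cast<const int32_t*>(slot) + 1);  // bits 32..63
}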
/**
* Aggregate the result stored into shared memory back into global memory.
* It also writes back the stored binId, if any, back into global memory.
* Memory layout assumption: each 64-bit shared memory unit of data is as follows:
* [0..31: the count result, to be aggregated][32..63: the stored bin ID, to be written
* back]
*/
extern "C" __device__ void agg_from_smem_to_gmem_count_binId(int64_t* gmem_dest,
int64_t* smem_src,
const int32_t num_elements) {
__syncthreads();
#pragma unroll
for (int i = threadIdx.x; i < num_elements; i += blockDim.x) {
int32_t count_result = *reinterpret_cast<int32_t*>(smem_src + i);
int32_t bin_id = *(reinterpret_cast<int32_t*>(smem_src + i) + 1);
if (count_result) { // non-zero count
atomicAdd(reinterpret_cast<unsigned int*>(gmem_dest + i),
static_cast<int32_t>(count_result));
// writing back the binId, only if count_result is non-zero
*(reinterpret_cast<unsigned int*>(gmem_dest + i) + 1) =
static_cast<int32_t>(bin_id);
}
}
}
#define init_group_by_buffer_gpu_impl init_group_by_buffer_gpu
#include "GpuInitGroups.cu"
#undef init_group_by_buffer_gpu_impl
// Dynamic watchdog: monitoring up to 128 SMs. E.g. a GP100 config may have 60:
// 6 Graphics Processing Clusters (GPCs) * 10 Streaming Multiprocessors
// TODO(Saman): move these into a kernel parameter, allocated and initialized through CUDA
__device__ int64_t dw_sm_cycle_start[128]; // Set from host before launching the kernel
// TODO(Saman): make this cycle budget something constant in codegen level
__device__ int64_t dw_cycle_budget = 0; // Set from host before launching the kernel
__device__ int32_t dw_abort = 0; // TBD: set from host (async)
__inline__ __device__ uint32_t get_smid(void) {
uint32_t ret;
asm("mov.u32 %0, %%smid;" : "=r"(ret));
return ret;
}
/*
 * The main objective of this function is to return true if either of the following two
 * scenarios happens:
 * 1. a host request to abort the kernel execution is received
 * 2. the kernel execution takes more clock cycles than it was initially allowed
 * The assumption is that all (or none) threads within a block return true for the
 * watchdog, and the first thread within each block compares the recorded clock cycles for
 * its occupying SM with the allowed budget. It also assumes that all threads entering
 * this function are active (no critical edge exposure).
 * NOTE: dw_cycle_budget, dw_abort, and dw_sm_cycle_start[] are all variables in global
 * memory scope.
 */
extern "C" __device__ bool dynamic_watchdog() {
// check for dynamic watchdog, if triggered all threads return true
if (dw_cycle_budget == 0LL) {
return false; // Uninitialized watchdog can't check time
}
if (dw_abort == 1) {
return true; // Received host request to abort
}
uint32_t smid = get_smid();
if (smid >= 128) {
return false;
}
__shared__ volatile int64_t dw_block_cycle_start; // Thread block shared cycle start
__shared__ volatile bool
dw_should_terminate; // all threads within a block should return together if
// watchdog criteria is met
// thread 0 either initializes or reads the initial clock cycle; the result is stored
// in shared memory. Since all threads within a block share the same SM, there's no
// point in using more threads here.
if (threadIdx.x == 0) {
dw_block_cycle_start = 0LL;
int64_t cycle_count = static_cast<int64_t>(clock64());
// Make sure the block hasn't switched SMs
if (smid == get_smid()) {
dw_block_cycle_start = static_cast<int64_t>(
atomicCAS(reinterpret_cast<unsigned long long*>(&dw_sm_cycle_start[smid]),
0ULL,
static_cast<unsigned long long>(cycle_count)));
}
int64_t cycles = cycle_count - dw_block_cycle_start;
if ((smid == get_smid()) && (dw_block_cycle_start > 0LL) &&
(cycles > dw_cycle_budget)) {
// Check if we're out of time on this particular SM
dw_should_terminate = true;
} else {
dw_should_terminate = false;
}
}
__syncthreads();
return dw_should_terminate;
}
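// Illustrative sketch (not part of the original file): the three globals above are
// documented as "set from host before launching the kernel". With the plain HIP runtime
// API that initialization could look roughly like the lines below (HIP_SYMBOL usage and
// the surrounding host code are assumptions; the real engine may drive this through its
// own module/driver-API machinery instead):
//
//   int64_t zeros[128] = {0};
//   int64_t budget    = chosen_cycle_budget;   // picked by the caller
//   int32_t no_abort  = 0;
//   hipMemcpyToSymbol(HIP_SYMBOL(dw_sm_cycle_start), zeros,     sizeof(zeros),    0, hipMemcpyHostToDevice);
//   hipMemcpyToSymbol(HIP_SYMBOL(dw_cycle_budget),   &budget,   sizeof(budget),   0, hipMemcpyHostToDevice);
//   hipMemcpyToSymbol(HIP_SYMBOL(dw_abort),          &no_abort, sizeof(no_abort), 0, hipMemcpyHostToDevice);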
template <typename T = unsigned long long>
inline __device__ T get_empty_key() {
return EMPTY_KEY_64;
}
template <>
inline __device__ unsigned int get_empty_key() {
return EMPTY_KEY_32;
}
template <typename T>
inline __device__ int64_t* get_matching_group_value(int64_t* groups_buffer,
const uint32_t h,
const T* key,
const uint32_t key_count,
const uint32_t row_size_quad) {
const T empty_key = get_empty_key<T>();
uint32_t off = h * row_size_quad;
auto row_ptr = reinterpret_cast<T*>(groups_buffer + off);
{
const T old = atomicCAS(row_ptr, empty_key, *key);
if (empty_key == old && key_count > 1) {
for (size_t i = 1; i <= key_count - 1; ++i) {
atomicExch(row_ptr + i, key[i]);
}
}
}
if (key_count > 1) {
while (atomicAdd(row_ptr + key_count - 1, 0) == empty_key) {
// spin until the winning thread has finished writing the entire key and the init
// value
}
}
bool match = true;
for (uint32_t i = 0; i < key_count; ++i) {
if (row_ptr[i] != key[i]) {
match = false;
break;
}
}
if (match) {
auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count);
return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8));
}
return NULL;
}
extern "C" __device__ int64_t* get_matching_group_value(int64_t* groups_buffer,
const uint32_t h,
const int64_t* key,
const uint32_t key_count,
const uint32_t key_width,
const uint32_t row_size_quad,
const int64_t* init_vals) {
switch (key_width) {
case 4:
return get_matching_group_value(groups_buffer,
h,
reinterpret_cast<const unsigned int*>(key),
key_count,
row_size_quad);
case 8:
return get_matching_group_value(groups_buffer,
h,
reinterpret_cast<const unsigned long long*>(key),
key_count,
row_size_quad);
default:
return NULL;
}
}
template <typename T>
__device__ int32_t get_matching_group_value_columnar_slot(int64_t* groups_buffer,
const uint32_t entry_count,
const uint32_t h,
const T* key,
const uint32_t key_count) {
uint32_t off = h;
{
const uint64_t old =
atomicCAS(reinterpret_cast<T*>(groups_buffer + off), get_empty_key<T>(), *key);
if (old == get_empty_key<T>()) {
for (size_t i = 0; i < key_count; ++i) {
groups_buffer[off] = key[i];
off += entry_count;
}
return h;
}
}
__syncthreads();
off = h;
for (size_t i = 0; i < key_count; ++i) {
if (groups_buffer[off] != key[i]) {
return -1;
}
off += entry_count;
}
return h;
}
extern "C" __device__ int32_t
get_matching_group_value_columnar_slot(int64_t* groups_buffer,
const uint32_t entry_count,
const uint32_t h,
const int64_t* key,
const uint32_t key_count,
const uint32_t key_width) {
switch (key_width) {
case 4:
return get_matching_group_value_columnar_slot(
groups_buffer,
entry_count,
h,
reinterpret_cast<const unsigned int*>(key),
key_count);
case 8:
return get_matching_group_value_columnar_slot(
groups_buffer,
entry_count,
h,
reinterpret_cast<const unsigned long long*>(key),
key_count);
default:
return -1;
}
}
extern "C" __device__ int64_t* get_matching_group_value_columnar(
int64_t* groups_buffer,
const uint32_t h,
const int64_t* key,
const uint32_t key_qw_count,
const size_t entry_count) {
uint32_t off = h;
{
const uint64_t old = atomicCAS(
reinterpret_cast<unsigned long long*>(groups_buffer + off), EMPTY_KEY_64, *key);
if (EMPTY_KEY_64 == old) {
for (size_t i = 0; i < key_qw_count; ++i) {
groups_buffer[off] = key[i];
off += entry_count;
}
return &groups_buffer[off];
}
}
__syncthreads();
off = h;
for (size_t i = 0; i < key_qw_count; ++i) {
if (groups_buffer[off] != key[i]) {
return NULL;
}
off += entry_count;
}
return &groups_buffer[off];
}
#include "GroupByRuntime.cpp"
#include "JoinHashTableQueryRuntime.cpp"
#include "MurmurHash.cpp"
#include "TopKRuntime.cpp"
__device__ int64_t atomicMax64(int64_t* address, int64_t val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, max((long long)val, (long long)assumed));
} while (assumed != old);
return old;
}
__device__ int64_t atomicMin64(int64_t* address, int64_t val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, min((long long)val, (long long)assumed));
} while (assumed != old);
return old;
}
// As of 20160418, CUDA 8.0EA only defines `atomicAdd(double*, double)` for compute
// capability >= 6.0.
#if TORCH_HIP_VERSION < 8000 || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600)
__device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
__device__ double atomicMax(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
__double_as_longlong(max(val, __longlong_as_double(assumed))));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ float atomicMax(float* address, float val) {
int* address_as_int = (int*)address;
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(
address_as_int, assumed, __float_as_int(max(val, __int_as_float(assumed))));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __int_as_float(old);
}
__device__ double atomicMin(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
__double_as_longlong(min(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ double atomicMin(float* address, float val) {
int* address_as_ull = (int*)address;
int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(
address_as_ull, assumed, __float_as_int(min(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
extern "C" __device__ uint64_t agg_count_shared(uint64_t* agg, const int64_t val) {
return static_cast<uint64_t>(atomicAdd(reinterpret_cast<uint32_t*>(agg), 1UL));
}
extern "C" __device__ uint32_t agg_count_int32_shared(uint32_t* agg, const int32_t val) {
return atomicAdd(agg, 1UL);
}
extern "C" __device__ uint64_t agg_count_double_shared(uint64_t* agg, const double val) {
return agg_count_shared(agg, val);
}
extern "C" __device__ uint32_t agg_count_float_shared(uint32_t* agg, const float val) {
return agg_count_int32_shared(agg, val);
}
extern "C" __device__ int64_t agg_sum_shared(int64_t* agg, const int64_t val) {
return atomicAdd(reinterpret_cast<unsigned long long*>(agg), val);
}
extern "C" __device__ int32_t agg_sum_int32_shared(int32_t* agg, const int32_t val) {
return atomicAdd(agg, val);
}
extern "C" __device__ void agg_sum_float_shared(int32_t* agg, const float val) {
atomicAdd(reinterpret_cast<float*>(agg), val);
}
extern "C" __device__ void agg_sum_double_shared(int64_t* agg, const double val) {
atomicAdd(reinterpret_cast<double*>(agg), val);
}
extern "C" __device__ void agg_max_shared(int64_t* agg, const int64_t val) {
atomicMax64(agg, val);
}
extern "C" __device__ void agg_max_int32_shared(int32_t* agg, const int32_t val) {
atomicMax(agg, val);
}
extern "C" __device__ void agg_max_double_shared(int64_t* agg, const double val) {
atomicMax(reinterpret_cast<double*>(agg), val);
}
extern "C" __device__ void agg_max_float_shared(int32_t* agg, const float val) {
atomicMax(reinterpret_cast<float*>(agg), val);
}
extern "C" __device__ void agg_min_shared(int64_t* agg, const int64_t val) {
atomicMin64(agg, val);
}
extern "C" __device__ void agg_min_int32_shared(int32_t* agg, const int32_t val) {
atomicMin(agg, val);
}
extern "C" __device__ void agg_min_double_shared(int64_t* agg, const double val) {
atomicMin(reinterpret_cast<double*>(agg), val);
}
extern "C" __device__ void agg_min_float_shared(int32_t* agg, const float val) {
atomicMin(reinterpret_cast<float*>(agg), val);
}
extern "C" __device__ void agg_id_shared(int64_t* agg, const int64_t val) {
*agg = val;
}
#define DEF_AGG_ID_INT_SHARED(n) \
extern "C" __device__ void agg_id_int##n##_shared(int##n##_t* agg, \
const int##n##_t val) { \
*agg = val; \
}
DEF_AGG_ID_INT_SHARED(32)
DEF_AGG_ID_INT_SHARED(16)
DEF_AGG_ID_INT_SHARED(8)
#undef DEF_AGG_ID_INT_SHARED
extern "C" __device__ void agg_id_double_shared(int64_t* agg, const double val) {
*agg = *(reinterpret_cast<const int64_t*>(&val));
}
extern "C" __device__ void agg_id_double_shared_slow(int64_t* agg, const double* val) {
*agg = *(reinterpret_cast<const int64_t*>(val));
}
extern "C" __device__ void agg_id_float_shared(int32_t* agg, const float val) {
*agg = __float_as_int(val);
}
#define DEF_SKIP_AGG(base_agg_func) \
extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
return base_agg_func##_shared(agg, val); \
} \
return 0; \
}
#define DATA_T int64_t
#define ADDR_T uint64_t
DEF_SKIP_AGG(agg_count)
#undef DATA_T
#undef ADDR_T
#define DATA_T int32_t
#define ADDR_T uint32_t
DEF_SKIP_AGG(agg_count_int32)
#undef DATA_T
#undef ADDR_T
// Initial value for nullable column is INT32_MIN
extern "C" __device__ void agg_max_int32_skip_val_shared(int32_t* agg,
const int32_t val,
const int32_t skip_val) {
if (val != skip_val) {
agg_max_int32_shared(agg, val);
}
}
__device__ int32_t atomicMin32SkipVal(int32_t* address,
int32_t val,
const int32_t skip_val) {
int32_t old = atomicExch(address, INT_MAX);
return atomicMin(address, old == skip_val ? val : min(old, val));
}
extern "C" __device__ void agg_min_int32_skip_val_shared(int32_t* agg,
const int32_t val,
const int32_t skip_val) {
if (val != skip_val) {
atomicMin32SkipVal(agg, val, skip_val);
}
}
__device__ int32_t atomicSum32SkipVal(int32_t* address,
const int32_t val,
const int32_t skip_val) {
unsigned int* address_as_int = (unsigned int*)address;
int32_t old = atomicExch(address_as_int, 0);
int32_t old2 = atomicAdd(address_as_int, old == skip_val ? val : (val + old));
return old == skip_val ? old2 : (old2 + old);
}
extern "C" __device__ int32_t agg_sum_int32_skip_val_shared(int32_t* agg,
const int32_t val,
const int32_t skip_val) {
if (val != skip_val) {
const int32_t old = atomicSum32SkipVal(agg, val, skip_val);
return old;
}
return 0;
}
__device__ int64_t atomicSum64SkipVal(int64_t* address,
const int64_t val,
const int64_t skip_val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
int64_t old = atomicExch(address_as_ull, 0);
int64_t old2 = atomicAdd(address_as_ull, old == skip_val ? val : (val + old));
return old == skip_val ? old2 : (old2 + old);
}
extern "C" __device__ int64_t agg_sum_skip_val_shared(int64_t* agg,
const int64_t val,
const int64_t skip_val) {
if (val != skip_val) {
return atomicSum64SkipVal(agg, val, skip_val);
}
return 0;
}
__device__ int64_t atomicMin64SkipVal(int64_t* address,
int64_t val,
const int64_t skip_val) {
unsigned long long int* address_as_ull =
reinterpret_cast<unsigned long long int*>(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
assumed == skip_val ? val : min((long long)val, (long long)assumed));
} while (assumed != old);
return old;
}
extern "C" __device__ void agg_min_skip_val_shared(int64_t* agg,
const int64_t val,
const int64_t skip_val) {
if (val != skip_val) {
atomicMin64SkipVal(agg, val, skip_val);
}
}
__device__ int64_t atomicMax64SkipVal(int64_t* address,
int64_t val,
const int64_t skip_val) {
unsigned long long int* address_as_ull =
reinterpret_cast<unsigned long long int*>(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
assumed == skip_val ? val : max((long long)val, (long long)assumed));
} while (assumed != old);
return old;
}
extern "C" __device__ void agg_max_skip_val_shared(int64_t* agg,
const int64_t val,
const int64_t skip_val) {
if (val != skip_val) {
atomicMax64SkipVal(agg, val, skip_val);
}
}
#undef DEF_SKIP_AGG
#define DEF_SKIP_AGG(base_agg_func) \
extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
return base_agg_func##_shared(agg, val); \
} \
return *agg; \
}
#define DATA_T double
#define ADDR_T uint64_t
DEF_SKIP_AGG(agg_count_double)
#undef ADDR_T
#undef DATA_T
#define DATA_T float
#define ADDR_T uint32_t
DEF_SKIP_AGG(agg_count_float)
#undef ADDR_T
#undef DATA_T
// Initial value for nullable column is FLOAT_MIN
extern "C" __device__ void agg_max_float_skip_val_shared(int32_t* agg,
const float val,
const float skip_val) {
if (__float_as_int(val) != __float_as_int(skip_val)) {
float old = atomicExch(reinterpret_cast<float*>(agg), -FLT_MAX);
atomicMax(reinterpret_cast<float*>(agg),
__float_as_int(old) == __float_as_int(skip_val) ? val : fmaxf(old, val));
}
}
__device__ float atomicMinFltSkipVal(int32_t* address, float val, const float skip_val) {
float old = atomicExch(reinterpret_cast<float*>(address), FLT_MAX);
return atomicMin(
reinterpret_cast<float*>(address),
__float_as_int(old) == __float_as_int(skip_val) ? val : fminf(old, val));
}
extern "C" __device__ void agg_min_float_skip_val_shared(int32_t* agg,
const float val,
const float skip_val) {
if (__float_as_int(val) != __float_as_int(skip_val)) {
atomicMinFltSkipVal(agg, val, skip_val);
}
}
__device__ void atomicSumFltSkipVal(float* address,
const float val,
const float skip_val) {
float old = atomicExch(address, 0.f);
atomicAdd(address, __float_as_int(old) == __float_as_int(skip_val) ? val : (val + old));
}
extern "C" __device__ void agg_sum_float_skip_val_shared(int32_t* agg,
const float val,
const float skip_val) {
if (__float_as_int(val) != __float_as_int(skip_val)) {
atomicSumFltSkipVal(reinterpret_cast<float*>(agg), val, skip_val);
}
}
__device__ void atomicSumDblSkipVal(double* address,
const double val,
const double skip_val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
double old = __longlong_as_double(atomicExch(address_as_ull, __double_as_longlong(0.)));
atomicAdd(
address,
__double_as_longlong(old) == __double_as_longlong(skip_val) ? val : (val + old));
}
extern "C" __device__ void agg_sum_double_skip_val_shared(int64_t* agg,
const double val,
const double skip_val) {
if (__double_as_longlong(val) != __double_as_longlong(skip_val)) {
atomicSumDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val);
}
}
__device__ double atomicMinDblSkipVal(double* address,
double val,
const double skip_val) {
unsigned long long int* address_as_ull =
reinterpret_cast<unsigned long long int*>(address);
unsigned long long int old = *address_as_ull;
unsigned long long int skip_val_as_ull =
*reinterpret_cast<const unsigned long long*>(&skip_val);
unsigned long long int assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
assumed == skip_val_as_ull
? *reinterpret_cast<unsigned long long*>(&val)
: __double_as_longlong(min(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
extern "C" __device__ void agg_min_double_skip_val_shared(int64_t* agg,
const double val,
const double skip_val) {
if (val != skip_val) {
atomicMinDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val);
}
}
__device__ double atomicMaxDblSkipVal(double* address,
double val,
const double skip_val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull;
unsigned long long int skip_val_as_ull = *((unsigned long long int*)&skip_val);
unsigned long long int assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
assumed == skip_val_as_ull
? *((unsigned long long int*)&val)
: __double_as_longlong(max(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
extern "C" __device__ void agg_max_double_skip_val_shared(int64_t* agg,
const double val,
const double skip_val) {
if (val != skip_val) {
atomicMaxDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val);
}
}
#undef DEF_SKIP_AGG
#include "../Utils/ChunkIter.cpp"
#include "DateTruncate.cpp"
#include "ExtractFromTime.cpp"
#define EXECUTE_INCLUDE
#include "ArrayOps.cpp"
#include "DateAdd.cpp"
#include "StringFunctions.cpp"
#undef EXECUTE_INCLUDE
#include "../Utils/Regexp.cpp"
#include "../Utils/StringLike.cpp"
extern "C" __device__ uint64_t string_decode(int8_t* chunk_iter_, int64_t pos) {
// TODO(alex): de-dup, the x64 version is basically identical
ChunkIter* chunk_iter = reinterpret_cast<ChunkIter*>(chunk_iter_);
VarlenDatum vd;
bool is_end;
ChunkIter_get_nth(chunk_iter, pos, false, &vd, &is_end);
return vd.is_null ? 0
: (reinterpret_cast<uint64_t>(vd.pointer) & 0xffffffffffff) |
(static_cast<uint64_t>(vd.length) << 48);
}
extern "C" __device__ void linear_probabilistic_count(uint8_t* bitmap,
const uint32_t bitmap_bytes,
const uint8_t* key_bytes,
const uint32_t key_len) {
const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8);
const uint32_t word_idx = bit_pos / 32;
const uint32_t bit_idx = bit_pos % 32;
atomicOr(((uint32_t*)bitmap) + word_idx, 1 << bit_idx);
}
extern "C" __device__ void agg_count_distinct_bitmap_gpu(int64_t* agg,
const int64_t val,
const int64_t min_val,
const int64_t base_dev_addr,
const int64_t base_host_addr,
const uint64_t sub_bitmap_count,
const uint64_t bitmap_bytes) {
const uint64_t bitmap_idx = val - min_val;
const uint32_t byte_idx = bitmap_idx >> 3;
const uint32_t word_idx = byte_idx >> 2;
const uint32_t byte_word_idx = byte_idx & 3;
const int64_t host_addr = *agg;
uint32_t* bitmap = (uint32_t*)(base_dev_addr + host_addr - base_host_addr +
(threadIdx.x & (sub_bitmap_count - 1)) * bitmap_bytes);
switch (byte_word_idx) {
case 0:
atomicOr(&bitmap[word_idx], 1 << (bitmap_idx & 7));
break;
case 1:
atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 8));
break;
case 2:
atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 16));
break;
case 3:
atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 24));
break;
default:
break;
}
}
extern "C" __device__ void agg_count_distinct_bitmap_skip_val_gpu(
int64_t* agg,
const int64_t val,
const int64_t min_val,
const int64_t skip_val,
const int64_t base_dev_addr,
const int64_t base_host_addr,
const uint64_t sub_bitmap_count,
const uint64_t bitmap_bytes) {
if (val != skip_val) {
agg_count_distinct_bitmap_gpu(
agg, val, min_val, base_dev_addr, base_host_addr, sub_bitmap_count, bitmap_bytes);
}
}
extern "C" __device__ void agg_approximate_count_distinct_gpu(
int64_t* agg,
const int64_t key,
const uint32_t b,
const int64_t base_dev_addr,
const int64_t base_host_addr) {
const uint64_t hash = MurmurHash64A(&key, sizeof(key), 0);
const uint32_t index = hash >> (64 - b);
const int32_t rank = get_rank(hash << b, 64 - b);
const int64_t host_addr = *agg;
int32_t* M = (int32_t*)(base_dev_addr + host_addr - base_host_addr);
atomicMax(&M[index], rank);
}
extern "C" __device__ void force_sync() {
__threadfence_block();
}
extern "C" __device__ void sync_warp() {
#if (TORCH_HIP_VERSION >= 9000)
__syncwarp();
#endif
}
/**
 * Protected warp synchronization to make sure all (or none) threads within a warp go
 * through a synchronization barrier.
 * thread_pos: the current thread position to be used for a memory access
 * row_count: maximum number of rows to be processed
 * The function performs a warp sync iff all 32 threads within that warp will process
 * valid data. NOTE: it currently assumes that the warp size is 32.
*/
extern "C" __device__ void sync_warp_protected(int64_t thread_pos, int64_t row_count) {
#if (TORCH_HIP_VERSION >= 9000)
// only syncing if NOT within the same warp as those threads experiencing the critical
// edge
if ((((row_count - 1) | 0x1F) - thread_pos) >= 32) {
__syncwarp();
}
#endif
}
| abbadcddb87110bf7916dd8c279a1888679050a8.cu | #include <cuda.h>
#include <float.h>
#include <stdint.h>
#include <limits>
#include "BufferCompaction.h"
#include "ExtensionFunctions.hpp"
#include "GpuRtConstants.h"
#include "HyperLogLogRank.h"
extern "C" __device__ int32_t pos_start_impl(const int32_t* row_index_resume) {
return blockIdx.x * blockDim.x + threadIdx.x;
}
extern "C" __device__ int32_t group_buff_idx_impl() {
return pos_start_impl(NULL);
}
extern "C" __device__ int32_t pos_step_impl() {
return blockDim.x * gridDim.x;
}
extern "C" __device__ int8_t thread_warp_idx(const int8_t warp_sz) {
return threadIdx.x % warp_sz;
}
extern "C" __device__ const int64_t* init_shared_mem_nop(
const int64_t* groups_buffer,
const int32_t groups_buffer_size) {
return groups_buffer;
}
extern "C" __device__ void write_back_nop(int64_t* dest, int64_t* src, const int32_t sz) {
}
extern "C" __device__ const int64_t* init_shared_mem(const int64_t* groups_buffer,
const int32_t groups_buffer_size) {
extern __shared__ int64_t fast_bins[];
if (threadIdx.x == 0) {
memcpy(fast_bins, groups_buffer, groups_buffer_size);
}
__syncthreads();
return fast_bins;
}
/**
* Dynamically allocates shared memory per block.
* The amount of shared memory allocated is defined at kernel launch time.
* Returns a pointer to the beginning of allocated shared memory
*/
extern "C" __device__ int64_t* alloc_shared_mem_dynamic() {
extern __shared__ int64_t groups_buffer_smem[];
return groups_buffer_smem;
}
/**
* Set the allocated shared memory elements to be equal to the 'identity_element'.
* groups_buffer_size: number of 64-bit elements in shared memory per thread-block
* NOTE: groups_buffer_size is in units of 64-bit elements.
*/
extern "C" __device__ void set_shared_mem_to_identity(
int64_t* groups_buffer_smem,
const int32_t groups_buffer_size,
const int64_t identity_element = 0) {
#pragma unroll
for (int i = threadIdx.x; i < groups_buffer_size; i += blockDim.x) {
groups_buffer_smem[i] = identity_element;
}
__syncthreads();
}
/**
* Initialize dynamic shared memory:
* 1. Allocates dynamic shared memory
* 2. Set every allocated element to be equal to the 'identity element', by default zero.
*/
extern "C" __device__ const int64_t* init_shared_mem_dynamic(
const int64_t* groups_buffer,
const int32_t groups_buffer_size) {
int64_t* groups_buffer_smem = alloc_shared_mem_dynamic();
set_shared_mem_to_identity(groups_buffer_smem, groups_buffer_size);
return groups_buffer_smem;
}
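// Illustrative sketch (not part of the original runtime): shows how the dynamic
// shared memory helpers above are meant to be used. A hypothetical kernel calls
// init_shared_mem_dynamic(), and the host passes the dynamic shared memory byte
// count as the third launch parameter. The kernel name below is made up for
// illustration only.
__global__ void example_init_smem_kernel(const int64_t* groups_buffer,
                                         const int32_t groups_buffer_size) {
  // Each block gets its own zero-initialized copy of the group-by buffer in
  // dynamic shared memory (groups_buffer_size is in 64-bit elements).
  const int64_t* smem_groups =
      init_shared_mem_dynamic(groups_buffer, groups_buffer_size);
  (void)smem_groups;  // ... aggregation into smem_groups would go here ...
}
// Host-side launch sketch:
//   example_init_smem_kernel<<<grid_size, block_size,
//                              groups_buffer_size * sizeof(int64_t)>>>(
//       dev_groups_buffer, groups_buffer_size);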
extern "C" __device__ void write_back(int64_t* dest, int64_t* src, const int32_t sz) {
__syncthreads();
if (threadIdx.x == 0) {
memcpy(dest, src, sz);
}
}
extern "C" __device__ void write_back_smem_nop(int64_t* dest,
int64_t* src,
const int32_t sz) {}
extern "C" __device__ void agg_from_smem_to_gmem_nop(int64_t* gmem_dest,
int64_t* smem_src,
const int32_t num_elements) {}
/**
 * Aggregate the result stored in shared memory back into global memory.
 * It also writes the stored binId, if any, back into global memory.
* Memory layout assumption: each 64-bit shared memory unit of data is as follows:
* [0..31: the stored bin ID, to be written back][32..63: the count result, to be
* aggregated]
*/
extern "C" __device__ void agg_from_smem_to_gmem_binId_count(int64_t* gmem_dest,
int64_t* smem_src,
const int32_t num_elements) {
__syncthreads();
#pragma unroll
for (int i = threadIdx.x; i < num_elements; i += blockDim.x) {
int32_t bin_id = *reinterpret_cast<int32_t*>(smem_src + i);
int32_t count_result = *(reinterpret_cast<int32_t*>(smem_src + i) + 1);
if (count_result) { // non-zero count
atomicAdd(reinterpret_cast<unsigned int*>(gmem_dest + i) + 1,
static_cast<int32_t>(count_result));
// writing back the binId, only if count_result is non-zero
*reinterpret_cast<unsigned int*>(gmem_dest + i) = static_cast<int32_t>(bin_id);
}
}
}
/**
 * Aggregate the result stored in shared memory back into global memory.
 * It also writes the stored binId, if any, back into global memory.
* Memory layout assumption: each 64-bit shared memory unit of data is as follows:
* [0..31: the count result, to be aggregated][32..63: the stored bin ID, to be written
* back]
*/
extern "C" __device__ void agg_from_smem_to_gmem_count_binId(int64_t* gmem_dest,
int64_t* smem_src,
const int32_t num_elements) {
__syncthreads();
#pragma unroll
for (int i = threadIdx.x; i < num_elements; i += blockDim.x) {
int32_t count_result = *reinterpret_cast<int32_t*>(smem_src + i);
int32_t bin_id = *(reinterpret_cast<int32_t*>(smem_src + i) + 1);
if (count_result) { // non-zero count
atomicAdd(reinterpret_cast<unsigned int*>(gmem_dest + i),
static_cast<int32_t>(count_result));
// writing back the binId, only if count_result is non-zero
*(reinterpret_cast<unsigned int*>(gmem_dest + i) + 1) =
static_cast<int32_t>(bin_id);
}
}
}
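// Illustrative sketch (not part of the original runtime): demonstrates the 64-bit
// packing assumed by agg_from_smem_to_gmem_count_binId on a little-endian GPU --
// the count occupies bits 0..31 of a shared memory slot and the bin ID occupies
// bits 32..63. The helper name is made up for illustration only.
__device__ inline int64_t example_pack_count_binId(const int32_t count_result,
                                                   const int32_t bin_id) {
  return static_cast<int64_t>(
      (static_cast<uint64_t>(static_cast<uint32_t>(bin_id)) << 32) |
      static_cast<uint64_t>(static_cast<uint32_t>(count_result)));
}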
#define init_group_by_buffer_gpu_impl init_group_by_buffer_gpu
#include "GpuInitGroups.cu"
#undef init_group_by_buffer_gpu_impl
// Dynamic watchdog: monitoring up to 128 SMs. E.g. a GP100 config may have 60:
// 6 Graphics Processing Clusters (GPCs) * 10 Streaming Multiprocessors
// TODO(Saman): move these into a kernel parameter, allocated and initialized through CUDA
__device__ int64_t dw_sm_cycle_start[128]; // Set from host before launching the kernel
// TODO(Saman): make this cycle budget something constant in codegen level
__device__ int64_t dw_cycle_budget = 0; // Set from host before launching the kernel
__device__ int32_t dw_abort = 0; // TBD: set from host (async)
__inline__ __device__ uint32_t get_smid(void) {
uint32_t ret;
asm("mov.u32 %0, %%smid;" : "=r"(ret));
return ret;
}
/*
 * The main objective of this function is to return true if either of the following two
 * scenarios happens:
 * 1. a host request to abort the kernel execution has been received
 * 2. the kernel execution has taken more clock cycles than it was initially allowed
 * The assumption is that all (or none) threads within a block return true for the
 * watchdog, and the first thread within each block compares the recorded clock cycles for
 * its occupying SM with the allowed budget. It also assumes that all threads entering
 * this function are active (no critical edge exposure).
* NOTE: dw_cycle_budget, dw_abort, and dw_sm_cycle_start[] are all variables in global
* memory scope.
*/
extern "C" __device__ bool dynamic_watchdog() {
// check for dynamic watchdog, if triggered all threads return true
if (dw_cycle_budget == 0LL) {
return false; // Uninitialized watchdog can't check time
}
if (dw_abort == 1) {
return true; // Received host request to abort
}
uint32_t smid = get_smid();
if (smid >= 128) {
return false;
}
__shared__ volatile int64_t dw_block_cycle_start; // Thread block shared cycle start
__shared__ volatile bool
dw_should_terminate; // all threads within a block should return together if
// watchdog criteria is met
  // thread 0 either initializes or reads the initial clock cycle; the result is stored
  // in shared memory. Since all threads within a block share the same SM, there's no
// point in using more threads here.
if (threadIdx.x == 0) {
dw_block_cycle_start = 0LL;
int64_t cycle_count = static_cast<int64_t>(clock64());
// Make sure the block hasn't switched SMs
if (smid == get_smid()) {
dw_block_cycle_start = static_cast<int64_t>(
atomicCAS(reinterpret_cast<unsigned long long*>(&dw_sm_cycle_start[smid]),
0ULL,
static_cast<unsigned long long>(cycle_count)));
}
int64_t cycles = cycle_count - dw_block_cycle_start;
if ((smid == get_smid()) && (dw_block_cycle_start > 0LL) &&
(cycles > dw_cycle_budget)) {
// Check if we're out of time on this particular SM
dw_should_terminate = true;
} else {
dw_should_terminate = false;
}
}
__syncthreads();
return dw_should_terminate;
}
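// Illustrative sketch (not part of the original runtime): a hypothetical long-running
// device loop would poll dynamic_watchdog() periodically and bail out once the cycle
// budget is exhausted or the host has requested an abort. The function name and the
// polling interval below are made up for illustration only.
__device__ int32_t example_watchdog_loop(const int64_t row_count) {
  for (int64_t row = 0; row < row_count; ++row) {
    if ((row & 0xFFFF) == 0 && dynamic_watchdog()) {
      return -1;  // interrupted: ran out of cycles or the host set dw_abort
    }
    // ... per-row work would go here ...
  }
  return 0;
}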
template <typename T = unsigned long long>
inline __device__ T get_empty_key() {
return EMPTY_KEY_64;
}
template <>
inline __device__ unsigned int get_empty_key() {
return EMPTY_KEY_32;
}
template <typename T>
inline __device__ int64_t* get_matching_group_value(int64_t* groups_buffer,
const uint32_t h,
const T* key,
const uint32_t key_count,
const uint32_t row_size_quad) {
const T empty_key = get_empty_key<T>();
uint32_t off = h * row_size_quad;
auto row_ptr = reinterpret_cast<T*>(groups_buffer + off);
{
const T old = atomicCAS(row_ptr, empty_key, *key);
if (empty_key == old && key_count > 1) {
for (size_t i = 1; i <= key_count - 1; ++i) {
atomicExch(row_ptr + i, key[i]);
}
}
}
if (key_count > 1) {
while (atomicAdd(row_ptr + key_count - 1, 0) == empty_key) {
// spin until the winning thread has finished writing the entire key and the init
// value
}
}
bool match = true;
for (uint32_t i = 0; i < key_count; ++i) {
if (row_ptr[i] != key[i]) {
match = false;
break;
}
}
if (match) {
auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count);
return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8));
}
return NULL;
}
extern "C" __device__ int64_t* get_matching_group_value(int64_t* groups_buffer,
const uint32_t h,
const int64_t* key,
const uint32_t key_count,
const uint32_t key_width,
const uint32_t row_size_quad,
const int64_t* init_vals) {
switch (key_width) {
case 4:
return get_matching_group_value(groups_buffer,
h,
reinterpret_cast<const unsigned int*>(key),
key_count,
row_size_quad);
case 8:
return get_matching_group_value(groups_buffer,
h,
reinterpret_cast<const unsigned long long*>(key),
key_count,
row_size_quad);
default:
return NULL;
}
}
template <typename T>
__device__ int32_t get_matching_group_value_columnar_slot(int64_t* groups_buffer,
const uint32_t entry_count,
const uint32_t h,
const T* key,
const uint32_t key_count) {
uint32_t off = h;
{
const uint64_t old =
atomicCAS(reinterpret_cast<T*>(groups_buffer + off), get_empty_key<T>(), *key);
if (old == get_empty_key<T>()) {
for (size_t i = 0; i < key_count; ++i) {
groups_buffer[off] = key[i];
off += entry_count;
}
return h;
}
}
__syncthreads();
off = h;
for (size_t i = 0; i < key_count; ++i) {
if (groups_buffer[off] != key[i]) {
return -1;
}
off += entry_count;
}
return h;
}
extern "C" __device__ int32_t
get_matching_group_value_columnar_slot(int64_t* groups_buffer,
const uint32_t entry_count,
const uint32_t h,
const int64_t* key,
const uint32_t key_count,
const uint32_t key_width) {
switch (key_width) {
case 4:
return get_matching_group_value_columnar_slot(
groups_buffer,
entry_count,
h,
reinterpret_cast<const unsigned int*>(key),
key_count);
case 8:
return get_matching_group_value_columnar_slot(
groups_buffer,
entry_count,
h,
reinterpret_cast<const unsigned long long*>(key),
key_count);
default:
return -1;
}
}
extern "C" __device__ int64_t* get_matching_group_value_columnar(
int64_t* groups_buffer,
const uint32_t h,
const int64_t* key,
const uint32_t key_qw_count,
const size_t entry_count) {
uint32_t off = h;
{
const uint64_t old = atomicCAS(
reinterpret_cast<unsigned long long*>(groups_buffer + off), EMPTY_KEY_64, *key);
if (EMPTY_KEY_64 == old) {
for (size_t i = 0; i < key_qw_count; ++i) {
groups_buffer[off] = key[i];
off += entry_count;
}
return &groups_buffer[off];
}
}
__syncthreads();
off = h;
for (size_t i = 0; i < key_qw_count; ++i) {
if (groups_buffer[off] != key[i]) {
return NULL;
}
off += entry_count;
}
return &groups_buffer[off];
}
#include "GroupByRuntime.cpp"
#include "JoinHashTableQueryRuntime.cpp"
#include "MurmurHash.cpp"
#include "TopKRuntime.cpp"
__device__ int64_t atomicMax64(int64_t* address, int64_t val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, max((long long)val, (long long)assumed));
} while (assumed != old);
return old;
}
__device__ int64_t atomicMin64(int64_t* address, int64_t val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, min((long long)val, (long long)assumed));
} while (assumed != old);
return old;
}
// As of 20160418, CUDA 8.0EA only defines `atomicAdd(double*, double)` for compute
// capability >= 6.0.
#if CUDA_VERSION < 8000 || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600)
__device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
__device__ double atomicMax(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
__double_as_longlong(max(val, __longlong_as_double(assumed))));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ float atomicMax(float* address, float val) {
int* address_as_int = (int*)address;
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(
address_as_int, assumed, __float_as_int(max(val, __int_as_float(assumed))));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __int_as_float(old);
}
__device__ double atomicMin(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
__double_as_longlong(min(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ double atomicMin(float* address, float val) {
int* address_as_ull = (int*)address;
int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(
address_as_ull, assumed, __float_as_int(min(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
extern "C" __device__ uint64_t agg_count_shared(uint64_t* agg, const int64_t val) {
return static_cast<uint64_t>(atomicAdd(reinterpret_cast<uint32_t*>(agg), 1UL));
}
extern "C" __device__ uint32_t agg_count_int32_shared(uint32_t* agg, const int32_t val) {
return atomicAdd(agg, 1UL);
}
extern "C" __device__ uint64_t agg_count_double_shared(uint64_t* agg, const double val) {
return agg_count_shared(agg, val);
}
extern "C" __device__ uint32_t agg_count_float_shared(uint32_t* agg, const float val) {
return agg_count_int32_shared(agg, val);
}
extern "C" __device__ int64_t agg_sum_shared(int64_t* agg, const int64_t val) {
return atomicAdd(reinterpret_cast<unsigned long long*>(agg), val);
}
extern "C" __device__ int32_t agg_sum_int32_shared(int32_t* agg, const int32_t val) {
return atomicAdd(agg, val);
}
extern "C" __device__ void agg_sum_float_shared(int32_t* agg, const float val) {
atomicAdd(reinterpret_cast<float*>(agg), val);
}
extern "C" __device__ void agg_sum_double_shared(int64_t* agg, const double val) {
atomicAdd(reinterpret_cast<double*>(agg), val);
}
extern "C" __device__ void agg_max_shared(int64_t* agg, const int64_t val) {
atomicMax64(agg, val);
}
extern "C" __device__ void agg_max_int32_shared(int32_t* agg, const int32_t val) {
atomicMax(agg, val);
}
extern "C" __device__ void agg_max_double_shared(int64_t* agg, const double val) {
atomicMax(reinterpret_cast<double*>(agg), val);
}
extern "C" __device__ void agg_max_float_shared(int32_t* agg, const float val) {
atomicMax(reinterpret_cast<float*>(agg), val);
}
extern "C" __device__ void agg_min_shared(int64_t* agg, const int64_t val) {
atomicMin64(agg, val);
}
extern "C" __device__ void agg_min_int32_shared(int32_t* agg, const int32_t val) {
atomicMin(agg, val);
}
extern "C" __device__ void agg_min_double_shared(int64_t* agg, const double val) {
atomicMin(reinterpret_cast<double*>(agg), val);
}
extern "C" __device__ void agg_min_float_shared(int32_t* agg, const float val) {
atomicMin(reinterpret_cast<float*>(agg), val);
}
extern "C" __device__ void agg_id_shared(int64_t* agg, const int64_t val) {
*agg = val;
}
#define DEF_AGG_ID_INT_SHARED(n) \
extern "C" __device__ void agg_id_int##n##_shared(int##n##_t* agg, \
const int##n##_t val) { \
*agg = val; \
}
DEF_AGG_ID_INT_SHARED(32)
DEF_AGG_ID_INT_SHARED(16)
DEF_AGG_ID_INT_SHARED(8)
#undef DEF_AGG_ID_INT_SHARED
extern "C" __device__ void agg_id_double_shared(int64_t* agg, const double val) {
*agg = *(reinterpret_cast<const int64_t*>(&val));
}
extern "C" __device__ void agg_id_double_shared_slow(int64_t* agg, const double* val) {
*agg = *(reinterpret_cast<const int64_t*>(val));
}
extern "C" __device__ void agg_id_float_shared(int32_t* agg, const float val) {
*agg = __float_as_int(val);
}
#define DEF_SKIP_AGG(base_agg_func) \
extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
return base_agg_func##_shared(agg, val); \
} \
return 0; \
}
#define DATA_T int64_t
#define ADDR_T uint64_t
DEF_SKIP_AGG(agg_count)
#undef DATA_T
#undef ADDR_T
#define DATA_T int32_t
#define ADDR_T uint32_t
DEF_SKIP_AGG(agg_count_int32)
#undef DATA_T
#undef ADDR_T
// Initial value for nullable column is INT32_MIN
extern "C" __device__ void agg_max_int32_skip_val_shared(int32_t* agg,
const int32_t val,
const int32_t skip_val) {
if (val != skip_val) {
agg_max_int32_shared(agg, val);
}
}
__device__ int32_t atomicMin32SkipVal(int32_t* address,
int32_t val,
const int32_t skip_val) {
int32_t old = atomicExch(address, INT_MAX);
return atomicMin(address, old == skip_val ? val : min(old, val));
}
extern "C" __device__ void agg_min_int32_skip_val_shared(int32_t* agg,
const int32_t val,
const int32_t skip_val) {
if (val != skip_val) {
atomicMin32SkipVal(agg, val, skip_val);
}
}
__device__ int32_t atomicSum32SkipVal(int32_t* address,
const int32_t val,
const int32_t skip_val) {
unsigned int* address_as_int = (unsigned int*)address;
int32_t old = atomicExch(address_as_int, 0);
int32_t old2 = atomicAdd(address_as_int, old == skip_val ? val : (val + old));
return old == skip_val ? old2 : (old2 + old);
}
extern "C" __device__ int32_t agg_sum_int32_skip_val_shared(int32_t* agg,
const int32_t val,
const int32_t skip_val) {
if (val != skip_val) {
const int32_t old = atomicSum32SkipVal(agg, val, skip_val);
return old;
}
return 0;
}
__device__ int64_t atomicSum64SkipVal(int64_t* address,
const int64_t val,
const int64_t skip_val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
int64_t old = atomicExch(address_as_ull, 0);
int64_t old2 = atomicAdd(address_as_ull, old == skip_val ? val : (val + old));
return old == skip_val ? old2 : (old2 + old);
}
extern "C" __device__ int64_t agg_sum_skip_val_shared(int64_t* agg,
const int64_t val,
const int64_t skip_val) {
if (val != skip_val) {
return atomicSum64SkipVal(agg, val, skip_val);
}
return 0;
}
__device__ int64_t atomicMin64SkipVal(int64_t* address,
int64_t val,
const int64_t skip_val) {
unsigned long long int* address_as_ull =
reinterpret_cast<unsigned long long int*>(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
assumed == skip_val ? val : min((long long)val, (long long)assumed));
} while (assumed != old);
return old;
}
extern "C" __device__ void agg_min_skip_val_shared(int64_t* agg,
const int64_t val,
const int64_t skip_val) {
if (val != skip_val) {
atomicMin64SkipVal(agg, val, skip_val);
}
}
__device__ int64_t atomicMax64SkipVal(int64_t* address,
int64_t val,
const int64_t skip_val) {
unsigned long long int* address_as_ull =
reinterpret_cast<unsigned long long int*>(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
assumed == skip_val ? val : max((long long)val, (long long)assumed));
} while (assumed != old);
return old;
}
extern "C" __device__ void agg_max_skip_val_shared(int64_t* agg,
const int64_t val,
const int64_t skip_val) {
if (val != skip_val) {
atomicMax64SkipVal(agg, val, skip_val);
}
}
#undef DEF_SKIP_AGG
#define DEF_SKIP_AGG(base_agg_func) \
extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
return base_agg_func##_shared(agg, val); \
} \
return *agg; \
}
#define DATA_T double
#define ADDR_T uint64_t
DEF_SKIP_AGG(agg_count_double)
#undef ADDR_T
#undef DATA_T
#define DATA_T float
#define ADDR_T uint32_t
DEF_SKIP_AGG(agg_count_float)
#undef ADDR_T
#undef DATA_T
// Initial value for nullable column is FLOAT_MIN
extern "C" __device__ void agg_max_float_skip_val_shared(int32_t* agg,
const float val,
const float skip_val) {
if (__float_as_int(val) != __float_as_int(skip_val)) {
float old = atomicExch(reinterpret_cast<float*>(agg), -FLT_MAX);
atomicMax(reinterpret_cast<float*>(agg),
__float_as_int(old) == __float_as_int(skip_val) ? val : fmaxf(old, val));
}
}
__device__ float atomicMinFltSkipVal(int32_t* address, float val, const float skip_val) {
float old = atomicExch(reinterpret_cast<float*>(address), FLT_MAX);
return atomicMin(
reinterpret_cast<float*>(address),
__float_as_int(old) == __float_as_int(skip_val) ? val : fminf(old, val));
}
extern "C" __device__ void agg_min_float_skip_val_shared(int32_t* agg,
const float val,
const float skip_val) {
if (__float_as_int(val) != __float_as_int(skip_val)) {
atomicMinFltSkipVal(agg, val, skip_val);
}
}
__device__ void atomicSumFltSkipVal(float* address,
const float val,
const float skip_val) {
float old = atomicExch(address, 0.f);
atomicAdd(address, __float_as_int(old) == __float_as_int(skip_val) ? val : (val + old));
}
extern "C" __device__ void agg_sum_float_skip_val_shared(int32_t* agg,
const float val,
const float skip_val) {
if (__float_as_int(val) != __float_as_int(skip_val)) {
atomicSumFltSkipVal(reinterpret_cast<float*>(agg), val, skip_val);
}
}
__device__ void atomicSumDblSkipVal(double* address,
const double val,
const double skip_val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
double old = __longlong_as_double(atomicExch(address_as_ull, __double_as_longlong(0.)));
atomicAdd(
address,
__double_as_longlong(old) == __double_as_longlong(skip_val) ? val : (val + old));
}
extern "C" __device__ void agg_sum_double_skip_val_shared(int64_t* agg,
const double val,
const double skip_val) {
if (__double_as_longlong(val) != __double_as_longlong(skip_val)) {
atomicSumDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val);
}
}
__device__ double atomicMinDblSkipVal(double* address,
double val,
const double skip_val) {
unsigned long long int* address_as_ull =
reinterpret_cast<unsigned long long int*>(address);
unsigned long long int old = *address_as_ull;
unsigned long long int skip_val_as_ull =
*reinterpret_cast<const unsigned long long*>(&skip_val);
unsigned long long int assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
assumed == skip_val_as_ull
? *reinterpret_cast<unsigned long long*>(&val)
: __double_as_longlong(min(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
extern "C" __device__ void agg_min_double_skip_val_shared(int64_t* agg,
const double val,
const double skip_val) {
if (val != skip_val) {
atomicMinDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val);
}
}
__device__ double atomicMaxDblSkipVal(double* address,
double val,
const double skip_val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull;
unsigned long long int skip_val_as_ull = *((unsigned long long int*)&skip_val);
unsigned long long int assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
assumed == skip_val_as_ull
? *((unsigned long long int*)&val)
: __double_as_longlong(max(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
extern "C" __device__ void agg_max_double_skip_val_shared(int64_t* agg,
const double val,
const double skip_val) {
if (val != skip_val) {
atomicMaxDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val);
}
}
#undef DEF_SKIP_AGG
#include "../Utils/ChunkIter.cpp"
#include "DateTruncate.cpp"
#include "ExtractFromTime.cpp"
#define EXECUTE_INCLUDE
#include "ArrayOps.cpp"
#include "DateAdd.cpp"
#include "StringFunctions.cpp"
#undef EXECUTE_INCLUDE
#include "../Utils/Regexp.cpp"
#include "../Utils/StringLike.cpp"
extern "C" __device__ uint64_t string_decode(int8_t* chunk_iter_, int64_t pos) {
// TODO(alex): de-dup, the x64 version is basically identical
ChunkIter* chunk_iter = reinterpret_cast<ChunkIter*>(chunk_iter_);
VarlenDatum vd;
bool is_end;
ChunkIter_get_nth(chunk_iter, pos, false, &vd, &is_end);
return vd.is_null ? 0
: (reinterpret_cast<uint64_t>(vd.pointer) & 0xffffffffffff) |
(static_cast<uint64_t>(vd.length) << 48);
}
extern "C" __device__ void linear_probabilistic_count(uint8_t* bitmap,
const uint32_t bitmap_bytes,
const uint8_t* key_bytes,
const uint32_t key_len) {
const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8);
const uint32_t word_idx = bit_pos / 32;
const uint32_t bit_idx = bit_pos % 32;
atomicOr(((uint32_t*)bitmap) + word_idx, 1 << bit_idx);
}
extern "C" __device__ void agg_count_distinct_bitmap_gpu(int64_t* agg,
const int64_t val,
const int64_t min_val,
const int64_t base_dev_addr,
const int64_t base_host_addr,
const uint64_t sub_bitmap_count,
const uint64_t bitmap_bytes) {
const uint64_t bitmap_idx = val - min_val;
const uint32_t byte_idx = bitmap_idx >> 3;
const uint32_t word_idx = byte_idx >> 2;
const uint32_t byte_word_idx = byte_idx & 3;
const int64_t host_addr = *agg;
uint32_t* bitmap = (uint32_t*)(base_dev_addr + host_addr - base_host_addr +
(threadIdx.x & (sub_bitmap_count - 1)) * bitmap_bytes);
switch (byte_word_idx) {
case 0:
atomicOr(&bitmap[word_idx], 1 << (bitmap_idx & 7));
break;
case 1:
atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 8));
break;
case 2:
atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 16));
break;
case 3:
atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 24));
break;
default:
break;
}
}
extern "C" __device__ void agg_count_distinct_bitmap_skip_val_gpu(
int64_t* agg,
const int64_t val,
const int64_t min_val,
const int64_t skip_val,
const int64_t base_dev_addr,
const int64_t base_host_addr,
const uint64_t sub_bitmap_count,
const uint64_t bitmap_bytes) {
if (val != skip_val) {
agg_count_distinct_bitmap_gpu(
agg, val, min_val, base_dev_addr, base_host_addr, sub_bitmap_count, bitmap_bytes);
}
}
extern "C" __device__ void agg_approximate_count_distinct_gpu(
int64_t* agg,
const int64_t key,
const uint32_t b,
const int64_t base_dev_addr,
const int64_t base_host_addr) {
const uint64_t hash = MurmurHash64A(&key, sizeof(key), 0);
const uint32_t index = hash >> (64 - b);
const int32_t rank = get_rank(hash << b, 64 - b);
const int64_t host_addr = *agg;
int32_t* M = (int32_t*)(base_dev_addr + host_addr - base_host_addr);
atomicMax(&M[index], rank);
}
extern "C" __device__ void force_sync() {
__threadfence_block();
}
extern "C" __device__ void sync_warp() {
#if (CUDA_VERSION >= 9000)
__syncwarp();
#endif
}
/**
 * Protected warp synchronization to make sure all (or none) threads within a warp go
 * through a synchronization barrier.
 * thread_pos: the current thread position to be used for a memory access
 * row_count: maximum number of rows to be processed
 * The function performs a warp sync iff all 32 threads within that warp will process
 * valid data. NOTE: it currently assumes that the warp size is 32.
*/
extern "C" __device__ void sync_warp_protected(int64_t thread_pos, int64_t row_count) {
#if (CUDA_VERSION >= 9000)
// only syncing if NOT within the same warp as those threads experiencing the critical
// edge
if ((((row_count - 1) | 0x1F) - thread_pos) >= 32) {
__syncwarp();
}
#endif
}
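// Illustrative sketch (not part of the original runtime): shows the intended usage of
// sync_warp_protected() -- inside a row loop, a warp-level barrier is only issued when
// every lane of the warp is still processing valid rows. The kernel name is made up
// for illustration only.
__global__ void example_warp_sync_kernel(const int64_t row_count) {
  const int64_t thread_pos = blockIdx.x * blockDim.x + threadIdx.x;
  if (thread_pos < row_count) {
    // ... per-row work that needs intra-warp coordination would go here ...
    sync_warp_protected(thread_pos, row_count);
  }
}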
|
ea4e8cc6b902701a55a2325b2d2f74fdd1f1e0e3.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __GQD_COMMON_CU__
#define __GQD_COMMON_CU__
#include <stdio.h>
//#include <stdlib.h>
#include <limits.h>
//#include <float.h>
//#include "gqd_type.h" //type definitions for gdd_real and gqd_real
#include "cuda_header.h"
#include "gdd_real.h"
#include "gqd_real.h"
//#include "inline.cu" //basic functions used by both gdd_real and gqd_real
//#include "gqd_real.h"
//#define SLOPPY_ADD 1
//#define SLOPPY_MUL 1
//#define SLOPPY_DIV 1
//#define USE_FMA 1
/* type definitions, defined in the type.h */
union type_trans_dbl{
__int64 i64_value;
double dbl_value;
};
/** initialization function */
void GDDStart(int device) {
printf("GDD turns on ...");
hipSetDevice(device);
hipStream_t st_spcl_val;
hipStreamCreate(&st_spcl_val);
double h_special_tbl[2];
type_trans_dbl trans;
h_special_tbl[0] = std::numeric_limits<double>::infinity();
trans.i64_value = (0x7ff0000000000000ULL); //CUDART_INF
h_special_tbl[1] = trans.dbl_value;
checkCudaErrors(hipMemcpyToSymbolAsync(__qd_inf, &h_special_tbl, sizeof(h_special_tbl), 0, hipMemcpyHostToDevice, st_spcl_val));
hipStreamSynchronize(st_spcl_val);
h_special_tbl[0] = std::numeric_limits<double>::quiet_NaN();
trans.i64_value = (0xfff8000000000000ULL); //CUDART_NAN
h_special_tbl[1] = trans.dbl_value;
checkCudaErrors(hipMemcpyToSymbolAsync(__qd_qnan, &h_special_tbl, sizeof(h_special_tbl), 0, hipMemcpyHostToDevice, st_spcl_val));
hipStreamSynchronize(st_spcl_val);
hipStreamDestroy(st_spcl_val);
printf("\tdone.\n");
}
void GDDEnd() {
printf("GDD turns off...");
hipDeviceReset();
printf("\tdone.\n");
}
void GQDEnd() {
printf("GQD turns off...");
hipDeviceReset();
printf("\tdone.\n");
}
void GQDStart(int device) {
printf("GQD turns on ...");
hipSetDevice(device);
hipStream_t st_spcl_val;
hipStreamCreate(&st_spcl_val);
double h_special_tbl[2];
type_trans_dbl trans;
h_special_tbl[0] = std::numeric_limits<double>::infinity();
trans.i64_value = (0x7ff0000000000000ULL); //CUDART_INF
h_special_tbl[1] = trans.dbl_value;
checkCudaErrors(hipMemcpyToSymbolAsync(__dd_inf, &h_special_tbl, sizeof(h_special_tbl), 0, hipMemcpyHostToDevice, st_spcl_val));
hipStreamSynchronize(st_spcl_val);
h_special_tbl[0] = std::numeric_limits<double>::quiet_NaN();
trans.i64_value = (0xfff8000000000000ULL); //CUDART_NAN
h_special_tbl[1] = trans.dbl_value;
checkCudaErrors(hipMemcpyToSymbolAsync(__dd_qnan, &h_special_tbl, sizeof(h_special_tbl), 0, hipMemcpyHostToDevice, st_spcl_val));
hipStreamSynchronize(st_spcl_val);
hipStreamDestroy(st_spcl_val);
printf("\tdone.\n");
}
//__host__
//int convSPValForHost(gdd_real *buff, unsigned int elements){
// double h_qnan = std::numeric_limits<double>::quiet_NaN();
// double h_inf = std::numeric_limits<double>::infinity();
//
// for (unsigned int i = 0; i < elements; i++){
// // transform to hosts qnan if element is gpu_nan
// gdd_real t = buff[i];
// if (isnan(t)){
// buff[i].dd.x = h_qnan;
// buff[i].dd.y = h_qnan;
// } else if (isinf(t)) {
// buff[i].dd.x = h_inf;
// buff[i].dd.y = h_inf;
// } else {
// // nothing to do
// }
// i++;
// }
// return 0;
//}
//__device__
//int convSPValFordevice(gdd_real *buff, unsigned int elements){
// for (unsigned int i = 0; i < elements; i++) {
// // transform to hosts qnan if element is gpu_nan
// gdd_real t = buff[i];
// if (isnan(t)) {
// buff[i] = _dd_qnan;
// } else if (isinf(t)) {
// buff[i] = _dd_inf;
// } else {
// // nothing to do
// }
// i++;
// }
// return 0;
//}
#endif /* __GQD_COMMON_CU__ */
| ea4e8cc6b902701a55a2325b2d2f74fdd1f1e0e3.cu | #ifndef __GQD_COMMON_CU__
#define __GQD_COMMON_CU__
#include <stdio.h>
//#include <stdlib.h>
#include <limits.h>
//#include <float.h>
//#include "gqd_type.h" //type definitions for gdd_real and gqd_real
#include "cuda_header.h"
#include "gdd_real.h"
#include "gqd_real.h"
//#include "inline.cu" //basic functions used by both gdd_real and gqd_real
//#include "gqd_real.h"
//#define SLOPPY_ADD 1
//#define SLOPPY_MUL 1
//#define SLOPPY_DIV 1
//#define USE_FMA 1
/* type definitions, defined in the type.h */
union type_trans_dbl{
__int64 i64_value;
double dbl_value;
};
/** initialization function */
void GDDStart(int device) {
printf("GDD turns on ...");
cudaSetDevice(device);
cudaStream_t st_spcl_val;
cudaStreamCreate(&st_spcl_val);
double h_special_tbl[2];
type_trans_dbl trans;
h_special_tbl[0] = std::numeric_limits<double>::infinity();
trans.i64_value = (0x7ff0000000000000ULL); //CUDART_INF
h_special_tbl[1] = trans.dbl_value;
checkCudaErrors(cudaMemcpyToSymbolAsync(__qd_inf, &h_special_tbl, sizeof(h_special_tbl), 0, cudaMemcpyHostToDevice, st_spcl_val));
cudaStreamSynchronize(st_spcl_val);
h_special_tbl[0] = std::numeric_limits<double>::quiet_NaN();
trans.i64_value = (0xfff8000000000000ULL); //CUDART_NAN
h_special_tbl[1] = trans.dbl_value;
checkCudaErrors(cudaMemcpyToSymbolAsync(__qd_qnan, &h_special_tbl, sizeof(h_special_tbl), 0, cudaMemcpyHostToDevice, st_spcl_val));
cudaStreamSynchronize(st_spcl_val);
cudaStreamDestroy(st_spcl_val);
printf("\tdone.\n");
}
void GDDEnd() {
printf("GDD turns off...");
cudaDeviceReset();
printf("\tdone.\n");
}
void GQDEnd() {
printf("GQD turns off...");
cudaDeviceReset();
printf("\tdone.\n");
}
void GQDStart(int device) {
printf("GQD turns on ...");
cudaSetDevice(device);
cudaStream_t st_spcl_val;
cudaStreamCreate(&st_spcl_val);
double h_special_tbl[2];
type_trans_dbl trans;
h_special_tbl[0] = std::numeric_limits<double>::infinity();
trans.i64_value = (0x7ff0000000000000ULL); //CUDART_INF
h_special_tbl[1] = trans.dbl_value;
checkCudaErrors(cudaMemcpyToSymbolAsync(__dd_inf, &h_special_tbl, sizeof(h_special_tbl), 0, cudaMemcpyHostToDevice, st_spcl_val));
cudaStreamSynchronize(st_spcl_val);
h_special_tbl[0] = std::numeric_limits<double>::quiet_NaN();
trans.i64_value = (0xfff8000000000000ULL); //CUDART_NAN
h_special_tbl[1] = trans.dbl_value;
checkCudaErrors(cudaMemcpyToSymbolAsync(__dd_qnan, &h_special_tbl, sizeof(h_special_tbl), 0, cudaMemcpyHostToDevice, st_spcl_val));
cudaStreamSynchronize(st_spcl_val);
cudaStreamDestroy(st_spcl_val);
printf("\tdone.\n");
}
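// Illustrative sketch (not part of the original library): typical host-side usage
// brackets GPU double-double / quad-double work between the start and end calls.
// The wrapper name and the device index 0 are arbitrary examples.
static void example_gqd_session() {
  GQDStart(0);
  // ... launch kernels that operate on gdd_real / gqd_real values here ...
  GQDEnd();
}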
//__host__
//int convSPValForHost(gdd_real *buff, unsigned int elements){
// double h_qnan = std::numeric_limits<double>::quiet_NaN();
// double h_inf = std::numeric_limits<double>::infinity();
//
// for (unsigned int i = 0; i < elements; i++){
// // transform to hosts qnan if element is gpu_nan
// gdd_real t = buff[i];
// if (isnan(t)){
// buff[i].dd.x = h_qnan;
// buff[i].dd.y = h_qnan;
// } else if (isinf(t)) {
// buff[i].dd.x = h_inf;
// buff[i].dd.y = h_inf;
// } else {
// // nothing to do
// }
// i++;
// }
// return 0;
//}
//__device__
//int convSPValFordevice(gdd_real *buff, unsigned int elements){
// for (unsigned int i = 0; i < elements; i++) {
// // transform to hosts qnan if element is gpu_nan
// gdd_real t = buff[i];
// if (isnan(t)) {
// buff[i] = _dd_qnan;
// } else if (isinf(t)) {
// buff[i] = _dd_inf;
// } else {
// // nothing to do
// }
// i++;
// }
// return 0;
//}
#endif /* __GQD_COMMON_CU__ */
|
8a49db4700d2122e1b3d822866aa754879f7168d.hip | // !!! This is a file automatically generated by hipify!!!
//============================================================================
// Name : convolution2d.cu
// Author : Harshit Jain
// Class : ECE 8823
// GTID : 903024992
// Assignment : Assignment 1
// Copyright : Public
// Description : 2D Convolution in CUDA
//============================================================================
/************************************************/
// Kernel Size | Min Trans. | Max Trans. //
// 3 | 293764 | 293764 //
// 5 | 327184 | 327184 //
// 7 | 362404 | 362404 //
// 9 | 399424 | 399424 //
// 11 | 438244 | 438244 //
// 13 | 478864 | 478864 //
// 15 | 521284 | 521284 //
// 17 | 565504 | 565504 //
// 19 | 611524 | 611524 //
// 21 | 659344 | 659344 //
// 23 | 708964 | 708964 //
// 25 | 760384 | 760384 //
// 27 | 813604 | 813604 //
// 29 | 868624 | 868624 //
// 31 | 925444 | 925444 //
/************************************************/
#include <iostream>
#include <fstream>
#include <cassert>
#include <sstream>
#include "pgma_io.hpp"
#include <vector>
#include <string>
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#define TILE_WIDTH 32
#define KERNEL_SIZE 7
//extern __shared__ int s[];
#define checkCudaError(status) { \
if(status != hipSuccess) { \
std::cout << "CUDA Error " << __FILE__ << ", " << __LINE__ \
<< ": " << hipGetErrorString(status) << "\n"; \
exit(-1); \
} \
}
__constant__ int gpuKernel[31*31];
__global__ void convolutionGPU(int* inputImage, int* outputImage, int imageWidth, int kernelSize, int totalVal) {
//ADD CODE HERE
//Shared memory of size TILE_WIDTH plus apron width on top and bottom
extern __shared__ int sharedImageData[];
//get kernel radius
int kRadius = kernelSize/2;
//get particular thread data location in input image
int threadDataLoc = threadIdx.x + blockIdx.x*blockDim.x + threadIdx.y*imageWidth + (blockIdx.y*blockDim.y)*imageWidth;
//get thread x,y coordinates
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
//Boundary x=0 y=0 copies upper apron
if(threadIdx.x==0 && threadIdx.y==0){
if(blockIdx.y==0){
for(int row=0; row<kRadius ; row++){
for(int col=0; col<TILE_WIDTH+kernelSize-1; col++){
sharedImageData[row*(TILE_WIDTH+kernelSize-1)+col] = 0;
}
}
}
else{
for(int row=0; row<kRadius ; row++){
for(int col=0; col<TILE_WIDTH+kernelSize-1; col++){
if(blockIdx.x==0 && col < kRadius){
sharedImageData[row*(TILE_WIDTH+kernelSize-1)+col] =0;
}
else if(blockIdx.x==gridDim.x-1 && col>TILE_WIDTH+kRadius-1){
sharedImageData[row*(TILE_WIDTH+kernelSize-1)+col] =0;
}
else{
sharedImageData[row*(TILE_WIDTH+kernelSize-1)+col] = inputImage[threadDataLoc-kRadius*imageWidth - kRadius + col + row*imageWidth];
}
}
}
}
}
//Boundary threadID x=0, y=blockDim-1 copies lower apron
else if(threadIdx.x==0 && threadIdx.y==blockDim.y-1){
int starting_index = (TILE_WIDTH + kRadius)*(TILE_WIDTH+kernelSize-1);
if(blockIdx.y == gridDim.y-1){
for(int row=0; row<kRadius ; row++){
for(int col=0; col<TILE_WIDTH+kernelSize-1; col++){
sharedImageData[starting_index + row*(TILE_WIDTH+kernelSize-1)+col] = 0;
}
}
}
else{
for(int row=0; row<kRadius ; row++){
for(int col=0; col<TILE_WIDTH+kernelSize-1; col++){
if(blockIdx.x==0 && col < kRadius){
sharedImageData[starting_index + row*(TILE_WIDTH+kernelSize-1)+col] =0;
}
else if(blockIdx.x==gridDim.x-1 && col>TILE_WIDTH+kRadius-1){
sharedImageData[starting_index + row*(TILE_WIDTH+kernelSize-1)+col] =0;
}
else{
sharedImageData[starting_index + row*(TILE_WIDTH+kernelSize-1)+col] = inputImage[threadDataLoc + imageWidth -kRadius + col + row*imageWidth];
}
}
}
}
}
//Side apron and image data by thread ID x=0
if(threadIdx.x==0){
int row = threadIdx.y + kRadius;
for(int col =0 ; col <TILE_WIDTH +kernelSize -1; col++){
if(col < kRadius && blockIdx.x==0){
sharedImageData[row*(TILE_WIDTH+kernelSize-1)+col] = 0;
}
else if(col > (TILE_WIDTH + kRadius -1) && blockIdx.x==gridDim.x-1){
sharedImageData[row*(TILE_WIDTH+kernelSize-1)+col] = 0;
}
else{
sharedImageData[row*(TILE_WIDTH+kernelSize-1)+col] = inputImage[threadDataLoc+col-kRadius];
}
}
}
__syncthreads();
int value = 0;
for (int kRow = -kRadius; kRow <= kRadius; kRow++)
for (int kCol = -kRadius; kCol <= kRadius; kCol++){
value += sharedImageData[(threadIdx.x+kRadius) + kCol+ (threadIdx.y + kRadius + kRow)*(TILE_WIDTH+kernelSize-1)] * gpuKernel[(kRadius + kRow)*kernelSize + kRadius + kCol];
}
outputImage[threadDataLoc] = value/totalVal;
}
class PGM
{
public:
PGM() : N(0), ptr(NULL) {}
PGM(const PGM &rhs) : N(0), ptr(NULL)
{
copy(rhs);
}
~PGM() {
if (ptr != NULL) {
delete [] ptr;
}
}
PGM& operator=(const PGM &rhs)
{
if (this == &rhs)
return *this;
return copy(rhs);
}
PGM& copy(const PGM &rhs)
{
if (ptr != NULL)
{
delete [] ptr;
}
N = rhs.N;
size_t imageSize = N * N * sizeof *(rhs.ptr);
ptr = new int[imageSize];
memcpy(ptr, rhs.ptr, imageSize);
return *this;
}
bool operator==(const PGM &rhs) const
{
if (N == rhs.N) {
for(int i = 0; i < N * N; i++)
{
if (ptr[i] != rhs.ptr[i])
{
return false;
}
}
} else {
return false;
}
return true;
}
int N;
int *ptr;
};
PGM getImage(std::string fileName)
{
PGM image;
int x, y, maxVal;
pgma_read(fileName, x, y, maxVal, &(image.ptr));
assert(x == y);
image.N = x;
return image;
}
int main(int argc, char** argv)
{
assert(argc > 2);
std::vector<std::string> args;
std::copy(argv+1, argv + argc, std::back_inserter(args));
std::string fileName = args[1];
PGM image = getImage(fileName);
PGM hostOutput = image;
//construct kxk filter
std::cout << "Constructing kernel:\n";
int k = atoi(args[0].c_str());
assert(k % 2 == 1);
int *kernel = new int[k*k*sizeof(int)];
int totalVal = 0;
for (int row = 0; row < k; row++)
{
for (int col = 0; col < k; col++)
{
int colVal = (col < (k/2+1)) ? col+1 : k-col;
int rowVal = (row < (k/2+1)) ? row+1 : k-row;
kernel[row * k + col] = colVal + rowVal;
totalVal += colVal + rowVal;
std::cout << kernel[row*k + col] << " ";
}
std::cout << "\n";
}
PGM gpuOutput = image;
std::cout << "Image Width : " << image.N << std::endl;
/////////////////////////////////////////////////////////////////
// Insert CUDA launch code here
/////////////////////////////////////////////////////////////////
int device;
int * gpuInputImage, * gpuOutputImage;
checkCudaError(hipSetDevice(5));
checkCudaError(hipGetDevice(&device));
hipDeviceProp_t prop;
checkCudaError(hipGetDeviceProperties(&prop, device));
std::cout << "Device " << device << ": " << prop.name << "\n";
std::cout << "GPU/SM Cores: " << prop.multiProcessorCount << "\n";
std::cout << "Compute Capability: " << prop.major << "." << prop.minor << "\n";
std::cout << "Shared Memory per Block: " << (prop.sharedMemPerBlock>>10) << "\n";
checkCudaError(hipMalloc(&gpuInputImage, image.N * image.N * sizeof(int)));
std::cout << "Woks" << std::endl;
checkCudaError(hipMalloc(&gpuOutputImage, image.N * image.N * sizeof(int)));
checkCudaError(hipMemcpy(gpuInputImage, image.ptr, image.N * image.N * sizeof(int), hipMemcpyHostToDevice));
checkCudaError(hipMemcpy(gpuOutputImage, gpuOutput.ptr, image.N * image.N * sizeof(int), hipMemcpyHostToDevice));
checkCudaError(hipMemcpyToSymbol(gpuKernel, kernel, k * k * sizeof(int)));
const int GRID_SIZE = ceil((float)image.N/TILE_WIDTH);
std::cout << GRID_SIZE << std::endl;
const int CTA_SIZE = TILE_WIDTH;
std::cout << "Image size: " << image.N << "X" << image.N << std::endl << "Threads per block: " << CTA_SIZE << "X" << CTA_SIZE << std::endl << "Blocks: " << GRID_SIZE << "X" << GRID_SIZE << std::endl;
dim3 dimBlock(CTA_SIZE,CTA_SIZE,1);
dim3 dimGrid(GRID_SIZE,GRID_SIZE);
std::cout << "Running GPU kernel\n\n";
//use k, kernel, image.N and image.ptr as your inputs
//copy output to gpuOutput.ptr, data is already allocated
//make sure the dimensions of the image are the same
int shared_memory = sizeof(int)*(TILE_WIDTH+k-1)*(TILE_WIDTH+k-1);
printf("Shared Memory: %d\n",shared_memory>>10);
hipProfilerStart();
hipLaunchKernelGGL(( convolutionGPU), dim3(dimGrid), dim3(dimBlock), shared_memory, 0, gpuInputImage, gpuOutputImage, image.N, k, totalVal);
hipProfilerStop();
checkCudaError(hipDeviceSynchronize());
hipMemcpy(gpuOutput.ptr, gpuOutputImage, image.N * image.N * sizeof(int), hipMemcpyDeviceToHost);
printf("%d\n",image.ptr[0]);
/////////////////////////////////////////////////////////////////
//CPU convolution
std::cout << "Running host kernel\n\n";
int kRadius = k/2;
for (int row = 0; row < image.N; row++)
{
for (int col = 0; col < image.N; col++)
{
//sample from neighbor pixels
int index = row * image.N + col;
int value = 0;
for (int kRow = -kRadius; kRow <= kRadius; kRow++)
{
//image bounds check
if (row+kRow < 0 || row+kRow >= image.N)
continue;
for (int kCol = -kRadius; kCol <= kRadius; kCol++)
{
//image bounds check
if (col+kCol < 0 || col+kCol >= image.N)
continue;
value += kernel[(kRadius + kRow)*k + kRadius + kCol] * image.ptr[index + kRow*image.N + kCol];
}
}
hostOutput.ptr[index] = value / totalVal;
}
}
std::cout << "Comparing results:\n";
bool passed = hostOutput == gpuOutput;
std::string resultString = (passed) ? "Passed\n" : "Failed\n";
std::cout << resultString;
std::cout << "Writing image outputs: output_host.pgm output_gpu.pgm\n";
std::string outputFileName = "output_host.pgm";
pgma_write(outputFileName, image.N, image.N, hostOutput.ptr);
outputFileName = "output_gpu.pgm";
pgma_write(outputFileName, image.N, image.N, gpuOutput.ptr);
delete [] kernel;  // kernel was allocated with new[], so release it with delete[]
}
| 8a49db4700d2122e1b3d822866aa754879f7168d.cu | //============================================================================
// Name : convolution2d.cu
// Author : Harshit Jain
// Class : ECE 8823
// GTID : 903024992
// Assignment : Assignment 1
// Copyright : Public
// Description : 2D Convolution in CUDA
//============================================================================
/************************************************/
// Kernel Size | Min Trans. | Max Trans. //
// 3 | 293764 | 293764 //
// 5 | 327184 | 327184 //
// 7 | 362404 | 362404 //
// 9 | 399424 | 399424 //
// 11 | 438244 | 438244 //
// 13 | 478864 | 478864 //
// 15 | 521284 | 521284 //
// 17 | 565504 | 565504 //
// 19 | 611524 | 611524 //
// 21 | 659344 | 659344 //
// 23 | 708964 | 708964 //
// 25 | 760384 | 760384 //
// 27 | 813604 | 813604 //
// 29 | 868624 | 868624 //
// 31 | 925444 | 925444 //
/************************************************/
#include <iostream>
#include <fstream>
#include <cassert>
#include <sstream>
#include "pgma_io.hpp"
#include <vector>
#include <string>
#include <algorithm>
#include <cuda_profiler_api.h>
#include <stdio.h>
#define TILE_WIDTH 32
#define KERNEL_SIZE 7
//extern __shared__ int s[];
#define checkCudaError(status) { \
if(status != cudaSuccess) { \
std::cout << "CUDA Error " << __FILE__ << ", " << __LINE__ \
<< ": " << cudaGetErrorString(status) << "\n"; \
exit(-1); \
} \
}
__constant__ int gpuKernel[31*31];
__global__ void convolutionGPU(int* inputImage, int* outputImage, int imageWidth, int kernelSize, int totalVal) {
//ADD CODE HERE
//Shared memory of size TILE_WIDTH plus apron width on top and bottom
extern __shared__ int sharedImageData[];
//get kernel radius
int kRadius = kernelSize/2;
//get particular thread data location in input image
int threadDataLoc = threadIdx.x + blockIdx.x*blockDim.x + threadIdx.y*imageWidth + (blockIdx.y*blockDim.y)*imageWidth;
//get thread x,y coordinates
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
//Boundary x=0 y=0 copies upper apron
if(threadIdx.x==0 && threadIdx.y==0){
if(blockIdx.y==0){
for(int row=0; row<kRadius ; row++){
for(int col=0; col<TILE_WIDTH+kernelSize-1; col++){
sharedImageData[row*(TILE_WIDTH+kernelSize-1)+col] = 0;
}
}
}
else{
for(int row=0; row<kRadius ; row++){
for(int col=0; col<TILE_WIDTH+kernelSize-1; col++){
if(blockIdx.x==0 && col < kRadius){
sharedImageData[row*(TILE_WIDTH+kernelSize-1)+col] =0;
}
else if(blockIdx.x==gridDim.x-1 && col>TILE_WIDTH+kRadius-1){
sharedImageData[row*(TILE_WIDTH+kernelSize-1)+col] =0;
}
else{
sharedImageData[row*(TILE_WIDTH+kernelSize-1)+col] = inputImage[threadDataLoc-kRadius*imageWidth - kRadius + col + row*imageWidth];
}
}
}
}
}
//Boundary threadID x=0, y=blockDim-1 copies lower apron
else if(threadIdx.x==0 && threadIdx.y==blockDim.y-1){
int starting_index = (TILE_WIDTH + kRadius)*(TILE_WIDTH+kernelSize-1);
if(blockIdx.y == gridDim.y-1){
for(int row=0; row<kRadius ; row++){
for(int col=0; col<TILE_WIDTH+kernelSize-1; col++){
sharedImageData[starting_index + row*(TILE_WIDTH+kernelSize-1)+col] = 0;
}
}
}
else{
for(int row=0; row<kRadius ; row++){
for(int col=0; col<TILE_WIDTH+kernelSize-1; col++){
if(blockIdx.x==0 && col < kRadius){
sharedImageData[starting_index + row*(TILE_WIDTH+kernelSize-1)+col] =0;
}
else if(blockIdx.x==gridDim.x-1 && col>TILE_WIDTH+kRadius-1){
sharedImageData[starting_index + row*(TILE_WIDTH+kernelSize-1)+col] =0;
}
else{
sharedImageData[starting_index + row*(TILE_WIDTH+kernelSize-1)+col] = inputImage[threadDataLoc + imageWidth -kRadius + col + row*imageWidth];
}
}
}
}
}
//Side apron and image data by thread ID x=0
if(threadIdx.x==0){
int row = threadIdx.y + kRadius;
for(int col =0 ; col <TILE_WIDTH +kernelSize -1; col++){
if(col < kRadius && blockIdx.x==0){
sharedImageData[row*(TILE_WIDTH+kernelSize-1)+col] = 0;
}
else if(col > (TILE_WIDTH + kRadius -1) && blockIdx.x==gridDim.x-1){
sharedImageData[row*(TILE_WIDTH+kernelSize-1)+col] = 0;
}
else{
sharedImageData[row*(TILE_WIDTH+kernelSize-1)+col] = inputImage[threadDataLoc+col-kRadius];
}
}
}
__syncthreads();
int value = 0;
for (int kRow = -kRadius; kRow <= kRadius; kRow++)
for (int kCol = -kRadius; kCol <= kRadius; kCol++){
value += sharedImageData[(threadIdx.x+kRadius) + kCol+ (threadIdx.y + kRadius + kRow)*(TILE_WIDTH+kernelSize-1)] * gpuKernel[(kRadius + kRow)*kernelSize + kRadius + kCol];
}
outputImage[threadDataLoc] = value/totalVal;
}
class PGM
{
public:
PGM() : N(0), ptr(NULL) {}
PGM(const PGM &rhs) : N(0), ptr(NULL)
{
copy(rhs);
}
~PGM() {
if (ptr != NULL) {
delete [] ptr;
}
}
PGM& operator=(const PGM &rhs)
{
if (this == &rhs)
return *this;
return copy(rhs);
}
PGM& copy(const PGM &rhs)
{
if (ptr != NULL)
{
delete [] ptr;
}
N = rhs.N;
size_t imageSize = N * N * sizeof *(rhs.ptr);
ptr = new int[imageSize];
memcpy(ptr, rhs.ptr, imageSize);
return *this;
}
bool operator==(const PGM &rhs) const
{
if (N == rhs.N) {
for(int i = 0; i < N * N; i++)
{
if (ptr[i] != rhs.ptr[i])
{
return false;
}
}
} else {
return false;
}
return true;
}
int N;
int *ptr;
};
PGM getImage(std::string fileName)
{
PGM image;
int x, y, maxVal;
pgma_read(fileName, x, y, maxVal, &(image.ptr));
assert(x == y);
image.N = x;
return image;
}
int main(int argc, char** argv)
{
assert(argc > 2);
std::vector<std::string> args;
std::copy(argv+1, argv + argc, std::back_inserter(args));
std::string fileName = args[1];
PGM image = getImage(fileName);
PGM hostOutput = image;
//construct kxk filter
std::cout << "Constructing kernel:\n";
int k = atoi(args[0].c_str());
assert(k % 2 == 1);
int *kernel = new int[k*k*sizeof(int)];
int totalVal = 0;
for (int row = 0; row < k; row++)
{
for (int col = 0; col < k; col++)
{
int colVal = (col < (k/2+1)) ? col+1 : k-col;
int rowVal = (row < (k/2+1)) ? row+1 : k-row;
kernel[row * k + col] = colVal + rowVal;
totalVal += colVal + rowVal;
std::cout << kernel[row*k + col] << " ";
}
std::cout << "\n";
}
PGM gpuOutput = image;
std::cout << "Image Width : " << image.N << std::endl;
/////////////////////////////////////////////////////////////////
// Insert CUDA launch code here
/////////////////////////////////////////////////////////////////
int device;
int * gpuInputImage, * gpuOutputImage;
checkCudaError(cudaSetDevice(5));
checkCudaError(cudaGetDevice(&device));
cudaDeviceProp prop;
checkCudaError(cudaGetDeviceProperties(&prop, device));
std::cout << "Device " << device << ": " << prop.name << "\n";
std::cout << "GPU/SM Cores: " << prop.multiProcessorCount << "\n";
std::cout << "Compute Capability: " << prop.major << "." << prop.minor << "\n";
std::cout << "Shared Memory per Block: " << (prop.sharedMemPerBlock>>10) << "\n";
checkCudaError(cudaMalloc(&gpuInputImage, image.N * image.N * sizeof(int)));
std::cout << "Woks" << std::endl;
checkCudaError(cudaMalloc(&gpuOutputImage, image.N * image.N * sizeof(int)));
checkCudaError(cudaMemcpy(gpuInputImage, image.ptr, image.N * image.N * sizeof(int), cudaMemcpyHostToDevice));
checkCudaError(cudaMemcpy(gpuOutputImage, gpuOutput.ptr, image.N * image.N * sizeof(int), cudaMemcpyHostToDevice));
checkCudaError(cudaMemcpyToSymbol(gpuKernel, kernel, k * k * sizeof(int)));
const int GRID_SIZE = ceil((float)image.N/TILE_WIDTH);
std::cout << GRID_SIZE << std::endl;
const int CTA_SIZE = TILE_WIDTH;
std::cout << "Image size: " << image.N << "X" << image.N << std::endl << "Threads per block: " << CTA_SIZE << "X" << CTA_SIZE << std::endl << "Blocks: " << GRID_SIZE << "X" << GRID_SIZE << std::endl;
dim3 dimBlock(CTA_SIZE,CTA_SIZE,1);
dim3 dimGrid(GRID_SIZE,GRID_SIZE);
std::cout << "Running GPU kernel\n\n";
//use k, kernel, image.N and image.ptr as your inputs
//copy output to gpuOutput.ptr, data is already allocated
//make sure the dimensions of the image are the same
int shared_memory = sizeof(int)*(TILE_WIDTH+k-1)*(TILE_WIDTH+k-1);
printf("Shared Memory: %d\n",shared_memory>>10);
cudaProfilerStart();
convolutionGPU<<<dimGrid, dimBlock, shared_memory>>>(gpuInputImage, gpuOutputImage, image.N, k, totalVal);
cudaProfilerStop();
checkCudaError(cudaDeviceSynchronize());
checkCudaError(cudaMemcpy(gpuOutput.ptr, gpuOutputImage, image.N * image.N * sizeof(int), cudaMemcpyDeviceToHost));
printf("%d\n",image.ptr[0]);
/////////////////////////////////////////////////////////////////
//CPU convolution
std::cout << "Running host kernel\n\n";
int kRadius = k/2;
for (int row = 0; row < image.N; row++)
{
for (int col = 0; col < image.N; col++)
{
//sample from neighbor pixels
int index = row * image.N + col;
int value = 0;
for (int kRow = -kRadius; kRow <= kRadius; kRow++)
{
//image bounds check
if (row+kRow < 0 || row+kRow >= image.N)
continue;
for (int kCol = -kRadius; kCol <= kRadius; kCol++)
{
//image bounds check
if (col+kCol < 0 || col+kCol >= image.N)
continue;
value += kernel[(kRadius + kRow)*k + kRadius + kCol] * image.ptr[index + kRow*image.N + kCol];
}
}
hostOutput.ptr[index] = value / totalVal;
}
}
std::cout << "Comparing results:\n";
bool passed = hostOutput == gpuOutput;
std::string resultString = (passed) ? "Passed\n" : "Failed\n";
std::cout << resultString;
std::cout << "Writing image outputs: output_host.pgm output_gpu.pgm\n";
std::string outputFileName = "output_host.pgm";
pgma_write(outputFileName, image.N, image.N, hostOutput.ptr);
outputFileName = "output_gpu.pgm";
pgma_write(outputFileName, image.N, image.N, gpuOutput.ptr);
checkCudaError(cudaFree(gpuInputImage));
checkCudaError(cudaFree(gpuOutputImage));
delete [] kernel;
}
|
9894d76c037439f97636eaee4630b22f1d9974cc.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/mpitask.hpp"
namespace caffe {
template <typename Dtype>
void GatherLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
hipDeviceSynchronize();
for (int i = 0; i < bottom.size(); ++i) {
MPI_Gather(bottom[i]->gpu_data(),bottom[i]->count(),MPI_FLOAT,top[i]->mutable_gpu_data(),
bottom[i]->count(),MPI_FLOAT,0,MPI_COMM_WORLD);
hipDeviceSynchronize();
}
}
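// Layout assumptions behind the MPI_Gather above (stated here as a sketch,
// not checked by the code): MPI_FLOAT only matches Dtype == float, every rank
// contributes exactly bottom[i]->count() elements, and on the root (rank 0)
// top[i] must hold comm_size * bottom[i]->count() elements to receive them.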
template <typename Dtype>
void GatherLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
hipDeviceSynchronize();
MpiTaskList<Dtype> *task_list = (MpiTaskList<Dtype> *)Caffe::getTaskList();
task_list->wait_all_task();
for (int i = 0; i < top.size(); ++i) {
MPI_Scatter(top[i]->gpu_diff(),bottom[i]->count(),MPI_FLOAT,bottom[i]->mutable_gpu_diff(),bottom[i]->count(),
MPI_FLOAT,0,MPI_COMM_WORLD);
hipDeviceSynchronize();
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(GatherLayer);
} // namespace caffe
| 9894d76c037439f97636eaee4630b22f1d9974cc.cu | #include <vector>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/mpitask.hpp"
namespace caffe {
template <typename Dtype>
void GatherLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
cudaDeviceSynchronize();
for (int i = 0; i < bottom.size(); ++i) {
MPI_Gather(bottom[i]->gpu_data(),bottom[i]->count(),MPI_FLOAT,top[i]->mutable_gpu_data(),
bottom[i]->count(),MPI_FLOAT,0,MPI_COMM_WORLD);
cudaDeviceSynchronize();
}
}
template <typename Dtype>
void GatherLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
cudaDeviceSynchronize();
MpiTaskList<Dtype> *task_list = (MpiTaskList<Dtype> *)Caffe::getTaskList();
task_list->wait_all_task();
for (int i = 0; i < top.size(); ++i) {
MPI_Scatter(top[i]->gpu_diff(),bottom[i]->count(),MPI_FLOAT,bottom[i]->mutable_gpu_diff(),bottom[i]->count(),
MPI_FLOAT,0,MPI_COMM_WORLD);
cudaDeviceSynchronize();
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(GatherLayer);
} // namespace caffe
|
24a6cd90ce0db3760faf754a0d6bfc53f9e54df1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cassert>
#include <iostream>
#include <random>
#include <limits>
#include <cuda/api_wrappers.h>
#include "HeterogeneousCore/CUDAUtilities/interface/HistoContainer.h"
#include "HeterogeneousCore/CUDAUtilities/interface/exitSansCUDADevices.h"
template<typename T, int NBINS, int S, int DELTA>
__global__
void mykernel(T const * __restrict__ v, uint32_t N) {
assert(v);
assert(N==12000);
if (threadIdx.x==0) printf("start kernel for %d data\n",N);
using Hist = HistoContainer<T,NBINS,12000,S,uint16_t>;
__shared__ Hist hist;
__shared__ typename Hist::Counter ws[32];
for (auto j=threadIdx.x; j<Hist::totbins(); j+=blockDim.x) { hist.off[j]=0;}
__syncthreads();
for (auto j=threadIdx.x; j<N; j+=blockDim.x) hist.count(v[j]);
__syncthreads();
assert(0==hist.size());
__syncthreads();
hist.finalize(ws);
__syncthreads();
assert(N==hist.size());
for (auto j=threadIdx.x; j<Hist::nbins(); j+=blockDim.x) assert(hist.off[j]<=hist.off[j+1]);
__syncthreads();
if (threadIdx.x<32) ws[threadIdx.x]=0; // used by prefix scan...
__syncthreads();
for (auto j=threadIdx.x; j<N; j+=blockDim.x) hist.fill(v[j],j);
__syncthreads();
assert(0==hist.off[0]);
assert(N==hist.size());
for (auto j=threadIdx.x; j<hist.size()-1; j+=blockDim.x) {
auto p = hist.begin()+j;
assert((*p)<N);
auto k1 = Hist::bin(v[*p]);
auto k2 = Hist::bin(v[*(p+1)]);
assert(k2>=k1);
}
for (auto i=threadIdx.x; i<hist.size(); i+=blockDim.x) {
auto p = hist.begin()+i;
auto j = *p;
auto b0 = Hist::bin(v[j]);
int tot=0;
auto ftest = [&](int k) {
assert(k>=0 && k<N);
++tot;
};
forEachInWindow(hist,v[j],v[j],ftest);
int rtot = hist.size(b0);
assert(tot==rtot);
tot=0;
auto vm = int(v[j])-DELTA;
auto vp = int(v[j])+DELTA;
constexpr int vmax = NBINS!=128 ? NBINS*2-1 : std::numeric_limits<T>::max();
vm = ::max(vm, 0);
vm = ::min(vm,vmax);
vp = ::min(vp,vmax);
vp = ::max(vp, 0);
assert(vp>=vm);
forEachInWindow(hist, vm,vp, ftest);
int bp = Hist::bin(vp);
int bm = Hist::bin(vm);
rtot = hist.end(bp)-hist.begin(bm);
assert(tot==rtot);
}
}
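// Taken together, the kernel above exercises the two-pass HistoContainer
// protocol (count + finalize to build the bin offsets, then fill), and then
// checks the window-query invariant: for a single bin b,
// hist.end(b) - hist.begin(b) equals the number of j with Hist::bin(v[j]) == b,
// and a [vm, vp] window visits exactly the elements of the bins it covers.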
template<typename T, int NBINS=128, int S=8*sizeof(T), int DELTA=1000>
void go() {
if (cuda::device::count() == 0) {
std::cerr << "No CUDA devices on this system" << "\n";
exit(EXIT_FAILURE);
}
auto current_device = cuda::device::current::get();
std::mt19937 eng;
int rmin=std::numeric_limits<T>::min();
int rmax=std::numeric_limits<T>::max();
if (NBINS!=128) {
rmin=0;
rmax=NBINS*2-1;
}
std::uniform_int_distribution<T> rgen(rmin,rmax);
constexpr int N=12000;
T v[N];
auto v_d = cuda::memory::device::make_unique<T[]>(current_device, N);
assert(v_d.get());
using Hist = HistoContainer<T,NBINS,N,S>;
std::cout << "HistoContainer " << Hist::nbits() << ' ' << Hist::nbins() << ' ' << Hist::capacity() << ' ' << (rmax-rmin)/Hist::nbins() << std::endl;
std::cout << "bins " << int(Hist::bin(0)) << ' ' << int(Hist::bin(rmin)) << ' ' << int(Hist::bin(rmax)) << std::endl;
for (int it=0; it<5; ++it) {
for (long long j = 0; j < N; j++) v[j]=rgen(eng);
if (it==2) for (long long j = N/2; j < N/2+N/4; j++) v[j]=4;
assert(v_d.get());
assert(v);
cuda::memory::copy(v_d.get(), v, N*sizeof(T));
assert(v_d.get());
cuda::launch(mykernel<T,NBINS,S,DELTA>,{1,256},v_d.get(),N);
}
}
int main() {
exitSansCUDADevices();
go<int16_t>();
go<uint8_t,128,8,4>();
go<uint16_t,313/2,9,4>();
return 0;
}
| 24a6cd90ce0db3760faf754a0d6bfc53f9e54df1.cu | #include <algorithm>
#include <cassert>
#include <iostream>
#include <random>
#include <limits>
#include <cuda/api_wrappers.h>
#include "HeterogeneousCore/CUDAUtilities/interface/HistoContainer.h"
#include "HeterogeneousCore/CUDAUtilities/interface/exitSansCUDADevices.h"
template<typename T, int NBINS, int S, int DELTA>
__global__
void mykernel(T const * __restrict__ v, uint32_t N) {
assert(v);
assert(N==12000);
if (threadIdx.x==0) printf("start kernel for %d data\n",N);
using Hist = HistoContainer<T,NBINS,12000,S,uint16_t>;
__shared__ Hist hist;
__shared__ typename Hist::Counter ws[32];
for (auto j=threadIdx.x; j<Hist::totbins(); j+=blockDim.x) { hist.off[j]=0;}
__syncthreads();
for (auto j=threadIdx.x; j<N; j+=blockDim.x) hist.count(v[j]);
__syncthreads();
assert(0==hist.size());
__syncthreads();
hist.finalize(ws);
__syncthreads();
assert(N==hist.size());
for (auto j=threadIdx.x; j<Hist::nbins(); j+=blockDim.x) assert(hist.off[j]<=hist.off[j+1]);
__syncthreads();
if (threadIdx.x<32) ws[threadIdx.x]=0; // used by prefix scan...
__syncthreads();
for (auto j=threadIdx.x; j<N; j+=blockDim.x) hist.fill(v[j],j);
__syncthreads();
assert(0==hist.off[0]);
assert(N==hist.size());
for (auto j=threadIdx.x; j<hist.size()-1; j+=blockDim.x) {
auto p = hist.begin()+j;
assert((*p)<N);
auto k1 = Hist::bin(v[*p]);
auto k2 = Hist::bin(v[*(p+1)]);
assert(k2>=k1);
}
for (auto i=threadIdx.x; i<hist.size(); i+=blockDim.x) {
auto p = hist.begin()+i;
auto j = *p;
auto b0 = Hist::bin(v[j]);
int tot=0;
auto ftest = [&](int k) {
assert(k>=0 && k<N);
++tot;
};
forEachInWindow(hist,v[j],v[j],ftest);
int rtot = hist.size(b0);
assert(tot==rtot);
tot=0;
auto vm = int(v[j])-DELTA;
auto vp = int(v[j])+DELTA;
constexpr int vmax = NBINS!=128 ? NBINS*2-1 : std::numeric_limits<T>::max();
vm = std::max(vm, 0);
vm = std::min(vm,vmax);
vp = std::min(vp,vmax);
vp = std::max(vp, 0);
assert(vp>=vm);
forEachInWindow(hist, vm,vp, ftest);
int bp = Hist::bin(vp);
int bm = Hist::bin(vm);
rtot = hist.end(bp)-hist.begin(bm);
assert(tot==rtot);
}
}
template<typename T, int NBINS=128, int S=8*sizeof(T), int DELTA=1000>
void go() {
if (cuda::device::count() == 0) {
std::cerr << "No CUDA devices on this system" << "\n";
exit(EXIT_FAILURE);
}
auto current_device = cuda::device::current::get();
std::mt19937 eng;
int rmin=std::numeric_limits<T>::min();
int rmax=std::numeric_limits<T>::max();
if (NBINS!=128) {
rmin=0;
rmax=NBINS*2-1;
}
std::uniform_int_distribution<T> rgen(rmin,rmax);
constexpr int N=12000;
T v[N];
auto v_d = cuda::memory::device::make_unique<T[]>(current_device, N);
assert(v_d.get());
using Hist = HistoContainer<T,NBINS,N,S>;
std::cout << "HistoContainer " << Hist::nbits() << ' ' << Hist::nbins() << ' ' << Hist::capacity() << ' ' << (rmax-rmin)/Hist::nbins() << std::endl;
std::cout << "bins " << int(Hist::bin(0)) << ' ' << int(Hist::bin(rmin)) << ' ' << int(Hist::bin(rmax)) << std::endl;
for (int it=0; it<5; ++it) {
for (long long j = 0; j < N; j++) v[j]=rgen(eng);
if (it==2) for (long long j = N/2; j < N/2+N/4; j++) v[j]=4;
assert(v_d.get());
assert(v);
cuda::memory::copy(v_d.get(), v, N*sizeof(T));
assert(v_d.get());
cuda::launch(mykernel<T,NBINS,S,DELTA>,{1,256},v_d.get(),N);
}
}
int main() {
exitSansCUDADevices();
go<int16_t>();
go<uint8_t,128,8,4>();
go<uint16_t,313/2,9,4>();
return 0;
}
|
caec61bc53165499c247c3caccdf934bfd5bb5fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_advec_mom_kernel2_z;
int xdim0_advec_mom_kernel2_z_h = -1;
__constant__ int ydim0_advec_mom_kernel2_z;
int ydim0_advec_mom_kernel2_z_h = -1;
__constant__ int xdim1_advec_mom_kernel2_z;
int xdim1_advec_mom_kernel2_z_h = -1;
__constant__ int ydim1_advec_mom_kernel2_z;
int ydim1_advec_mom_kernel2_z_h = -1;
__constant__ int xdim2_advec_mom_kernel2_z;
int xdim2_advec_mom_kernel2_z_h = -1;
__constant__ int ydim2_advec_mom_kernel2_z;
int ydim2_advec_mom_kernel2_z_h = -1;
__constant__ int xdim3_advec_mom_kernel2_z;
int xdim3_advec_mom_kernel2_z_h = -1;
__constant__ int ydim3_advec_mom_kernel2_z;
int ydim3_advec_mom_kernel2_z_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_mom_kernel2_z * (y) + \
xdim0_advec_mom_kernel2_z * ydim0_advec_mom_kernel2_z * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_mom_kernel2_z * (y) + \
xdim1_advec_mom_kernel2_z * ydim1_advec_mom_kernel2_z * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_advec_mom_kernel2_z * (y) + \
xdim2_advec_mom_kernel2_z * ydim2_advec_mom_kernel2_z * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_advec_mom_kernel2_z * (y) + \
xdim3_advec_mom_kernel2_z * ydim3_advec_mom_kernel2_z * (z))
// user function
__device__
inline void
advec_mom_kernel2_z_gpu(double *vel1, const double *node_mass_post,
const double *node_mass_pre,
const double *mom_flux) {
vel1[OPS_ACC0(0, 0, 0)] =
(vel1[OPS_ACC0(0, 0, 0)] * node_mass_pre[OPS_ACC2(0, 0, 0)] +
mom_flux[OPS_ACC3(0, 0, -1)] - mom_flux[OPS_ACC3(0, 0, 0)]) /
node_mass_post[OPS_ACC1(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_advec_mom_kernel2_z(double *__restrict arg0,
const double *__restrict arg1,
const double *__restrict arg2,
const double *__restrict arg3,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel2_z +
idx_z * 1 * 1 * xdim0_advec_mom_kernel2_z * ydim0_advec_mom_kernel2_z;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel2_z +
idx_z * 1 * 1 * xdim1_advec_mom_kernel2_z * ydim1_advec_mom_kernel2_z;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel2_z +
idx_z * 1 * 1 * xdim2_advec_mom_kernel2_z * ydim2_advec_mom_kernel2_z;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel2_z +
idx_z * 1 * 1 * xdim3_advec_mom_kernel2_z * ydim3_advec_mom_kernel2_z;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_mom_kernel2_z_gpu(arg0, arg1, arg2, arg3);
}
}
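// The pointer arithmetic above advances each arg by
// idx_x + idx_y * xdimN + idx_z * xdimN * ydimN elements, so inside
// advec_mom_kernel2_z_gpu the OPS_ACCn(0,0,0) macros address the
// (idx_x, idx_y, idx_z) grid point and the (0,0,-1) offset used for mom_flux
// reaches the neighbouring cell one plane below in z.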
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3) {
#else
void ops_par_loop_advec_mom_kernel2_z_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 4, range, 138))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(138, "advec_mom_kernel2_z");
OPS_kernels[138].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
if (xdim0 != xdim0_advec_mom_kernel2_z_h ||
ydim0 != ydim0_advec_mom_kernel2_z_h ||
xdim1 != xdim1_advec_mom_kernel2_z_h ||
ydim1 != ydim1_advec_mom_kernel2_z_h ||
xdim2 != xdim2_advec_mom_kernel2_z_h ||
ydim2 != ydim2_advec_mom_kernel2_z_h ||
xdim3 != xdim3_advec_mom_kernel2_z_h ||
ydim3 != ydim3_advec_mom_kernel2_z_h) {
hipMemcpyToSymbol(xdim0_advec_mom_kernel2_z, &xdim0, sizeof(int));
xdim0_advec_mom_kernel2_z_h = xdim0;
hipMemcpyToSymbol(ydim0_advec_mom_kernel2_z, &ydim0, sizeof(int));
ydim0_advec_mom_kernel2_z_h = ydim0;
hipMemcpyToSymbol(xdim1_advec_mom_kernel2_z, &xdim1, sizeof(int));
xdim1_advec_mom_kernel2_z_h = xdim1;
hipMemcpyToSymbol(ydim1_advec_mom_kernel2_z, &ydim1, sizeof(int));
ydim1_advec_mom_kernel2_z_h = ydim1;
hipMemcpyToSymbol(xdim2_advec_mom_kernel2_z, &xdim2, sizeof(int));
xdim2_advec_mom_kernel2_z_h = xdim2;
hipMemcpyToSymbol(ydim2_advec_mom_kernel2_z, &ydim2, sizeof(int));
ydim2_advec_mom_kernel2_z_h = ydim2;
hipMemcpyToSymbol(xdim3_advec_mom_kernel2_z, &xdim3, sizeof(int));
xdim3_advec_mom_kernel2_z_h = xdim3;
hipMemcpyToSymbol(ydim3_advec_mom_kernel2_z, &ydim3, sizeof(int));
ydim3_advec_mom_kernel2_z_h = ydim3;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
char *p_a[4];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[138].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_advec_mom_kernel2_z), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[138].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[138].mpi_time += t2 - t1;
OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 138;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 138;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg *)malloc(4 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->function = ops_par_loop_advec_mom_kernel2_z_execute;
if (OPS_diags > 1) {
ops_timing_realloc(138, "advec_mom_kernel2_z");
}
ops_enqueue_kernel(desc);
}
#endif
| caec61bc53165499c247c3caccdf934bfd5bb5fa.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_advec_mom_kernel2_z;
int xdim0_advec_mom_kernel2_z_h = -1;
__constant__ int ydim0_advec_mom_kernel2_z;
int ydim0_advec_mom_kernel2_z_h = -1;
__constant__ int xdim1_advec_mom_kernel2_z;
int xdim1_advec_mom_kernel2_z_h = -1;
__constant__ int ydim1_advec_mom_kernel2_z;
int ydim1_advec_mom_kernel2_z_h = -1;
__constant__ int xdim2_advec_mom_kernel2_z;
int xdim2_advec_mom_kernel2_z_h = -1;
__constant__ int ydim2_advec_mom_kernel2_z;
int ydim2_advec_mom_kernel2_z_h = -1;
__constant__ int xdim3_advec_mom_kernel2_z;
int xdim3_advec_mom_kernel2_z_h = -1;
__constant__ int ydim3_advec_mom_kernel2_z;
int ydim3_advec_mom_kernel2_z_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_mom_kernel2_z * (y) + \
xdim0_advec_mom_kernel2_z * ydim0_advec_mom_kernel2_z * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_mom_kernel2_z * (y) + \
xdim1_advec_mom_kernel2_z * ydim1_advec_mom_kernel2_z * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_advec_mom_kernel2_z * (y) + \
xdim2_advec_mom_kernel2_z * ydim2_advec_mom_kernel2_z * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_advec_mom_kernel2_z * (y) + \
xdim3_advec_mom_kernel2_z * ydim3_advec_mom_kernel2_z * (z))
// user function
__device__
inline void
advec_mom_kernel2_z_gpu(double *vel1, const double *node_mass_post,
const double *node_mass_pre,
const double *mom_flux) {
vel1[OPS_ACC0(0, 0, 0)] =
(vel1[OPS_ACC0(0, 0, 0)] * node_mass_pre[OPS_ACC2(0, 0, 0)] +
mom_flux[OPS_ACC3(0, 0, -1)] - mom_flux[OPS_ACC3(0, 0, 0)]) /
node_mass_post[OPS_ACC1(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_advec_mom_kernel2_z(double *__restrict arg0,
const double *__restrict arg1,
const double *__restrict arg2,
const double *__restrict arg3,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel2_z +
idx_z * 1 * 1 * xdim0_advec_mom_kernel2_z * ydim0_advec_mom_kernel2_z;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel2_z +
idx_z * 1 * 1 * xdim1_advec_mom_kernel2_z * ydim1_advec_mom_kernel2_z;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel2_z +
idx_z * 1 * 1 * xdim2_advec_mom_kernel2_z * ydim2_advec_mom_kernel2_z;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel2_z +
idx_z * 1 * 1 * xdim3_advec_mom_kernel2_z * ydim3_advec_mom_kernel2_z;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_mom_kernel2_z_gpu(arg0, arg1, arg2, arg3);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3) {
#else
void ops_par_loop_advec_mom_kernel2_z_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 4, range, 138))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(138, "advec_mom_kernel2_z");
OPS_kernels[138].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
if (xdim0 != xdim0_advec_mom_kernel2_z_h ||
ydim0 != ydim0_advec_mom_kernel2_z_h ||
xdim1 != xdim1_advec_mom_kernel2_z_h ||
ydim1 != ydim1_advec_mom_kernel2_z_h ||
xdim2 != xdim2_advec_mom_kernel2_z_h ||
ydim2 != ydim2_advec_mom_kernel2_z_h ||
xdim3 != xdim3_advec_mom_kernel2_z_h ||
ydim3 != ydim3_advec_mom_kernel2_z_h) {
cudaMemcpyToSymbol(xdim0_advec_mom_kernel2_z, &xdim0, sizeof(int));
xdim0_advec_mom_kernel2_z_h = xdim0;
cudaMemcpyToSymbol(ydim0_advec_mom_kernel2_z, &ydim0, sizeof(int));
ydim0_advec_mom_kernel2_z_h = ydim0;
cudaMemcpyToSymbol(xdim1_advec_mom_kernel2_z, &xdim1, sizeof(int));
xdim1_advec_mom_kernel2_z_h = xdim1;
cudaMemcpyToSymbol(ydim1_advec_mom_kernel2_z, &ydim1, sizeof(int));
ydim1_advec_mom_kernel2_z_h = ydim1;
cudaMemcpyToSymbol(xdim2_advec_mom_kernel2_z, &xdim2, sizeof(int));
xdim2_advec_mom_kernel2_z_h = xdim2;
cudaMemcpyToSymbol(ydim2_advec_mom_kernel2_z, &ydim2, sizeof(int));
ydim2_advec_mom_kernel2_z_h = ydim2;
cudaMemcpyToSymbol(xdim3_advec_mom_kernel2_z, &xdim3, sizeof(int));
xdim3_advec_mom_kernel2_z_h = xdim3;
cudaMemcpyToSymbol(ydim3_advec_mom_kernel2_z, &ydim3, sizeof(int));
ydim3_advec_mom_kernel2_z_h = ydim3;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
char *p_a[4];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[138].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_advec_mom_kernel2_z<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[138].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[138].mpi_time += t2 - t1;
OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 138;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 138;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg *)malloc(4 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->function = ops_par_loop_advec_mom_kernel2_z_execute;
if (OPS_diags > 1) {
ops_timing_realloc(138, "advec_mom_kernel2_z");
}
ops_enqueue_kernel(desc);
}
#endif
|
253aa35d0447b1dea21129e9fb2c02fde045ea7f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//////////////////////////////////////////////////////////////////////////////
// CUDA exercise 1: Basics
//////////////////////////////////////////////////////////////////////////////
// includes
#include <stdio.h>
#include <helper_timer.h>
#include <helper_error.h>
//////////////////////////////////////////////////////////////////////////////
// Kernel (for part 2 of the exercise)
//////////////////////////////////////////////////////////////////////////////
__global__ void multiplyKernel(int *d_a) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
d_a[idx] = d_a[idx] * 2;
}
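// Worked example of the index computation above: with the launch used in
// main (16 blocks of 16 threads), blockIdx.x and threadIdx.x each run 0..15,
// so idx covers 0..255 and every thread doubles exactly one of the 256
// array elements.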
//////////////////////////////////////////////////////////////////////////////
// Main function
//////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
//////// Declarations ////////////////////////////////////////////
// pointers to host memory
int *h_a_cpu; // pointer to CPU data
int *h_a_gpu; // pointer to GPU data on host
// pointer to device memory
int *d_a; // pointer to GPU data on device
// define grid and block sizes
int numBlocks = 16;
int numThreadsPerBlock = 16;
// number of elements and memory size
int nElem = numBlocks * numThreadsPerBlock;
size_t memSize = nElem * sizeof(int);
//////// Allocate Memory ///////////////////////////////////////
// Allocate host memory
h_a_cpu = (int *) malloc(memSize);
h_a_gpu = (int *) malloc(memSize);
// Allocate device memory
CUDA_CHECK(hipMalloc( &d_a, memSize));
//////// Initialize host memory ////////////////////////////////
for (int i = 0; i < nElem; i = i + 1) {
h_a_cpu[i] = 5 * i + 17;
}
//////// Copy Data From Host To Device /////////////////////////
hipMemcpy( d_a, h_a_cpu, memSize, hipMemcpyHostToDevice );
//////// Calculation (Host) ////////////////////////////////////
for (int i = 0; i < nElem; i++) {
h_a_cpu[i] = 2 * h_a_cpu[i];
}
//////// Calculation (Device) //////////////////////////////////
dim3 dimGrid(numBlocks) ;
dim3 dimBlock(numThreadsPerBlock);
// block until the device has completed
CUDA_CHECK_KERNEL(hipLaunchKernelGGL(( multiplyKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a));
// End of region for part 2 of the exercise ----------------
//
//////// Copy Data From Device To Host /////////////////////////
hipMemcpy( h_a_gpu, d_a, memSize, hipMemcpyDeviceToHost );
//////// Compare the results ///////////////////////////////////
for (int i = 0; i < nElem; i++) {
if (h_a_gpu[i] != h_a_cpu[i]) {
printf("Error: i=%d h_a_gpu[i]=%d h_a_cpu[i]=%d\n", i,
h_a_gpu[i], h_a_cpu[i]);
return 1;
}
}
//////// Free Memory ///////////////////////////////////////////
// free device memory
hipFree(d_a) ;
// free host memory
free(h_a_cpu);
free(h_a_gpu);
//////// End ///////////////////////////////////////////////////
printf("Success\n");
return 0;
}
| 253aa35d0447b1dea21129e9fb2c02fde045ea7f.cu | //////////////////////////////////////////////////////////////////////////////
// CUDA exercise 1: Basics
//////////////////////////////////////////////////////////////////////////////
// includes
#include <stdio.h>
#include <helper_timer.h>
#include <helper_error.h>
//////////////////////////////////////////////////////////////////////////////
// Kernel (for part 2 of the exercise)
//////////////////////////////////////////////////////////////////////////////
__global__ void multiplyKernel(int *d_a) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
d_a[idx] = d_a[idx] * 2;
}
//////////////////////////////////////////////////////////////////////////////
// Main function
//////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
//////// Declarations ////////////////////////////////////////////
// pointers to host memory
int *h_a_cpu; // pointer to CPU data
int *h_a_gpu; // pointer to GPU data on host
// pointer to device memory
int *d_a; // pointer to GPU data on device
// define grid and block sizes
int numBlocks = 16;
int numThreadsPerBlock = 16;
// number of elements and memory size
int nElem = numBlocks * numThreadsPerBlock;
size_t memSize = nElem * sizeof(int);
//////// Allocate Memory ///////////////////////////////////////
// Allocate host memory
h_a_cpu = (int *) malloc(memSize);
h_a_gpu = (int *) malloc(memSize);
// Allocate device memory
CUDA_CHECK(cudaMalloc( &d_a, memSize));
//////// Initialize host memory ////////////////////////////////
for (int i = 0; i < nElem; i = i + 1) {
h_a_cpu[i] = 5 * i + 17;
}
//////// Copy Data From Host To Device /////////////////////////
cudaMemcpy( d_a, h_a_cpu, memSize, cudaMemcpyHostToDevice );
//////// Calculation (Host) ////////////////////////////////////
for (int i = 0; i < nElem; i++) {
h_a_cpu[i] = 2 * h_a_cpu[i];
}
//////// Calculation (Device) //////////////////////////////////
dim3 dimGrid(numBlocks) ;
dim3 dimBlock(numThreadsPerBlock);
// block until the device has completed
CUDA_CHECK_KERNEL( multiplyKernel<<<dimGrid, dimBlock>>>(d_a));
// End of region for part 2 of the exercise ----------------
//
//////// Copy Data From Device To Host /////////////////////////
cudaMemcpy( h_a_gpu, d_a, memSize, cudaMemcpyDeviceToHost );
//////// Compare the results ///////////////////////////////////
for (int i = 0; i < nElem; i++) {
if (h_a_gpu[i] != h_a_cpu[i]) {
printf("Error: i=%d h_a_gpu[i]=%d h_a_cpu[i]=%d\n", i,
h_a_gpu[i], h_a_cpu[i]);
return 1;
}
}
//////// Free Memory ///////////////////////////////////////////
// free device memory
cudaFree(d_a) ;
// free host memory
free(h_a_cpu);
free(h_a_gpu);
//////// End ///////////////////////////////////////////////////
printf("Success\n");
return 0;
}
|
d706be793a5a6b865f823bbf114150d8c2021914.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlarft_kernels.cu, normal z -> s, Tue Aug 30 09:38:31 2016
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define sgemv_bs 32
#define BLOCK_SIZE 512
#define use_gemm_larft
extern __shared__ float shared_data[];
/******************************************************************************/
static __device__
void slarft_gemvcolwise_device(
int m, float *v, float *tau,
float *c, int ldc, float *T, int ldt, int step )
{
const int thblk = blockIdx.x;
if (thblk > step)
return;
    /* if blockIdx.x < step, the block performs z = V(tx:n,tx)' * V(tx:n,1:tx-1), used for computing T: */
if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
if (thblk < step) {
const int tx = threadIdx.x;
float *dc = c + blockIdx.x * ldc;
__shared__ float sum[ BLOCK_SIZE ];
float tmp;
/* perform {T_i}^H := V(:,i)' * V(:,1:i-1) */
if (tx == 0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_S_ZERO;
for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
tmp += MAGMA_S_CONJ( v[j] ) * dc[j];
}
sum[tx] = tmp;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_S_CONJ(sum[0]);
#else
tmp = - MAGMA_S_CONJ(*tau) * sum[0];
*(T+thblk) = MAGMA_S_CONJ(tmp); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
//*(T+thblk) = - MAGMA_S_CONJ(sum[0]) * (*tau); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
#endif
}
else {
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_S_ONE;
#else
*(T+thblk) = *tau;
#endif
}
}// in case tau is zero put the corresponding column of T to zero
else
{
*(T+thblk) = MAGMA_S_ZERO;
}
}
/******************************************************************************/
__global__
void slarft_gemvcolwise_kernel( int m, float *v, int ldv, float *tau,
float *T, int ldt, int step )
{
slarft_gemvcolwise_device(m, v+step+step*ldv, tau+step, v+step, ldv, T+step*ldt, ldt, step);
}
/******************************************************************************/
__global__
void slarft_gemvcolwise_kernel_batched( int m, float **v_array, int ldv, float **tau_array,
float **T_array, int ldt, int step )
{
int batchid = blockIdx.z;
slarft_gemvcolwise_device(m, v_array[batchid]+step+step*ldv, tau_array[batchid]+step, v_array[batchid]+step, ldv, T_array[batchid]+step*ldt, ldt, step);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemvcolwise(
magma_int_t m, magma_int_t step,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
float *tau,
magma_queue_t queue )
{
dim3 grid( step+1, 1, 1 );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( slarft_gemvcolwise_kernel)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, v, ldv, tau, T, ldt, step);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemvcolwise_batched(
magma_int_t m, magma_int_t step,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt,
float **tau_array, magma_int_t batchCount, magma_queue_t queue )
{
dim3 grid( step+1, 1, batchCount );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( slarft_gemvcolwise_kernel_batched)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, v_array, ldv, tau_array, T_array, ldt, step);
}
/******************************************************************************/
// sgemv(y=alpha*A*x) interface: T/W=tau*v*x,
static __device__ void
slarft_gemvrowwise_device(
int m, int i,
float *tau,
float *v_ptr, int ldv,
float *x_ptr, int incx,
float *T_ptr, int ldt,
float *W, float* sdata)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
if (tx == 0 && ty == 0)
{
T_ptr[0] = *tau;
}
if (i <= 0) return;
float res = MAGMA_S_ZERO;
v_ptr += ldv * ty;
if (tx < sgemv_bs)
{
for (int s=tx; s < m; s += sgemv_bs)
{
res += MAGMA_S_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * sgemv_bs + tx] = res;
}
__syncthreads();
magma_sum_reduce<sgemv_bs>(tx, &(sdata[ty*sgemv_bs+0]));
#if defined (use_gemm_larft)
if (tx == 0)
{
W[ty] = -sdata[ty * sgemv_bs + 0];
}
#else
if (tx == 0)
{
W[ty] = -sdata[ty * sgemv_bs + 0] * (*tau);
}
#endif
}
/******************************************************************************/
// T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i)
// T(i,i) = tau(i)
__global__ void
slarft_gemvrowwise_kernel(
int m, int i,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
float *W = T +i*ldt;
float *sdata = (float*)shared_data;
slarft_gemvrowwise_device(m, i, tau+i, v+i, ldv, v+i+i*ldv, 1,
T+i+i*ldt, ldt, W, sdata);
}
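// Sketch of what the row-wise kernel computes, mirroring the header comment
// above: W = T(1:i-1,i) accumulates -V(i:n,1:i-1)' * V(i:n,i), with
// threadIdx.y picking the column of V and threadIdx.x striding over rows
// before the shared-memory reduction; on the use_gemm_larft path the tau(i)
// factor is applied later, in the trmv step, while T(i,i) is set to tau(i)
// up front in the device routine.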
/******************************************************************************/
__global__ void
slarft_gemvrowwise_kernel_batched(
int m, int i,
float **tau_array,
float **v_array, int ldv,
float **T_array, int ldt)
{
int batchid = blockIdx.z;
float *W = T_array[batchid] +i*ldt;
float *sdata = (float*)shared_data;
slarft_gemvrowwise_device(m, i, tau_array[batchid]+i, v_array[batchid]+i, ldv, v_array[batchid]+i+i*ldv, 1,
T_array[batchid] +i+i*ldt, ldt, W, sdata);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemvrowwise(
magma_int_t m, magma_int_t i,
float *tau,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
float *W,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(sgemv_bs, max(i,1), 1);
size_t shmem = sizeof(float)*sgemv_bs*(i+1);
hipLaunchKernelGGL(( slarft_gemvrowwise_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, i, tau, v, ldv, T, ldt);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemvrowwise_batched(
magma_int_t m, magma_int_t i,
float **tau_array,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(sgemv_bs, max(i,1), 1);
size_t shmem = sizeof(float)*sgemv_bs*(i+1);
    /* sgemvrowwise uses a bigger shared memory tile and has more data reuse, so it performs better
    */
hipLaunchKernelGGL(( slarft_gemvrowwise_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, i, tau_array, v_array, ldv, T_array, ldt);
}
/******************************************************************************/
/*
loop_inside
*/
static __device__ void
slarft_gemv_loop_inside_device(
int n, int k,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int incx = 1;
float *sdata = (float*)shared_data;
float res;
    // write the first element
if (tx == 0 && ty == 0)
{
T[0] = tau[0];
}
for (int i=1; i < k; i++)
{
int m = n-i;
float *v_ptr = v;
v_ptr += i;
float *x_ptr = v_ptr + i * ldv;
res = MAGMA_S_ZERO;
if (tx < sgemv_bs && ty < i)
{
v_ptr += ldv * ty;
for (int s=tx; s < m; s += sgemv_bs)
{
res += MAGMA_S_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * sgemv_bs + tx] = res;
}
__syncthreads();
magma_sum_reduce<sgemv_bs>(tx, &(sdata[ty*sgemv_bs+0]));
__syncthreads();
#if defined (use_gemm_larft)
if (tx < i && ty == 0)
{
T[i* ldt + tx] = sdata[tx * sgemv_bs + 0];
}
// not needed since it is overwritten in trmv
/*
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
*/
#else
if (tx < i && ty == 0)
{
T[i* ldt + tx] = -sdata[tx * sgemv_bs + 0] * (tau[i]);
}
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
#endif
v_ptr -= i;
} // end of loop k
}
/******************************************************************************/
__global__ void
slarft_gemv_loop_inside_kernel(
int n, int k,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
slarft_gemv_loop_inside_device(n, k, tau, v, ldv, T, ldt);
}
/******************************************************************************/
__global__ void
slarft_gemv_loop_inside_kernel_batched(
int n, int k,
float **tau_array,
float **v_array, int ldv,
float **T_array, int ldt)
{
int batchid = blockIdx.z;
slarft_gemv_loop_inside_device(n, k, tau_array[batchid], v_array[batchid], ldv, T_array[batchid], ldt);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemv_loop_inside(
magma_int_t n, magma_int_t k,
float *tau,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(sgemv_bs, max(k,1), 1);
size_t shmem = sizeof(float) * (sgemv_bs*(k+1));
hipLaunchKernelGGL(( slarft_gemv_loop_inside_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
n, k, tau, v, ldv, T, ldt);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemv_loop_inside_batched(
magma_int_t n, magma_int_t k,
float **tau_array,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt, magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(sgemv_bs, max(k,1), 1);
size_t shmem = sizeof(float) * (sgemv_bs*(k+1));
hipLaunchKernelGGL(( slarft_gemv_loop_inside_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
n, k, tau_array, v_array, ldv, T_array, ldt);
}
/******************************************************************************/
static __device__ void
slarft_strmv_sm32x32_device(
int n, int k, float *tau,
float *Tin, int ldtin, float *Tout, int ldtout )
{
int tx = threadIdx.x;
float *sdata = (float*)shared_data;
float res;
    // this routine applies a sequence of trmv to update the last k columns of
    // the triangular T (columns n-k to n), where T is n by n and the first
    // n-k columns of T are assumed to have been updated previously.
    // So the routine loads all of the n x n T into shared memory
    // and applies the sequence of trmv.
    // To update a certain column i, threads go in horizontal fashion where
    // every thread reads one row and does a gemv (dot product) to generate
    // one element of the column of T, then moves to the next column
// read T into shared
for (int s=0; s < n-k; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#if defined(use_gemm_larft)
for (int s=n-k; s < n; s++)
{
if (tx == s)
sdata[tx + s*n] = tau[s];
else
sdata[tx + s*n] = -tau[s] * Tin[tx + s * ldtin];
}
#else
for (int s=n-k; s < n; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#endif
// perform trmv
for (int i=n-k; i < n; i++)
{
__syncthreads();
res = MAGMA_S_ZERO;
if (tx < i)
{
for (int j=tx; j < i; j++)
{
res += sdata[tx + j * n] * sdata[j+ i * n];
}
}
__syncthreads();
if (tx < i)
{
sdata[tx + i * n] = res;
}
}
__syncthreads();
    // write back the updated block of k columns of T
for (int s=n-k; s < n; s++)
{
Tout[tx + s * ldtout] = sdata[tx + s*n];
}
}
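// Equivalently, for each new column i the loop above computes, entirely in
// shared memory, T(1:i-1,i) = T(1:i-1,1:i-1) * t, where t is the column
// prepared in the load phase (-tau(i) * Tin(1:i-1,i), with the diagonal set
// to tau(i) on the use_gemm_larft path): one small triangular matrix-vector
// product per updated column.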
/******************************************************************************/
__global__ void
slarft_strmv_sm32x32_kernel(
int n, int k, float *tau,
float *Tin, int ldtin, float *Tout, int ldtout )
{
slarft_strmv_sm32x32_device( n, k, tau, Tin, ldtin, Tout, ldtout);
}
/******************************************************************************/
__global__ void
slarft_strmv_sm32x32_kernel_batched(
int n, int k, float **tau_array,
float **Tin_array, int ldtin, float **Tout_array, int ldtout )
{
int batchId = blockIdx.z;
slarft_strmv_sm32x32_device( n, k, tau_array[batchId], Tin_array[batchId], ldtin, Tout_array[batchId], ldtout);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_strmv_sm32x32(
magma_int_t m, magma_int_t n,
float *tau,
float *Tin, magma_int_t ldtin,
float *Tout, magma_int_t ldtout,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*m);
hipLaunchKernelGGL(( slarft_strmv_sm32x32_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau, Tin, ldtin, Tout, ldtout);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_strmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
float **tau_array,
float **Tin_array, magma_int_t ldtin,
float **Tout_array, magma_int_t ldtout,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*m);
hipLaunchKernelGGL(( slarft_strmv_sm32x32_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau_array, Tin_array, ldtin, Tout_array, ldtout);
}
/******************************************************************************/
static __device__ void
slarft_recstrmv_sm32x32_device(
int m, int n, float *tau,
float *Trec, int ldtrec, float *Ttri, int ldttri)
{
int tx = threadIdx.x;
float *sdata = (float*)shared_data;
float res;
    // To update a certain column i, threads go in horizontal fashion where
    // every thread reads one row and does a gemv (dot product) to generate
    // one element of the column of T, then moves to the next column
// read T into shared
for (int s=0; s < n; s++)
{
sdata[tx + s*n] = Trec[tx + s * ldtrec];
}
__syncthreads();
// perform sequence of n-1 gemv
for (int i=0; i < n; i++)
{
res = MAGMA_S_ZERO;
for (int j=0; j < i; j++)
{
res += sdata[tx + j * n] * Ttri[j+ i * ldttri];
}
        __syncthreads(); // to be removed
sdata[tx + i * n] = -tau[i] * (sdata[tx + i * n] + res);
__syncthreads();
}
    // write back the updated block of k columns of T, multiplied by -tau
for (int s=0; s < n; s++)
{
Trec[tx + s * ldtrec] = sdata[tx + s*n];
}
}
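// In matrix terms the loop above performs, column by column and in shared
// memory, Trec(:,i) = -tau(i) * ( Trec(:,i) + Trec(:,1:i-1) * Ttri(1:i-1,i) ),
// i.e. the off-diagonal block of T is updated against the already-built
// triangular factor Ttri and scaled by -tau.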
/******************************************************************************/
__global__ void
slarft_recstrmv_sm32x32_kernel(
int m, int n, float *tau,
float *Trec, int ldtrec, float *Ttri, int ldttri)
{
slarft_recstrmv_sm32x32_device(m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
/******************************************************************************/
__global__ void
slarft_recstrmv_sm32x32_kernel_batched(
int m, int n, float **tau_array,
float **Trec_array, int ldtrec, float **Ttri_array, int ldttri)
{
int batchId = blockIdx.z;
slarft_recstrmv_sm32x32_device(m, n, tau_array[batchId], Trec_array[batchId], ldtrec, Ttri_array[batchId], ldttri);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_recstrmv_sm32x32(
magma_int_t m, magma_int_t n,
float *tau,
float *Trec, magma_int_t ldtrec,
float *Ttri, magma_int_t ldttri,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*n);
hipLaunchKernelGGL(( slarft_recstrmv_sm32x32_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_recstrmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
float **tau_array,
float **Trec_array, magma_int_t ldtrec,
float **Ttri_array, magma_int_t ldttri,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*n);
hipLaunchKernelGGL(( slarft_recstrmv_sm32x32_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau_array, Trec_array, ldtrec, Ttri_array, ldttri);
}
| d706be793a5a6b865f823bbf114150d8c2021914.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlarft_kernels.cu, normal z -> s, Tue Aug 30 09:38:31 2016
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define sgemv_bs 32
#define BLOCK_SIZE 512
#define use_gemm_larft
extern __shared__ float shared_data[];
/******************************************************************************/
static __device__
void slarft_gemvcolwise_device(
int m, float *v, float *tau,
float *c, int ldc, float *T, int ldt, int step )
{
const int thblk = blockIdx.x;
if (thblk > step)
return;
    /* if blockIdx.x < step, the block performs z = V(tx:n,tx)' * V(tx:n,1:tx-1), used for computing T: */
if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
if (thblk < step) {
const int tx = threadIdx.x;
float *dc = c + blockIdx.x * ldc;
__shared__ float sum[ BLOCK_SIZE ];
float tmp;
/* perform {T_i}^H := V(:,i)' * V(:,1:i-1) */
if (tx == 0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_S_ZERO;
for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
tmp += MAGMA_S_CONJ( v[j] ) * dc[j];
}
sum[tx] = tmp;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_S_CONJ(sum[0]);
#else
tmp = - MAGMA_S_CONJ(*tau) * sum[0];
*(T+thblk) = MAGMA_S_CONJ(tmp); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
//*(T+thblk) = - MAGMA_S_CONJ(sum[0]) * (*tau); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
#endif
}
else {
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_S_ONE;
#else
*(T+thblk) = *tau;
#endif
}
}// in case tau is zero put the corresponding column of T to zero
else
{
*(T+thblk) = MAGMA_S_ZERO;
}
}
/******************************************************************************/
__global__
void slarft_gemvcolwise_kernel( int m, float *v, int ldv, float *tau,
float *T, int ldt, int step )
{
slarft_gemvcolwise_device(m, v+step+step*ldv, tau+step, v+step, ldv, T+step*ldt, ldt, step);
}
/******************************************************************************/
__global__
void slarft_gemvcolwise_kernel_batched( int m, float **v_array, int ldv, float **tau_array,
float **T_array, int ldt, int step )
{
int batchid = blockIdx.z;
slarft_gemvcolwise_device(m, v_array[batchid]+step+step*ldv, tau_array[batchid]+step, v_array[batchid]+step, ldv, T_array[batchid]+step*ldt, ldt, step);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemvcolwise(
magma_int_t m, magma_int_t step,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
float *tau,
magma_queue_t queue )
{
dim3 grid( step+1, 1, 1 );
dim3 threads( BLOCK_SIZE );
slarft_gemvcolwise_kernel
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, v, ldv, tau, T, ldt, step);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemvcolwise_batched(
magma_int_t m, magma_int_t step,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt,
float **tau_array, magma_int_t batchCount, magma_queue_t queue )
{
dim3 grid( step+1, 1, batchCount );
dim3 threads( BLOCK_SIZE );
slarft_gemvcolwise_kernel_batched
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, v_array, ldv, tau_array, T_array, ldt, step);
}
/******************************************************************************/
// sgemv(y=alpha*A*x) interface: T/W=tau*v*x,
static __device__ void
slarft_gemvrowwise_device(
int m, int i,
float *tau,
float *v_ptr, int ldv,
float *x_ptr, int incx,
float *T_ptr, int ldt,
float *W, float* sdata)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
if (tx == 0 && ty == 0)
{
T_ptr[0] = *tau;
}
if (i <= 0) return;
float res = MAGMA_S_ZERO;
v_ptr += ldv * ty;
if (tx < sgemv_bs)
{
for (int s=tx; s < m; s += sgemv_bs)
{
res += MAGMA_S_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * sgemv_bs + tx] = res;
}
__syncthreads();
magma_sum_reduce<sgemv_bs>(tx, &(sdata[ty*sgemv_bs+0]));
#if defined (use_gemm_larft)
if (tx == 0)
{
W[ty] = -sdata[ty * sgemv_bs + 0];
}
#else
if (tx == 0)
{
W[ty] = -sdata[ty * sgemv_bs + 0] * (*tau);
}
#endif
}
/******************************************************************************/
// T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i)
// T(i,i) = tau(i)
__global__ void
slarft_gemvrowwise_kernel(
int m, int i,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
float *W = T +i*ldt;
float *sdata = (float*)shared_data;
slarft_gemvrowwise_device(m, i, tau+i, v+i, ldv, v+i+i*ldv, 1,
T+i+i*ldt, ldt, W, sdata);
}
/******************************************************************************/
__global__ void
slarft_gemvrowwise_kernel_batched(
int m, int i,
float **tau_array,
float **v_array, int ldv,
float **T_array, int ldt)
{
int batchid = blockIdx.z;
float *W = T_array[batchid] +i*ldt;
float *sdata = (float*)shared_data;
slarft_gemvrowwise_device(m, i, tau_array[batchid]+i, v_array[batchid]+i, ldv, v_array[batchid]+i+i*ldv, 1,
T_array[batchid] +i+i*ldt, ldt, W, sdata);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemvrowwise(
magma_int_t m, magma_int_t i,
float *tau,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
float *W,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(sgemv_bs, max(i,1), 1);
size_t shmem = sizeof(float)*sgemv_bs*(i+1);
slarft_gemvrowwise_kernel
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, i, tau, v, ldv, T, ldt);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemvrowwise_batched(
magma_int_t m, magma_int_t i,
float **tau_array,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(sgemv_bs, max(i,1), 1);
size_t shmem = sizeof(float)*sgemv_bs*(i+1);
    /* sgemvrowwise uses more shared memory, has more data reuse, and performs better
    */
slarft_gemvrowwise_kernel_batched
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, i, tau_array, v_array, ldv, T_array, ldt);
}
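/******************************************************************************/
/*
    Added illustration (not part of the original MAGMA source): one plausible way
    a host driver could use the row-wise routine above to form T column by column
    for an m-by-nb panel. The names nb, dV, dT, dtau, the (m - i) row count and
    the NULL passed for the unused workspace argument W are assumptions made for
    this sketch only; the real driver lives elsewhere in MAGMA.
*/
static void example_slarft_rowwise_driver(
    magma_int_t m, magma_int_t nb,
    float *dtau,
    float *dV, magma_int_t ldv,
    float *dT, magma_int_t ldt,
    magma_queue_t queue )
{
    // column i of T depends only on columns 0..i-1 of V, so the columns are built
    // in order; each call launches one block of sgemv_bs x max(i,1) threads
    for (magma_int_t i = 0; i < nb; ++i) {
        magmablas_slarft_gemvrowwise( m - i, i, dtau, dV, ldv, dT, ldt, NULL, queue );
    }
}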
/******************************************************************************/
/*
loop_inside
*/
static __device__ void
slarft_gemv_loop_inside_device(
int n, int k,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int incx = 1;
float *sdata = (float*)shared_data;
float res;
    // write the first element
if (tx == 0 && ty == 0)
{
T[0] = tau[0];
}
for (int i=1; i < k; i++)
{
int m = n-i;
float *v_ptr = v;
v_ptr += i;
float *x_ptr = v_ptr + i * ldv;
res = MAGMA_S_ZERO;
if (tx < sgemv_bs && ty < i)
{
v_ptr += ldv * ty;
for (int s=tx; s < m; s += sgemv_bs)
{
res += MAGMA_S_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * sgemv_bs + tx] = res;
}
__syncthreads();
magma_sum_reduce<sgemv_bs>(tx, &(sdata[ty*sgemv_bs+0]));
__syncthreads();
#if defined (use_gemm_larft)
if (tx < i && ty == 0)
{
T[i* ldt + tx] = sdata[tx * sgemv_bs + 0];
}
// not needed since it is overwritten in trmv
/*
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
*/
#else
if (tx < i && ty == 0)
{
T[i* ldt + tx] = -sdata[tx * sgemv_bs + 0] * (tau[i]);
}
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
#endif
v_ptr -= i;
} // end of loop k
}
/******************************************************************************/
__global__ void
slarft_gemv_loop_inside_kernel(
int n, int k,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
slarft_gemv_loop_inside_device(n, k, tau, v, ldv, T, ldt);
}
/******************************************************************************/
__global__ void
slarft_gemv_loop_inside_kernel_batched(
int n, int k,
float **tau_array,
float **v_array, int ldv,
float **T_array, int ldt)
{
int batchid = blockIdx.z;
slarft_gemv_loop_inside_device(n, k, tau_array[batchid], v_array[batchid], ldv, T_array[batchid], ldt);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemv_loop_inside(
magma_int_t n, magma_int_t k,
float *tau,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(sgemv_bs, max(k,1), 1);
size_t shmem = sizeof(float) * (sgemv_bs*(k+1));
slarft_gemv_loop_inside_kernel
<<< grid, threads, shmem, queue->cuda_stream() >>>
(n, k, tau, v, ldv, T, ldt);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemv_loop_inside_batched(
magma_int_t n, magma_int_t k,
float **tau_array,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt, magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(sgemv_bs, max(k,1), 1);
size_t shmem = sizeof(float) * (sgemv_bs*(k+1));
slarft_gemv_loop_inside_kernel_batched
<<< grid, threads, shmem, queue->cuda_stream() >>>
(n, k, tau_array, v_array, ldv, T_array, ldt);
}
/******************************************************************************/
static __device__ void
slarft_strmv_sm32x32_device(
int n, int k, float *tau,
float *Tin, int ldtin, float *Tout, int ldtout )
{
int tx = threadIdx.x;
float *sdata = (float*)shared_data;
float res;
    // This routine applies a sequence of trmv to update the last k columns of the
    // triangular matrix T (columns n-k to n), where T is n by n and the first n-k
    // columns of T are assumed to have been updated previously.
    // The routine loads all of the n x n T into shared memory and applies the
    // sequence of trmv there.
    // To update a given column i, the threads work in a horizontal fashion: each
    // thread reads one row and performs a dot product (gemv) to generate one
    // element of that column of T, then moves on to the next column.
    // read T into shared memory
for (int s=0; s < n-k; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#if defined(use_gemm_larft)
for (int s=n-k; s < n; s++)
{
if (tx == s)
sdata[tx + s*n] = tau[s];
else
sdata[tx + s*n] = -tau[s] * Tin[tx + s * ldtin];
}
#else
for (int s=n-k; s < n; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#endif
// perform trmv
for (int i=n-k; i < n; i++)
{
__syncthreads();
res = MAGMA_S_ZERO;
if (tx < i)
{
for (int j=tx; j < i; j++)
{
res += sdata[tx + j * n] * sdata[j+ i * n];
}
}
__syncthreads();
if (tx < i)
{
sdata[tx + i * n] = res;
}
}
__syncthreads();
    // write back the updated block of k columns of T
for (int s=n-k; s < n; s++)
{
Tout[tx + s * ldtout] = sdata[tx + s*n];
}
}
/******************************************************************************/
__global__ void
slarft_strmv_sm32x32_kernel(
int n, int k, float *tau,
float *Tin, int ldtin, float *Tout, int ldtout )
{
slarft_strmv_sm32x32_device( n, k, tau, Tin, ldtin, Tout, ldtout);
}
/******************************************************************************/
__global__ void
slarft_strmv_sm32x32_kernel_batched(
int n, int k, float **tau_array,
float **Tin_array, int ldtin, float **Tout_array, int ldtout )
{
int batchId = blockIdx.z;
slarft_strmv_sm32x32_device( n, k, tau_array[batchId], Tin_array[batchId], ldtin, Tout_array[batchId], ldtout);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_strmv_sm32x32(
magma_int_t m, magma_int_t n,
float *tau,
float *Tin, magma_int_t ldtin,
float *Tout, magma_int_t ldtout,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*m);
slarft_strmv_sm32x32_kernel
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, n, tau, Tin, ldtin, Tout, ldtout);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_strmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
float **tau_array,
float **Tin_array, magma_int_t ldtin,
float **Tout_array, magma_int_t ldtout,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*m);
slarft_strmv_sm32x32_kernel_batched
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, n, tau_array, Tin_array, ldtin, Tout_array, ldtout);
}
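/******************************************************************************/
/*
    Added illustration (not part of the original MAGMA source): the loop_inside
    routine above fills T with the V(i:n,1:i-1)' * V(i:n,i) dot products and the
    sm32x32 trmv routine then applies the triangular updates that turn them into
    the final T factor. The composition below is a hypothetical sketch only: the
    names dV, dTwork and dT and the (k, k) sizes are assumptions, and the exact
    scaling stored by loop_inside depends on the use_gemm_larft compile path.
*/
static void example_slarft_form_T(
    magma_int_t n, magma_int_t k,
    float *dtau,
    float *dV, magma_int_t ldv,
    float *dTwork, magma_int_t ldtwork,   // k-by-k workspace receiving the dot products
    float *dT, magma_int_t ldt,           // k-by-k output T factor
    magma_queue_t queue )
{
    // step 1: accumulate the column dot products of V into the workspace
    magmablas_slarft_gemv_loop_inside( n, k, dtau, dV, ldv, dTwork, ldtwork, queue );
    // step 2: triangular (trmv) update of all k columns, writing the final T
    magmablas_slarft_strmv_sm32x32( k, k, dtau, dTwork, ldtwork, dT, ldt, queue );
}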
/******************************************************************************/
static __device__ void
slarft_recstrmv_sm32x32_device(
int m, int n, float *tau,
float *Trec, int ldtrec, float *Ttri, int ldttri)
{
int tx = threadIdx.x;
float *sdata = (float*)shared_data;
float res;
    // To update a given column i, the threads work in a horizontal fashion: each
    // thread reads one row and performs a dot product (gemv) to generate one
    // element of that column of T, then moves on to the next column.
    // read T into shared memory
for (int s=0; s < n; s++)
{
sdata[tx + s*n] = Trec[tx + s * ldtrec];
}
__syncthreads();
// perform sequence of n-1 gemv
for (int i=0; i < n; i++)
{
res = MAGMA_S_ZERO;
for (int j=0; j < i; j++)
{
res += sdata[tx + j * n] * Ttri[j+ i * ldttri];
}
        __syncthreads(); // to be removed
sdata[tx + i * n] = -tau[i] * (sdata[tx + i * n] + res);
__syncthreads();
}
    // write back the updated block of k columns of T, scaled by -tau
for (int s=0; s < n; s++)
{
Trec[tx + s * ldtrec] = sdata[tx + s*n];
}
}
/******************************************************************************/
__global__ void
slarft_recstrmv_sm32x32_kernel(
int m, int n, float *tau,
float *Trec, int ldtrec, float *Ttri, int ldttri)
{
slarft_recstrmv_sm32x32_device(m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
/******************************************************************************/
__global__ void
slarft_recstrmv_sm32x32_kernel_batched(
int m, int n, float **tau_array,
float **Trec_array, int ldtrec, float **Ttri_array, int ldttri)
{
int batchId = blockIdx.z;
slarft_recstrmv_sm32x32_device(m, n, tau_array[batchId], Trec_array[batchId], ldtrec, Ttri_array[batchId], ldttri);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_recstrmv_sm32x32(
magma_int_t m, magma_int_t n,
float *tau,
float *Trec, magma_int_t ldtrec,
float *Ttri, magma_int_t ldttri,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*n);
slarft_recstrmv_sm32x32_kernel
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_recstrmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
float **tau_array,
float **Trec_array, magma_int_t ldtrec,
float **Ttri_array, magma_int_t ldttri,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*n);
slarft_recstrmv_sm32x32_kernel_batched
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, n, tau_array, Trec_array, ldtrec, Ttri_array, ldttri);
}
|
8fb43e950a2232e35b035d659aab9edc858482d2.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _WARPS_V_PERF_
#define _WARPS_V_PERF_
// Define Defaults ------------------------------------------------------------
// Hardware Dependent - NV GeForce 9500 GT
#define NUM_THREADS_PER_BLOCK 0 // Gets iterated over
#define ARRAY_SIZE 0 // Changed per iteration
#define NUM_BLOCKS 4
#define N_THREADS_PER_WARP 32
#define NUM_LOOPS 16
// includes -------------------------------------------------------------------
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "kernel.hip" // Kernel to Maximize FLOPS
#include "common/helpers.c"
// Forward Declarations --------------------------------------------------------
float runTest(int num_threads);
// Main -----------------------------------------------------------------------
int main( int argc, char** argv) {
printf("Testing Number of Warps vs. Performance\n");
printf("Written by Phil Monroe and Kramer Straube\n");
printf("\n");
FILE *file;
file = fopen("out.csv","a+");
for(int iter = 0; iter < NUM_LOOPS; ++iter){
printf("Iteration %d\n", iter);
float perf = runTest((iter+1) * N_THREADS_PER_WARP);
fprintf(file, "%d, %d, %f\n", iter, iter+1, perf);
}
fclose(file);
exit(0);
}
// runTest --------------------------------------------------------------------
// Runs a simple test to determine the throughput (GFLOPS and threads per second)
// achieved for a given number of threads
//
float runTest( int num_threads) {
printf("Testing %4d Threads\n", num_threads);
// Initialize counters on host and device to 0.0f
float *h_counters, *d_counters;
initArray(&h_counters, &d_counters, num_threads);
// Create and Start Timer
hipEvent_t start, stop;
startTest(start, stop);
// Run the test
hipLaunchKernelGGL(( warps_v_perf_kernel), dim3(NUM_BLOCKS), dim3(num_threads), 0, 0, d_counters);
// Get the time elapsed
float time_s = finishTest(start, stop);
// Calculate Performance
unsigned long long total_flops = (long long)N_FLOPS_PER_KERNEL * (long long) NUM_BLOCKS * num_threads;
printf("Total FLOPs: %lld\n", total_flops);
float gflops = total_flops/(time_s*1000000000.0f);
printf("GFLOPS: %.3f\n", gflops);
float perf = num_threads/(time_s* 1000.0f);
printf("Threads per Sec.: %.3f KThreads/s\n", perf);
printf("\n");
// Cleanup
free(h_counters);
hipFree(d_counters);
return gflops;
}
#endif /* _WARPS_V_PERF_ */
| 8fb43e950a2232e35b035d659aab9edc858482d2.cu | #ifndef _WARPS_V_PERF_
#define _WARPS_V_PERF_
// Define Defaults ------------------------------------------------------------
// Hardware Dependent - NV GeForce 9500 GT
#define NUM_THREADS_PER_BLOCK 0 // Gets iterated over
#define ARRAY_SIZE 0 // Changed per iteration
#define NUM_BLOCKS 4
#define N_THREADS_PER_WARP 32
#define NUM_LOOPS 16
// includes -------------------------------------------------------------------
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include "kernel.cu" // Kernel to Maximize FLOPS
#include "common/helpers.c"
// Forward Declarations --------------------------------------------------------
float runTest(int num_threads);
// Main -----------------------------------------------------------------------
int main( int argc, char** argv) {
printf("Testing Number of Warps vs. Performance\n");
printf("Written by Phil Monroe and Kramer Straube\n");
printf("\n");
FILE *file;
file = fopen("out.csv","a+");
for(int iter = 0; iter < NUM_LOOPS; ++iter){
printf("Iteration %d\n", iter);
float perf = runTest((iter+1) * N_THREADS_PER_WARP);
fprintf(file, "%d, %d, %f\n", iter, iter+1, perf);
}
fclose(file);
exit(0);
}
// runTest --------------------------------------------------------------------
// Runs a simple test to determine the throughput (GFLOPS and threads per second)
// achieved for a given number of threads
//
float runTest( int num_threads) {
printf("Testing %4d Threads\n", num_threads);
// Initialize counters on host and device to 0.0f
float *h_counters, *d_counters;
initArray(&h_counters, &d_counters, num_threads);
// Create and Start Timer
cudaEvent_t start, stop;
startTest(start, stop);
// Run the test
warps_v_perf_kernel<<< NUM_BLOCKS, num_threads>>>(d_counters);
// Get the time elapsed
float time_s = finishTest(start, stop);
// Calculate Performance
unsigned long long total_flops = (long long)N_FLOPS_PER_KERNEL * (long long) NUM_BLOCKS * num_threads;
printf("Total FLOPs: %lld\n", total_flops);
float gflops = total_flops/(time_s*1000000000.0f);
printf("GFLOPS: %.3f\n", gflops);
float perf = num_threads/(time_s* 1000.0f);
printf("Threads per Sec.: %.3f KThreads/s\n", perf);
printf("\n");
// Cleanup
free(h_counters);
cudaFree(d_counters);
return gflops;
}
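// Added sketch ----------------------------------------------------------------
// startTest()/finishTest() come from common/helpers.c and are not shown in this
// file. The function below is a hypothetical, self-contained illustration of the
// CUDA event-timing pattern such helpers typically wrap; it is not the project's
// actual implementation, and the name example_time_kernel is made up.
static float example_time_kernel(float *d_counters, int num_threads) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);              // mark the start on the default stream
    warps_v_perf_kernel<<< NUM_BLOCKS, num_threads >>>(d_counters);
    cudaEventRecord(stop, 0);               // mark the end
    cudaEventSynchronize(stop);             // wait until the kernel has finished
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop); // elapsed time in milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms / 1000.0f;                    // seconds, matching how runTest() uses the result
}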
#endif /* _WARPS_V_PERF_ */
|
224a43033dc80a896d7d924159731506d6c766e3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
// return GB/sec
float GBPerSec(int bytes, float sec) {
return static_cast<float>(bytes) / (1024. * 1024. * 1024.) / sec;
}
// This is the CUDA "kernel" function that is run on the GPU. You
// know this because it is marked as a __global__ function.
__global__ void
saxpy_kernel(int N, float alpha, float* x, float* y, float* result) {
// compute overall thread index from position of thread in current
// block, and given the block we are in (in this example only a 1D
// calculation is needed so the code only looks at the .x terms of
// blockDim and threadIdx.
int index = blockIdx.x * blockDim.x + threadIdx.x;
// this check is necessary to make the code work for values of N
// that are not a multiple of the thread block size (blockDim.x)
if (index < N)
result[index] = alpha * x[index] + y[index];
}
// saxpyCuda --
//
// This function is regular C code running on the CPU. It allocates
// memory on the GPU using CUDA API functions, uses CUDA API functions
// to transfer data from the CPU's memory address space to GPU memory
// address space, and launches the CUDA kernel function on the GPU.
void saxpyCuda(int N, float alpha, float* xarray, float* yarray, float* resultarray) {
// must read both input arrays (xarray and yarray) and write to
// output array (resultarray)
int totalBytes = sizeof(float) * 3 * N;
// compute number of blocks and threads per block. In this
// application we've hardcoded thread blocks to contain 512 CUDA
// threads.
const int threadsPerBlock = 512;
    // Notice the round up here. The code needs to compute the number
    // of thread blocks needed such that there is one thread per
    // element of the arrays. This code is written to work for values
    // of N that are not multiples of threadsPerBlock.
const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
    // These are pointers that will point to memory allocated
    // *on the GPU*. You should allocate these pointers via
    // hipMalloc. You can access the resulting buffers from CUDA
    // device kernel code (see the kernel function saxpy_kernel()
    // above) but you cannot access the contents of these buffers from
// this thread. CPU threads cannot issue loads and stores from GPU
// memory!
float* device_x;
float* device_y;
float* device_result;
//
// CS149 TODO: allocate device memory buffers on the GPU using hipMalloc.
//
// We highly recommend taking a look at NVIDIA's
// tutorial, which clearly walks you through the few lines of code
// you need to write for this part of the assignment:
//
// https://devblogs.nvidia.com/easy-introduction-cuda-c-and-c/
//
hipMalloc(&device_x, N*sizeof(float));
hipMalloc(&device_y, N*sizeof(float));
hipMalloc(&device_result, N*sizeof(float));
// start timing after allocation of device memory
double startTime = CycleTimer::currentSeconds();
//
// CS149 TODO: copy input arrays to the GPU using hipMemcpy
//
hipMemcpy(device_x, xarray, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_y, yarray, N*sizeof(float), hipMemcpyHostToDevice);
hipError_t errCode = hipPeekAtLastError();
if (errCode != hipSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured for memory alloc: code=%d, %s\n",
errCode, hipGetErrorString(errCode));
}
double endTime = CycleTimer::currentSeconds();
double overallDuration = endTime - startTime;
printf("Effective BW by CUDA saxpy: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration,
GBPerSec(totalBytes/3*2, overallDuration));
// run CUDA kernel. (notice the <<< >>> brackets indicating a CUDA
// kernel launch) Execution on the GPU occurs here.
double kernelStartTime = CycleTimer::currentSeconds();
hipLaunchKernelGGL(( saxpy_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, alpha, device_x, device_y, device_result);
hipDeviceSynchronize();
double kernelDuration = CycleTimer::currentSeconds() - kernelStartTime;
//
// CS149 TODO: copy result from GPU back to CPU using hipMemcpy
//
hipDeviceSynchronize();
startTime = CycleTimer::currentSeconds();
hipMemcpy(resultarray, device_result, N*sizeof(float), hipMemcpyDeviceToHost);
// end timing after result has been copied back into host memory
endTime = CycleTimer::currentSeconds();
errCode = hipPeekAtLastError();
if (errCode != hipSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n",
errCode, hipGetErrorString(errCode));
}
overallDuration = endTime - startTime;
printf("Effective BW by CUDA saxpy: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration,
GBPerSec(totalBytes/3, overallDuration));
printf("Kernel Runtime by CUDA saxpy: %.3f ms\n", 1000.f * kernelDuration);
//
// CS149 TODO: free memory buffers on the GPU using hipFree
//
hipFree(device_x); hipFree(device_y); hipFree(device_result);
}
void printCudaInfo() {
// print out stats about the GPU in the machine. Useful if
// students want to know what GPU they are running on.
int deviceCount = 0;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
| 224a43033dc80a896d7d924159731506d6c766e3.cu | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
// return GB/sec
float GBPerSec(int bytes, float sec) {
return static_cast<float>(bytes) / (1024. * 1024. * 1024.) / sec;
}
// This is the CUDA "kernel" function that is run on the GPU. You
// know this because it is marked as a __global__ function.
__global__ void
saxpy_kernel(int N, float alpha, float* x, float* y, float* result) {
// compute overall thread index from position of thread in current
// block, and given the block we are in (in this example only a 1D
// calculation is needed so the code only looks at the .x terms of
// blockDim and threadIdx.
int index = blockIdx.x * blockDim.x + threadIdx.x;
// this check is necessary to make the code work for values of N
// that are not a multiple of the thread block size (blockDim.x)
if (index < N)
result[index] = alpha * x[index] + y[index];
}
// saxpyCuda --
//
// This function is regular C code running on the CPU. It allocates
// memory on the GPU using CUDA API functions, uses CUDA API functions
// to transfer data from the CPU's memory address space to GPU memory
// address space, and launches the CUDA kernel function on the GPU.
void saxpyCuda(int N, float alpha, float* xarray, float* yarray, float* resultarray) {
// must read both input arrays (xarray and yarray) and write to
// output array (resultarray)
int totalBytes = sizeof(float) * 3 * N;
// compute number of blocks and threads per block. In this
// application we've hardcoded thread blocks to contain 512 CUDA
// threads.
const int threadsPerBlock = 512;
    // Notice the round up here. The code needs to compute the number
    // of thread blocks needed such that there is one thread per
    // element of the arrays. This code is written to work for values
    // of N that are not multiples of threadsPerBlock.
const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
    // These are pointers that will point to memory allocated
    // *on the GPU*. You should allocate these pointers via
    // cudaMalloc. You can access the resulting buffers from CUDA
    // device kernel code (see the kernel function saxpy_kernel()
    // above) but you cannot access the contents of these buffers from
// this thread. CPU threads cannot issue loads and stores from GPU
// memory!
float* device_x;
float* device_y;
float* device_result;
//
// CS149 TODO: allocate device memory buffers on the GPU using cudaMalloc.
//
// We highly recommend taking a look at NVIDIA's
// tutorial, which clearly walks you through the few lines of code
// you need to write for this part of the assignment:
//
// https://devblogs.nvidia.com/easy-introduction-cuda-c-and-c/
//
cudaMalloc(&device_x, N*sizeof(float));
cudaMalloc(&device_y, N*sizeof(float));
cudaMalloc(&device_result, N*sizeof(float));
// start timing after allocation of device memory
double startTime = CycleTimer::currentSeconds();
//
// CS149 TODO: copy input arrays to the GPU using cudaMemcpy
//
cudaMemcpy(device_x, xarray, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_y, yarray, N*sizeof(float), cudaMemcpyHostToDevice);
cudaError_t errCode = cudaPeekAtLastError();
if (errCode != cudaSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured for memory alloc: code=%d, %s\n",
errCode, cudaGetErrorString(errCode));
}
double endTime = CycleTimer::currentSeconds();
double overallDuration = endTime - startTime;
printf("Effective BW by CUDA saxpy: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration,
GBPerSec(totalBytes/3*2, overallDuration));
// run CUDA kernel. (notice the <<< >>> brackets indicating a CUDA
// kernel launch) Execution on the GPU occurs here.
double kernelStartTime = CycleTimer::currentSeconds();
saxpy_kernel<<<blocks, threadsPerBlock>>>(N, alpha, device_x, device_y, device_result);
cudaDeviceSynchronize();
double kernelDuration = CycleTimer::currentSeconds() - kernelStartTime;
//
// CS149 TODO: copy result from GPU back to CPU using cudaMemcpy
//
cudaThreadSynchronize();
startTime = CycleTimer::currentSeconds();
cudaMemcpy(resultarray, device_result, N*sizeof(float), cudaMemcpyDeviceToHost);
// end timing after result has been copied back into host memory
endTime = CycleTimer::currentSeconds();
errCode = cudaPeekAtLastError();
if (errCode != cudaSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n",
errCode, cudaGetErrorString(errCode));
}
overallDuration = endTime - startTime;
printf("Effective BW by CUDA saxpy: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration,
GBPerSec(totalBytes/3, overallDuration));
printf("Kernel Runtime by CUDA saxpy: %.3f ms\n", 1000.f * kernelDuration);
//
// CS149 TODO: free memory buffers on the GPU using cudaFree
//
cudaFree(device_x); cudaFree(device_y); cudaFree(device_result);
}
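// Added sketch: a hypothetical host-side caller showing how saxpyCuda() above is
// meant to be driven. The real harness (main.cpp) is not included in this file;
// the array size, the alpha value, and the exampleRunSaxpy name are made up.
static void exampleRunSaxpy() {
    const int N = 20 * 1000 * 1000;   // hypothetical problem size
    const float alpha = 2.0f;         // hypothetical scale factor
    float* x      = new float[N];
    float* y      = new float[N];
    float* result = new float[N];
    for (int i = 0; i < N; i++) { x[i] = (float)i; y[i] = (float)(2 * i); }
    // saxpyCuda allocates the device buffers, copies x and y over, launches the
    // kernel, and copies the result back into `result`
    saxpyCuda(N, alpha, x, y, result);
    delete[] x; delete[] y; delete[] result;
}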
void printCudaInfo() {
// print out stats about the GPU in the machine. Useful if
// students want to know what GPU they are running on.
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
|
2929576371751ecc0875103740c1f9cc83c1091f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file ex_particle_OPENMP_seq.c
* @author Michael Trotter & Matt Goodrum
* @brief Particle filter implementation in C/OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define PI acos(-1)
#define BLOCK_X 16
#define BLOCK_Y 16
//__constant__ int c_CDF[1000];
texture<int,1,hipReadModeElementType> tex_arrayX;
texture<int,1,hipReadModeElementType> tex_arrayY;
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
const int threads_per_block = 128;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) + tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
float elapsed_time(long long start_time, long long end_time) {
return (float) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(hipError_t e) {
if (e != hipSuccess) {
printf("\nCUDA error: %s\n", hipGetErrorString(e));
exit(1);
}
}
__device__ int findIndexSeq(double * CDF, int lengthCDF, double value)
{
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++)
{
if(CDF[x] >= value)
{
index = x;
break;
}
}
if(index == -1)
return lengthCDF-1;
return index;
}
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value)
{
if(endIndex < beginIndex)
return -1;
int middleIndex;
while(endIndex > beginIndex)
{
middleIndex = beginIndex + ((endIndex-beginIndex)/2);
if(CDF[middleIndex] >= value)
{
if(middleIndex == 0)
return middleIndex;
else if(CDF[middleIndex-1] < value)
return middleIndex;
else if(CDF[middleIndex-1] == value)
{
while(CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if(CDF[middleIndex] > value)
endIndex = middleIndex-1;
else
beginIndex = middleIndex+1;
}
return -1;
}
/*****************************
* CUDA Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: Nparticles
*****************************/
__global__ void kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, int Nparticles){
int block_id = blockIdx.x;// + gridDim.x * blockIdx.y;
int i = blockDim.x * block_id + threadIdx.x;
if(i < Nparticles){
__shared__ double s_u[128];
s_u[threadIdx.x]=u[i];
__syncthreads();
int index = -1;
int x;
for(x = 0; x < Nparticles; x++){
if(CDF[x] >= s_u[threadIdx.x]){
index = x;
break;
}
}
if(index == -1){
index = Nparticles-1;
}
xj[i] =arrayX[index];
yj[i] = arrayY[index];
}
}
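/*****************************
 * Added sketch (not part of the original benchmark): the device helper
 * findIndexBin() defined above is never called by the kernel; a hypothetical
 * variant that replaces the O(Nparticles) linear scan with a binary search over
 * the nondecreasing CDF could look like the following. findIndexBin() can return
 * -1 at the boundaries, which is mapped to the last particle exactly as in the
 * original kernel. Illustration only.
 *****************************/
__global__ void kernel_binsearch(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, int Nparticles){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if(i < Nparticles){
        // the CDF is nondecreasing, so the first entry >= u[i] can be found by bisection
        int index = findIndexBin(CDF, 0, Nparticles-1, u[i]);
        if(index == -1){
            index = Nparticles-1;
        }
        xj[i] = arrayX[index];
        yj[i] = arrayY[index];
    }
}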
/**
* Takes in a double and returns an integer that approximates to that double
 * @return the input rounded to the nearest integer: fractional part < .5 rounds down, otherwise up
*/
double roundDouble(double value){
int newValue = (int)(value);
if(value - newValue < .5)
return newValue;
else
return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double randu(int * seed, int index)
{
int num = A*seed[index] + C;
seed[index] = num % M;
return fabs(seed[index]/((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index){
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2*PI*v);
double rt = -2*log(u);
return sqrt(rt)*cosine;
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius)
{
int diameter = radius*2 - 1;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
if(distance < radius)
disk[x*diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
int startX = posX - error;
while(startX < 0)
startX++;
int startY = posY - error;
while(startY < 0)
startY++;
int endX = posX + error;
while(endX > dimX)
endX--;
int endY = posY + error;
while(endY > dimY)
endY--;
int x,y;
for(x = startX; x < endX; x++){
for(y = startY; y < endY; y++){
double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
if(distance < error)
matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
int x, y, z;
for(z = 0; z < dimZ; z++){
for(x = 0; x < dimX; x++){
for(y = 0; y < dimY; y++){
if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, double * neighbors, int radius){
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius*2 -1;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(se[x*diameter + y]){
neighbors[neighY*2] = (int)(y - center);
neighbors[neighY*2 + 1] = (int)(x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
 * the foreground intensity and the background intensity are known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
int k;
int max_size = IszX*IszY*Nfr;
/*get object centers*/
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k-1));
yk = abs(y0 - 2*(k-1));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(k = 0; k < Nfr; k++){
I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Determines the likelihood sum based on the formula: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 100
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
double calcLikelihoodSum(int * I, int * ind, int numOnes){
double likelihoodSum = 0.0;
int y;
for(y = 0; y < numOnes; y++)
likelihoodSum += (pow((double)(I[ind[y]] - 100),2) - pow((double)(I[ind[y]]-228),2))/50.0;
return likelihoodSum;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value){
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++){
if(CDF[x] >= value){
index = x;
break;
}
}
if(index == -1){
return lengthCDF-1;
}
return index;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){
int max_size = IszX*IszY*Nfr;
long long start = get_time();
//original particle centroid
double xe = roundDouble(IszY/2.0);
double ye = roundDouble(IszX/2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius*2 - 1;
int * disk = (int *)malloc(diameter*diameter*sizeof(int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
double * objxy = (double *)malloc(countOnes*2*sizeof(double));
getneighbors(disk, countOnes, objxy, radius);
long long get_neighbors = get_time();
printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors));
//initial weights are all equal (1/Nparticles)
double * weights = (double *)malloc(sizeof(double)*Nparticles);
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((double)(Nparticles));
}
long long get_weights = get_time();
printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights));
//initial likelihood to 0.0
double * likelihood = (double *)malloc(sizeof(double)*Nparticles);
double * arrayX = (double *)malloc(sizeof(double)*Nparticles);
double * arrayY = (double *)malloc(sizeof(double)*Nparticles);
double * xj = (double *)malloc(sizeof(double)*Nparticles);
double * yj = (double *)malloc(sizeof(double)*Nparticles);
double * CDF = (double *)malloc(sizeof(double)*Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
int * ind = (int*)malloc(sizeof(int)*countOnes);
double * u = (double *)malloc(sizeof(double)*Nparticles);
double * u_GPU;
//CUDA memory allocation
check_error(hipMalloc((void **) &arrayX_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &arrayY_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &xj_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &yj_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &CDF_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &u_GPU, sizeof(double)*Nparticles));
for(x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
int k;
//double * Ik = (double *)malloc(sizeof(double)*IszX*IszY);
int indX, indY;
for(k = 1; k < Nfr; k++){
long long set_arrays = get_time();
//printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, set_arrays));
//apply motion model
//draws sample from motion model (random walk). The only prior information
//is that the object moves 2x as fast as in the y direction
for(x = 0; x < Nparticles; x++){
arrayX[x] = arrayX[x] + 1.0 + 5.0*randn(seed, x);
arrayY[x] = arrayY[x] - 2.0 + 2.0*randn(seed, x);
}
//particle filter likelihood
long long error = get_time();
printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error));
for(x = 0; x < Nparticles; x++){
//compute the likelihood: remember our assumption is that you know
// the foreground and background image intensity distributions.
// Notice that we consider here a likelihood ratio, instead of
// p(z|x). It is possible in this case; working out why is left as an exercise.
//calc ind
for(y = 0; y < countOnes; y++){
indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
indY = roundDouble(arrayY[x]) + objxy[y*2];
ind[y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[y] >= max_size)
ind[y] = 0;
}
likelihood[x] = calcLikelihoodSum(I, ind, countOnes);
likelihood[x] = likelihood[x]/countOnes;
}
long long likelihood_time = get_time();
printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time));
// update & normalize weights
// using equation (63) of Arulampalam Tutorial
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
}
long long exponential = get_time();
printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential));
double sumWeights = 0;
for(x = 0; x < Nparticles; x++){
sumWeights += weights[x];
}
long long sum_time = get_time();
printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time));
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x]/sumWeights;
}
long long normalize = get_time();
printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize));
xe = 0;
ye = 0;
// estimate the object location by expected values
for(x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
long long move_time = get_time();
printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time));
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
printf("%lf\n", distance);
//display(hold off for now)
//pause(hold off for now)
//resampling
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
long long cum_sum = get_time();
printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum));
double u1 = (1/((double)(Nparticles)))*randu(seed, 0);
for(x = 0; x < Nparticles; x++){
u[x] = u1 + x/((double)(Nparticles));
}
long long u_time = get_time();
printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time));
long long start_copy = get_time();
//CUDA memory copying from CPU memory to GPU memory
// hipMalloc((void **)&c_CDF,sizeof(double)*Nparticles);
hipMemcpy(arrayX_GPU, arrayX, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(arrayY_GPU, arrayY, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(xj_GPU, xj, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(yj_GPU, yj, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(CDF_GPU, CDF, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(u_GPU, u, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
//hipMemcpyToSymbol(c_CDF,CDF,sizeof(double)*Nparticles);
double* Address;
hipBindTexture(0,tex_arrayX,arrayX,sizeof(double)*Nparticles);
hipBindTexture(0,tex_arrayY,arrayY,sizeof(double)*Nparticles);
//hipGetSymbolAddress((void **)&Address,c_CDF);
long long end_copy = get_time();
//Set number of threads
int num_blocks = ceil((double) Nparticles/(double) threads_per_block);
//KERNEL FUNCTION CALL
hipLaunchKernelGGL(( kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles);
hipDeviceSynchronize();
long long start_copy_back = get_time();
//CUDA memory copying back from GPU to CPU memory
hipMemcpy(yj, yj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost);
hipMemcpy(xj, xj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost);
long long end_copy_back = get_time();
printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy));
printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back));
printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back));
long long xyj_time = get_time();
printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time));
for(x = 0; x < Nparticles; x++){
//reassign arrayX and arrayY
arrayX[x] = xj[x];
arrayY[x] = yj[x];
weights[x] = 1/((double)(Nparticles));
}
long long reset = get_time();
printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset));
}
//CUDA freeing of memory
hipFree(u_GPU);
hipFree(CDF_GPU);
hipFree(yj_GPU);
hipFree(xj_GPU);
hipFree(arrayY_GPU);
hipFree(arrayX_GPU);
//free memory
free(disk);
free(objxy);
free(weights);
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(u);
free(ind);
}
int main(int argc, char * argv[]){
char* usage = "naive.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if(argc != 9)
{
printf("%s\n", usage);
return 0;
}
//check args deliminators
if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) {
printf( "%s\n",usage );
return 0;
}
int IszX, IszY, Nfr, Nparticles;
//converting a string to a integer
if( sscanf( argv[2], "%d", &IszX ) == EOF ) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if( IszX <= 0 ) {
printf("dimX must be > 0\n");
return 0;
}
//converting a string to a integer
if( sscanf( argv[4], "%d", &IszY ) == EOF ) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if( IszY <= 0 ) {
printf("dimY must be > 0\n");
return 0;
}
//converting a string to a integer
if( sscanf( argv[6], "%d", &Nfr ) == EOF ) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if( Nfr <= 0 ) {
printf("number of frames must be > 0\n");
return 0;
}
//converting a string to a integer
if( sscanf( argv[8], "%d", &Nparticles ) == EOF ) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if( Nparticles <= 0 ) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *)malloc(sizeof(int)*Nparticles);
int i;
for(i = 0; i < Nparticles; i++)
seed[i] = time(0)*i;
//malloc matrix
int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
| 2929576371751ecc0875103740c1f9cc83c1091f.cu | /**
* @file ex_particle_OPENMP_seq.c
* @author Michael Trotter & Matt Goodrum
* @brief Particle filter implementation in C/OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define PI acos(-1)
#define BLOCK_X 16
#define BLOCK_Y 16
//__constant__ int c_CDF[1000];
texture<int,1,cudaReadModeElementType> tex_arrayX;
texture<int,1,cudaReadModeElementType> tex_arrayY;
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
const int threads_per_block = 128;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) + tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
float elapsed_time(long long start_time, long long end_time) {
return (float) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
__device__ int findIndexSeq(double * CDF, int lengthCDF, double value)
{
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++)
{
if(CDF[x] >= value)
{
index = x;
break;
}
}
if(index == -1)
return lengthCDF-1;
return index;
}
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value)
{
if(endIndex < beginIndex)
return -1;
int middleIndex;
while(endIndex > beginIndex)
{
middleIndex = beginIndex + ((endIndex-beginIndex)/2);
if(CDF[middleIndex] >= value)
{
if(middleIndex == 0)
return middleIndex;
else if(CDF[middleIndex-1] < value)
return middleIndex;
else if(CDF[middleIndex-1] == value)
{
while(CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if(CDF[middleIndex] > value)
endIndex = middleIndex-1;
else
beginIndex = middleIndex+1;
}
return -1;
}
/*****************************
* CUDA Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: Nparticles
*****************************/
__global__ void kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, int Nparticles){
int block_id = blockIdx.x;// + gridDim.x * blockIdx.y;
int i = blockDim.x * block_id + threadIdx.x;
if(i < Nparticles){
__shared__ double s_u[128];
s_u[threadIdx.x]=u[i];
__syncthreads();
int index = -1;
int x;
for(x = 0; x < Nparticles; x++){
if(CDF[x] >= s_u[threadIdx.x]){
index = x;
break;
}
}
if(index == -1){
index = Nparticles-1;
}
xj[i] =arrayX[index];
yj[i] = arrayY[index];
}
}
/**
* Takes in a double and returns an integer that approximates to that double
 * @return the input rounded to the nearest integer: fractional part < .5 rounds down, otherwise up
*/
double roundDouble(double value){
int newValue = (int)(value);
if(value - newValue < .5)
return newValue;
else
return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double randu(int * seed, int index)
{
int num = A*seed[index] + C;
seed[index] = num % M;
return fabs(seed[index]/((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index){
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2*PI*v);
double rt = -2*log(u);
return sqrt(rt)*cosine;
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius)
{
int diameter = radius*2 - 1;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
if(distance < radius)
disk[x*diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
int startX = posX - error;
while(startX < 0)
startX++;
int startY = posY - error;
while(startY < 0)
startY++;
int endX = posX + error;
while(endX > dimX)
endX--;
int endY = posY + error;
while(endY > dimY)
endY--;
int x,y;
for(x = startX; x < endX; x++){
for(y = startY; y < endY; y++){
double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
if(distance < error)
matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
int x, y, z;
for(z = 0; z < dimZ; z++){
for(x = 0; x < dimX; x++){
for(y = 0; y < dimY; y++){
if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, double * neighbors, int radius){
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius*2 -1;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(se[x*diameter + y]){
neighbors[neighY*2] = (int)(y - center);
neighbors[neighY*2 + 1] = (int)(x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
 * the foreground intensity and the background intensity are known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
int k;
int max_size = IszX*IszY*Nfr;
/*get object centers*/
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k-1));
yk = abs(y0 - 2*(k-1));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(k = 0; k < Nfr; k++){
I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Determines the likelihood sum based on the formula: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 100
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
double calcLikelihoodSum(int * I, int * ind, int numOnes){
double likelihoodSum = 0.0;
int y;
for(y = 0; y < numOnes; y++)
likelihoodSum += (pow((double)(I[ind[y]] - 100),2) - pow((double)(I[ind[y]]-228),2))/50.0;
return likelihoodSum;
}
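/*
 * Editorial note (worked example, ignoring the added Gaussian noise): with the
 * background set to 100 and the foreground to 228 in videoSequence(), a pixel
 * matching the foreground contributes ((228-100)^2 - 0)/50 = 327.68 to the sum,
 * while a background pixel contributes (0 - (100-228)^2)/50 = -327.68, so the
 * sum grows when the sampled disk overlaps the object.
 */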
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value){
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++){
if(CDF[x] >= value){
index = x;
break;
}
}
if(index == -1){
return lengthCDF-1;
}
return index;
}
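/*
 * Editorial note: because the CDF built in particleFilter() is non-decreasing,
 * the linear scan above could be replaced by a binary search. The sketch below
 * is illustrative only and is not used by the benchmark; the name
 * findIndexBinarySearch is ours, not part of the original code.
 */
int findIndexBinarySearch(double * CDF, int lengthCDF, double value){
	int low = 0;
	int high = lengthCDF - 1;
	while(low < high){
		int mid = low + (high - low)/2;
		if(CDF[mid] < value)
			low = mid + 1;
		else
			high = mid;
	}
	//matches findIndex: if value exceeds every entry, the last index is returned
	return low;
}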
/**
 * The implementation of the particle filter using CUDA for many frames
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){
int max_size = IszX*IszY*Nfr;
long long start = get_time();
//original particle centroid
double xe = roundDouble(IszY/2.0);
double ye = roundDouble(IszX/2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius*2 - 1;
int * disk = (int *)malloc(diameter*diameter*sizeof(int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
double * objxy = (double *)malloc(countOnes*2*sizeof(double));
getneighbors(disk, countOnes, objxy, radius);
long long get_neighbors = get_time();
printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors));
//initial weights are all equal (1/Nparticles)
double * weights = (double *)malloc(sizeof(double)*Nparticles);
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((double)(Nparticles));
}
long long get_weights = get_time();
printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights));
//initial likelihood to 0.0
double * likelihood = (double *)malloc(sizeof(double)*Nparticles);
double * arrayX = (double *)malloc(sizeof(double)*Nparticles);
double * arrayY = (double *)malloc(sizeof(double)*Nparticles);
double * xj = (double *)malloc(sizeof(double)*Nparticles);
double * yj = (double *)malloc(sizeof(double)*Nparticles);
double * CDF = (double *)malloc(sizeof(double)*Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
int * ind = (int*)malloc(sizeof(int)*countOnes);
double * u = (double *)malloc(sizeof(double)*Nparticles);
double * u_GPU;
//CUDA memory allocation
check_error(cudaMalloc((void **) &arrayX_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &arrayY_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &xj_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &yj_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &CDF_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &u_GPU, sizeof(double)*Nparticles));
for(x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
int k;
//double * Ik = (double *)malloc(sizeof(double)*IszX*IszY);
int indX, indY;
for(k = 1; k < Nfr; k++){
long long set_arrays = get_time();
//printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, set_arrays));
//apply motion model
		//draws sample from motion model (random walk). The only prior information
		//is that the object moves roughly twice as fast in the y direction as in the x direction
for(x = 0; x < Nparticles; x++){
arrayX[x] = arrayX[x] + 1.0 + 5.0*randn(seed, x);
arrayY[x] = arrayY[x] - 2.0 + 2.0*randn(seed, x);
}
//particle filter likelihood
long long error = get_time();
printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error));
for(x = 0; x < Nparticles; x++){
//compute the likelihood: remember our assumption is that you know
// foreground and the background image intensity distribution.
// Notice that we consider here a likelihood ratio, instead of
// p(z|x). It is possible in this case. why? a hometask for you.
//calc ind
for(y = 0; y < countOnes; y++){
indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
indY = roundDouble(arrayY[x]) + objxy[y*2];
ind[y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[y] >= max_size)
ind[y] = 0;
}
likelihood[x] = calcLikelihoodSum(I, ind, countOnes);
likelihood[x] = likelihood[x]/countOnes;
}
long long likelihood_time = get_time();
printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time));
// update & normalize weights
// using equation (63) of Arulampalam Tutorial
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
}
long long exponential = get_time();
printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential));
double sumWeights = 0;
for(x = 0; x < Nparticles; x++){
sumWeights += weights[x];
}
long long sum_time = get_time();
printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time));
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x]/sumWeights;
}
long long normalize = get_time();
printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize));
xe = 0;
ye = 0;
// estimate the object location by expected values
for(x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
long long move_time = get_time();
printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time));
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
printf("%lf\n", distance);
//display(hold off for now)
//pause(hold off for now)
//resampling
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
long long cum_sum = get_time();
printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum));
double u1 = (1/((double)(Nparticles)))*randu(seed, 0);
for(x = 0; x < Nparticles; x++){
u[x] = u1 + x/((double)(Nparticles));
}
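		/*
		 * Editorial note: this is systematic (low-variance) resampling - one
		 * uniform draw u1 in [0, 1/Nparticles) is shifted by x/Nparticles per
		 * particle, and the kernel below maps each u[x] to the first particle
		 * whose CDF entry is >= u[x].
		 */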
long long u_time = get_time();
printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time));
long long start_copy = get_time();
//CUDA memory copying from CPU memory to GPU memory
// cudaMalloc((void **)&c_CDF,sizeof(double)*Nparticles);
cudaMemcpy(arrayX_GPU, arrayX, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(arrayY_GPU, arrayY, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(xj_GPU, xj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(yj_GPU, yj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(CDF_GPU, CDF, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(u_GPU, u, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
//cudaMemcpyToSymbol(c_CDF,CDF,sizeof(double)*Nparticles);
double* Address;
cudaBindTexture(0,tex_arrayX,arrayX,sizeof(double)*Nparticles);
cudaBindTexture(0,tex_arrayY,arrayY,sizeof(double)*Nparticles);
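		// Editorial note: cudaBindTexture is given the host buffers arrayX/arrayY
		// here rather than arrayX_GPU/arrayY_GPU; whether the kernel actually reads
		// tex_arrayX/tex_arrayY is not visible in this file, so this is recorded as
		// an observation only, not a change.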
//cudaGetSymbolAddress((void **)&Address,c_CDF);
long long end_copy = get_time();
//Set number of threads
int num_blocks = ceil((double) Nparticles/(double) threads_per_block);
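		// Editorial note: the double-precision ceil() above is equivalent to the
		// common integer idiom (Nparticles + threads_per_block - 1) / threads_per_block,
		// which avoids the float round trip.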
//KERNEL FUNCTION CALL
kernel <<< num_blocks, threads_per_block >>> (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles);
cudaThreadSynchronize();
long long start_copy_back = get_time();
//CUDA memory copying back from GPU to CPU memory
cudaMemcpy(yj, yj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost);
cudaMemcpy(xj, xj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost);
long long end_copy_back = get_time();
printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy));
printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back));
printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back));
long long xyj_time = get_time();
printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time));
for(x = 0; x < Nparticles; x++){
//reassign arrayX and arrayY
arrayX[x] = xj[x];
arrayY[x] = yj[x];
weights[x] = 1/((double)(Nparticles));
}
long long reset = get_time();
printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset));
}
//CUDA freeing of memory
cudaFree(u_GPU);
cudaFree(CDF_GPU);
cudaFree(yj_GPU);
cudaFree(xj_GPU);
cudaFree(arrayY_GPU);
cudaFree(arrayX_GPU);
//free memory
free(disk);
free(objxy);
free(weights);
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(u);
free(ind);
}
int main(int argc, char * argv[]){
char* usage = "naive.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if(argc != 9)
{
printf("%s\n", usage);
return 0;
}
	//check argument delimiters
if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) {
printf( "%s\n",usage );
return 0;
}
int IszX, IszY, Nfr, Nparticles;
	//converting a string to an integer
if( sscanf( argv[2], "%d", &IszX ) == EOF ) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if( IszX <= 0 ) {
printf("dimX must be > 0\n");
return 0;
}
	//converting a string to an integer
if( sscanf( argv[4], "%d", &IszY ) == EOF ) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if( IszY <= 0 ) {
printf("dimY must be > 0\n");
return 0;
}
	//converting a string to an integer
if( sscanf( argv[6], "%d", &Nfr ) == EOF ) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if( Nfr <= 0 ) {
printf("number of frames must be > 0\n");
return 0;
}
	//converting a string to an integer
if( sscanf( argv[8], "%d", &Nparticles ) == EOF ) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if( Nparticles <= 0 ) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *)malloc(sizeof(int)*Nparticles);
int i;
for(i = 0; i < Nparticles; i++)
seed[i] = time(0)*i;
//malloc matrix
int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
|
fe617185a5c8bb27ea050ddf85ffeb8d005731ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
1. Input Data
2. What needs to be calculated
3. Design your threads and thread blocks
4. Implementation on CPU and GPU
5. Built-in check points
6. Output data
*/
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<math.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include<time.h>
#define NumberOfELements 100000
#define PI 3.14159265
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
}
int line_length(FILE *input_file){
char read_lines[100];
int total_lines = 0;
while(fgets(read_lines, 100, input_file) != NULL) total_lines++;
rewind(input_file);
return(total_lines);
}
void get_data(FILE *input_file, int n_lines, float *asc, float *decl){
char read_lines[100];
float right_asc, declin;
int i=0;
while(fgets(read_lines, 100, input_file) != NULL){
sscanf(read_lines, "%f %f", &right_asc, &declin);
asc[i] = right_asc * PI/ (60 * 180);
decl[i] = declin * PI/ (60 * 180);
++i;
}
fclose(input_file);
}
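/*
 * Editorial note: the factor PI/(60*180) converts arc minutes to radians
 * (1 arc minute = 1/60 degree, and degrees * PI/180 = radians), which suggests
 * the input catalogs list right ascension and declination in arc minutes. This
 * is inferred from the code, not documented in the file.
 */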
__global__ void histogram_calc(float *rt_rl, float *decn_rl, float *rt_syc, float *decn_syc, float pi, unsigned long long int *histogram){
float galxs_rdns;
float galxs_dgrs;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if( index < NumberOfELements)
for( int i = 0; i < NumberOfELements; ++i){
galxs_rdns = acos(sin(decn_rl[index]) * sin(decn_syc[i]) + cos(decn_rl[index]) * cos(decn_syc[i]) * cos(rt_rl[index] - rt_syc[i]));
galxs_dgrs = galxs_rdns * (180 /pi);
// histogram[(int)(galxs_dgrs*4)] = (histogram[(int)(galxs_dgrs*4)] + 1);
atomicAdd(&histogram[(int)(galxs_dgrs*4)], 1);
__syncthreads();
}
}
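/*
 * Editorial note: the index (int)(galxs_dgrs*4) places each angular separation
 * into 0.25-degree bins, i.e. up to 4*180 = 720 bins over [0,180) degrees,
 * matching num_of_bins = bin_width * degrees in main(); atomicAdd is needed
 * because many threads can hit the same bin concurrently.
 */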
int main(int argc, char *argv[]) {
FILE *input_file, *output_file;
unsigned long long int *DD, *DR, *RR;
int total_lines_r, total_lines_s;
float *right_ascension_real, *declination_real, *right_ascension_synthetic, *declination_synthetic;
long int sum_DD, sum_DR, sum_RR;
float *d_DC, *d_DR, *d_RR, *d_RC;
double omg = 0.00;
int bin_width = 4;
int degrees = 180;
	int num_of_bins = bin_width * degrees;
time_t start, stop;
/* Check that we have 4 command line arguments */
if ( argc != 4 ) {
printf("Usage: %s real_data synthetic_data output_file\n", argv[0]);
return(0);
}
start = clock();
//open real data file
input_file = fopen(argv[1], "r");
if (input_file == NULL){
printf("file does not exist%s\n", argv[1]);
return 0;
}
// count lines in a real file
total_lines_r = line_length(input_file);
//printf("%s contains %d lines\n", argv[1], total_lines_r);
	//allocate memory for real data on host
right_ascension_real = (float *)calloc(total_lines_r, sizeof(float));
declination_real = (float *)calloc(total_lines_r, sizeof(float));
//get data
get_data(input_file, total_lines_r, right_ascension_real, declination_real);
//open synthetic data
input_file = fopen(argv[2], "r");
if (input_file == NULL){
printf("file does not exist%s\n", argv[2]);
return 0;
}
	//count lines in synthetic file
total_lines_s = line_length(input_file);
// printf("%s contains %d lines\n", argv[2], total_lines_s);
	//allocate memory for the synthetic data on host
right_ascension_synthetic = (float *)calloc(total_lines_s, sizeof(float));
declination_synthetic = (float *)calloc(total_lines_s, sizeof(float));
//get second data
get_data(input_file, total_lines_s,right_ascension_synthetic, declination_synthetic);
// where data is stored
long int *host_DD;
long int *host_DR;
long int *host_RR;
	//Allocate memory for the host
host_DD = (long int *)malloc((num_of_bins+1) * sizeof(long int));
host_DR = (long int *)malloc((num_of_bins+1) * sizeof(long int));
host_RR = (long int *)malloc((num_of_bins+1) * sizeof(long int));
for (int i = 0; i <= num_of_bins; ++i ) {
host_DD[i] = 0L;
host_DR[i] = 0L;
host_RR[i] = 0L;
}
//Allocate device memory
hipMalloc((void **)&DD, (NumberOfELements+1) * sizeof(unsigned long long int));
hipMalloc((void **)&DR, (NumberOfELements+1) * sizeof(unsigned long long int));
hipMalloc((void **)&RR, (NumberOfELements+1) * sizeof(unsigned long long int));
hipMalloc((void **)&d_DR, (NumberOfELements+1) * sizeof(float));
hipMalloc((void **)&d_DC, (NumberOfELements+1) * sizeof(float));
hipMalloc((void **)&d_RR, (NumberOfELements+1) * sizeof(float));
hipMalloc((void **)&d_RC, (NumberOfELements+1) * sizeof(float));
//copy the data from host memory to device memory
hipMemcpy(d_DR, right_ascension_real, (NumberOfELements) * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_DC, declination_real, (NumberOfELements) * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_RR, right_ascension_synthetic, (NumberOfELements) * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_RC, declination_synthetic, (NumberOfELements) * sizeof(float), hipMemcpyHostToDevice);
	//Launch the kernel for DD
int blockSize = 256;
int numBlocks = ((NumberOfELements -1) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( histogram_calc) , dim3(numBlocks), dim3(blockSize), 0, 0, d_DR, d_DC, d_DR, d_DC,PI, DD);
hipDeviceSynchronize();
//copy the results back to the host
hipMemcpy(host_DD, DD, num_of_bins * sizeof(long int), hipMemcpyDeviceToHost);
sum_DD = 0L;
for (int i = 0; i <= (num_of_bins); ++i )
sum_DD += host_DD[i];
printf("histograms DD = %ld\n", sum_DD);
	//Launch the kernel DR
hipLaunchKernelGGL(( histogram_calc) , dim3(numBlocks), dim3(blockSize), 0, 0, d_DR, d_DC, d_RR, d_RC,PI, DR);
hipDeviceSynchronize();
//copy the results back to the host
hipMemcpy(host_DR, DR, num_of_bins * sizeof(long int), hipMemcpyDeviceToHost);
sum_DR = 0L;
for (int i = 0; i <= num_of_bins; ++i )
sum_DR += host_DR[i];
printf("histograms DR = %ld\n", sum_DR);
	//Launch the kernel RR
hipLaunchKernelGGL(( histogram_calc) , dim3(numBlocks), dim3(blockSize), 0, 0, d_RR, d_RC, d_RR, d_RC, PI, RR);
//copy the results back to the host
hipMemcpy(host_RR, RR, num_of_bins * sizeof(long int), hipMemcpyDeviceToHost);
sum_RR = 0L;
for (int i = 0; i <= num_of_bins; ++i )
sum_RR += host_RR[i];
printf("histograms RR = %ld\n", sum_RR);
/* Open the output file */
output_file = fopen(argv[3],"w");
if ( output_file == NULL ) {
printf("Unable to open %s\n",argv[3]);
return(-1);
}
for(int i = 0; i < num_of_bins; ++i){
if (host_RR[i] > 0 ) {
omg = ((double)host_DD[i]/(double)(host_RR[i])) - ((2.0*host_DR[i])/(double)(host_RR[i])) + ((double)host_RR[i]/(double)(host_RR[i]));
// omg = (double)((host_DD[i] - 2*host_DR[i] + host_RR[i])/host_RR[i]);
printf("Omega = %6.3f\n", omg);
fprintf(output_file, "%6.3f\n", omg);
}
}
fclose(output_file);
free(right_ascension_synthetic);
free(declination_synthetic);
free(right_ascension_real);
free(declination_real);
free(host_DD);
free(host_DR);
free(host_RR);
hipFree(DD);
hipFree(DR);
hipFree(RR);
hipFree(d_RR);
hipFree(d_RC);
hipFree(d_DR);
hipFree(d_DC);
stop = clock();
printf("\nExcution time = %6.1f seconds\n",
((double) (stop-start))/ CLOCKS_PER_SEC);
return (0);
}
| fe617185a5c8bb27ea050ddf85ffeb8d005731ad.cu | /*
1. Input Data
2. What needs to be calculated
3. Design your threads and thread blocks
4. Implementation on CPU and GPU
5. Built-in check points
6. Output data
*/
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<math.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include<time.h>
#define NumberOfELements 100000
#define PI 3.14159265
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
int line_length(FILE *input_file){
char read_lines[100];
int total_lines = 0;
while(fgets(read_lines, 100, input_file) != NULL) total_lines++;
rewind(input_file);
return(total_lines);
}
void get_data(FILE *input_file, int n_lines, float *asc, float *decl){
char read_lines[100];
float right_asc, declin;
int i=0;
while(fgets(read_lines, 100, input_file) != NULL){
sscanf(read_lines, "%f %f", &right_asc, &declin);
asc[i] = right_asc * PI/ (60 * 180);
decl[i] = declin * PI/ (60 * 180);
++i;
}
fclose(input_file);
}
__global__ void histogram_calc(float *rt_rl, float *decn_rl, float *rt_syc, float *decn_syc, float pi, unsigned long long int *histogram){
float galxs_rdns;
float galxs_dgrs;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if( index < NumberOfELements)
for( int i = 0; i < NumberOfELements; ++i){
galxs_rdns = acos(sin(decn_rl[index]) * sin(decn_syc[i]) + cos(decn_rl[index]) * cos(decn_syc[i]) * cos(rt_rl[index] - rt_syc[i]));
galxs_dgrs = galxs_rdns * (180 /pi);
// histogram[(int)(galxs_dgrs*4)] = (histogram[(int)(galxs_dgrs*4)] + 1);
atomicAdd(&histogram[(int)(galxs_dgrs*4)], 1);
__syncthreads();
}
}
int main(int argc, char *argv[]) {
FILE *input_file, *output_file;
unsigned long long int *DD, *DR, *RR;
int total_lines_r, total_lines_s;
float *right_ascension_real, *declination_real, *right_ascension_synthetic, *declination_synthetic;
long int sum_DD, sum_DR, sum_RR;
float *d_DC, *d_DR, *d_RR, *d_RC;
double omg = 0.00;
int bin_width = 4;
int degrees = 180;
	int num_of_bins = bin_width * degrees;
time_t start, stop;
/* Check that we have 4 command line arguments */
if ( argc != 4 ) {
printf("Usage: %s real_data synthetic_data output_file\n", argv[0]);
return(0);
}
start = clock();
//open real data file
input_file = fopen(argv[1], "r");
if (input_file == NULL){
printf("file does not exist%s\n", argv[1]);
return 0;
}
// count lines in a real file
total_lines_r = line_length(input_file);
//printf("%s contains %d lines\n", argv[1], total_lines_r);
	//allocate memory for real data on host
right_ascension_real = (float *)calloc(total_lines_r, sizeof(float));
declination_real = (float *)calloc(total_lines_r, sizeof(float));
//get data
get_data(input_file, total_lines_r, right_ascension_real, declination_real);
//open synthetic data
input_file = fopen(argv[2], "r");
if (input_file == NULL){
printf("file does not exist%s\n", argv[2]);
return 0;
}
	//count lines in synthetic file
total_lines_s = line_length(input_file);
// printf("%s contains %d lines\n", argv[2], total_lines_s);
	//allocate memory for the synthetic data on host
right_ascension_synthetic = (float *)calloc(total_lines_s, sizeof(float));
declination_synthetic = (float *)calloc(total_lines_s, sizeof(float));
//get second data
get_data(input_file, total_lines_s,right_ascension_synthetic, declination_synthetic);
// where data is stored
long int *host_DD;
long int *host_DR;
long int *host_RR;
	//Allocate memory for the host
host_DD = (long int *)malloc((num_of_bins+1) * sizeof(long int));
host_DR = (long int *)malloc((num_of_bins+1) * sizeof(long int));
host_RR = (long int *)malloc((num_of_bins+1) * sizeof(long int));
for (int i = 0; i <= num_of_bins; ++i ) {
host_DD[i] = 0L;
host_DR[i] = 0L;
host_RR[i] = 0L;
}
//Allocate device memory
cudaMalloc((void **)&DD, (NumberOfELements+1) * sizeof(unsigned long long int));
cudaMalloc((void **)&DR, (NumberOfELements+1) * sizeof(unsigned long long int));
cudaMalloc((void **)&RR, (NumberOfELements+1) * sizeof(unsigned long long int));
cudaMalloc((void **)&d_DR, (NumberOfELements+1) * sizeof(float));
cudaMalloc((void **)&d_DC, (NumberOfELements+1) * sizeof(float));
cudaMalloc((void **)&d_RR, (NumberOfELements+1) * sizeof(float));
cudaMalloc((void **)&d_RC, (NumberOfELements+1) * sizeof(float));
//copy the data from host memory to device memory
cudaMemcpy(d_DR, right_ascension_real, (NumberOfELements) * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_DC, declination_real, (NumberOfELements) * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_RR, right_ascension_synthetic, (NumberOfELements) * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_RC, declination_synthetic, (NumberOfELements) * sizeof(float), cudaMemcpyHostToDevice);
	//Launch the kernel for DD
int blockSize = 256;
int numBlocks = ((NumberOfELements -1) + blockSize - 1) / blockSize;
histogram_calc <<<numBlocks, blockSize>>>(d_DR, d_DC, d_DR, d_DC,PI, DD);
cudaDeviceSynchronize();
//copy the results back to the host
cudaMemcpy(host_DD, DD, num_of_bins * sizeof(long int), cudaMemcpyDeviceToHost);
sum_DD = 0L;
for (int i = 0; i <= (num_of_bins); ++i )
sum_DD += host_DD[i];
printf("histograms DD = %ld\n", sum_DD);
	//Launch the kernel DR
histogram_calc <<<numBlocks, blockSize>>>(d_DR, d_DC, d_RR, d_RC,PI, DR);
cudaDeviceSynchronize();
//copy the results back to the host
cudaMemcpy(host_DR, DR, num_of_bins * sizeof(long int), cudaMemcpyDeviceToHost);
sum_DR = 0L;
for (int i = 0; i <= num_of_bins; ++i )
sum_DR += host_DR[i];
printf("histograms DR = %ld\n", sum_DR);
	//Launch the kernel RR
histogram_calc <<<numBlocks, blockSize>>>(d_RR, d_RC, d_RR, d_RC, PI, RR);
//copy the results back to the host
cudaMemcpy(host_RR, RR, num_of_bins * sizeof(long int), cudaMemcpyDeviceToHost);
sum_RR = 0L;
for (int i = 0; i <= num_of_bins; ++i )
sum_RR += host_RR[i];
printf("histograms RR = %ld\n", sum_RR);
/* Open the output file */
output_file = fopen(argv[3],"w");
if ( output_file == NULL ) {
printf("Unable to open %s\n",argv[3]);
return(-1);
}
for(int i = 0; i < num_of_bins; ++i){
if (host_RR[i] > 0 ) {
omg = ((double)host_DD[i]/(double)(host_RR[i])) - ((2.0*host_DR[i])/(double)(host_RR[i])) + ((double)host_RR[i]/(double)(host_RR[i]));
// omg = (double)((host_DD[i] - 2*host_DR[i] + host_RR[i])/host_RR[i]);
printf("Omega = %6.3f\n", omg);
fprintf(output_file, "%6.3f\n", omg);
}
}
fclose(output_file);
free(right_ascension_synthetic);
free(declination_synthetic);
free(right_ascension_real);
free(declination_real);
free(host_DD);
free(host_DR);
free(host_RR);
cudaFree(DD);
cudaFree(DR);
cudaFree(RR);
cudaFree(d_RR);
cudaFree(d_RC);
cudaFree(d_DR);
cudaFree(d_DC);
stop = clock();
printf("\nExcution time = %6.1f seconds\n",
((double) (stop-start))/ CLOCKS_PER_SEC);
return (0);
}
|
c7ea9142876d6bcf3a066f0f39dffb50de68cac7.hip | // !!! This is a file automatically generated by hipify!!!
/*********************************************************************************** \
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\***********************************************************************************/
#define __STDC_FORMAT_MACROS
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <string>
#include <fstream>
#include <algorithm>
#include <map>
#include <sys/time.h>
#include <cupti.h>
#include <sassi/sassi-core.hpp>
#include <sassi/sassi-regs.hpp>
#include <sassi/sassi-memory.hpp>
#include <sassi/sassi-opcodes.h>
#include "sassi_intrinsics.h"
#include "sassi_dictionary.hpp"
#include "sassi_lazyallocator.hpp"
#include "error_injector.h"
// knameCount keeps track of kernel names and the number of invocations of the
// cuda kernels that executed so far during the application execution.
std::map<std::string, int> knameCount;
#if TIMING
struct timeval start, end;
float mTotalTime = 0;
#endif
//////////////////////////////////////////////////////////////////////
// Error injection parameters and related functions
//////////////////////////////////////////////////////////////////////
typedef struct {
bool areParamsReady;
bool errorInjected;
bool writeBeforeRead;
bool readyToInject;
#if INTERVAL_MODE_INJECTION == 1
bool intervalModeReady;
bool intervalModeInjected;
int injIntervalSize;
int injIntervalID;
#endif
char injKernelName[MAX_KNAME_SIZE];
long long injThreadID; // injection thread id
int32_t injKCount;
int32_t injIGID; // arch state id
uint64_t injPC;
unsigned long long injPCCount;
unsigned long long injInstID; // injection inst id
float injOpSeed; // injection operand id seed (random number between 0-1)
uint32_t injBFM; // error model
float injBIDSeed; // bit id seed (random number between 0-1)
} inj_info_t;
__managed__ inj_info_t inj_info;
void reset_inj_info() {
inj_info.areParamsReady = false;
inj_info.errorInjected = false;
inj_info.writeBeforeRead = false;
inj_info.readyToInject = false;
#if INTERVAL_MODE_INJECTION == 1
inj_info.intervalModeReady = false;
inj_info.intervalModeInjected = false;
inj_info.injIntervalSize = 0;
inj_info.injIntervalID = -1;
#endif
inj_info.injThreadID = -1;
inj_info.injKernelName[0] = '\0';
inj_info.injKCount = 0;
inj_info.injIGID = 0; // arch state id
inj_info.injInstID = 0; // instruction id
inj_info.injOpSeed = 0; // destination id seed (float, 0-1)
inj_info.injBIDSeed = 0; // bit location seed (float, 0-1)
inj_info.injBFM = 0; // fault model: single bit flip, all bit flip, random value
}
// for debugging
void print_inj_info() {
#if INTERVAL_MODE_INJECTION != 1
printf("injPC=%lx, injPCCount=%lld, ", inj_info.injPC, inj_info.injPCCount);
#endif
printf("inj_igid=%d, inj_fault_model=%d, inj_inst_id=%lld",
inj_info.injIGID, inj_info.injBFM, inj_info.injInstID);
printf("inj_destination_id=%f, inj_bit_location=%f \n", inj_info.injOpSeed, inj_info.injBIDSeed);
}
// Parse error injection site info from a file. This should be done on host side.
void parse_params(std::string filename) {
reset_inj_info();
std::ifstream ifs (filename.c_str(), std::ifstream::in);
if (ifs.is_open()) {
#if INJ_MODE != RF_INJECTIONS
ifs >> inj_info.injIGID; // arch state id
assert(inj_info.injIGID >=0 && inj_info.injIGID < NUM_INST_TYPES); // ensure that the value is in the expected range
#endif
ifs >> inj_info.injBFM; // fault model: single bit flip, all bit flip, random value
assert(inj_info.injBFM < NUM_BFM_TYPES); // ensure that the value is in the expected range
#if INTERVAL_MODE_INJECTION != 1
// ifs >> inj_info.injKernelName;
// ifs >> inj_info.injKCount;
#else
ifs >> inj_info.injIntervalSize;
ifs >> inj_info.injIntervalID;
#endif
ifs >> std::hex >> inj_info.injPC; // PC
ifs >> std::dec >> inj_info.injPCCount;
// ifs >> inj_info.injInstID; // instruction id
ifs >> inj_info.injOpSeed; // destination id seed (float, 0-1 for inst injections and 0-256 for reg)
#if INJ_MODE != RF_INJECTIONS
assert(inj_info.injOpSeed >=0 && inj_info.injOpSeed < 1.01); // ensure that the value is in the expected range
#else
assert(inj_info.injOpSeed >=0 && inj_info.injOpSeed < 257); // ensure that the value is in the expected range
#endif
ifs >> inj_info.injBIDSeed; // bit location seed (float, 0-1)
assert(inj_info.injBIDSeed >= 0 && inj_info.injBIDSeed < 1.01); // ensure that the value is in the expected range
}
ifs.close();
if (INJ_DEBUG_LIGHT) {
print_inj_info();
}
}
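// Editorial note: based on the reads above, with INJ_MODE != RF_INJECTIONS and
// INTERVAL_MODE_INJECTION != 1 the seed file is expected to contain, whitespace
// separated and in this order:
//   <injIGID> <injBFM> <injPC in hex> <injPCCount> <injOpSeed> <injBIDSeed>
// e.g. a line such as "2 0 8a8 1034 0.37 0.66". This layout is inferred from
// the code in parse_params(), not taken from SASSI documentation.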
//////////////////////////////////////////////////////////////////////
// Functions for actual error injection
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// Input: inMask and injBIDSeed
// Output: outMask that selects which bit to flip among the bits that are 1 in inMask
//////////////////////////////////////////////////////////////////////
__device__ uint8_t get_inj_mask(uint8_t inMask, uint8_t injBIDSeed) {
uint8_t outMask = 1;
uint8_t tempInMask = inMask;
int i, count=0;
	for (i=0; i<8; i++) { // counting number of 1s in inMask
		if ((tempInMask & 0x1) == 1) {
count++;
}
tempInMask = tempInMask >> 1;
}
if (INJ_DEBUG_HEAVY) {
printf(" count = %d \n", count);
}
uint8_t injBID = get_int_inj_id(count, injBIDSeed);
if (INJ_DEBUG_HEAVY) {
printf(" injBID = %d \n", injBID);
}
count = 0;
tempInMask = inMask;
	for (i=0; i<8; i++) { // walk to the injBID-th set bit in inMask
		if ((tempInMask & 0x1) == 1) {
if (count == injBID)
break;
count++;
}
tempInMask = tempInMask >> 1;
outMask = outMask << 1;
}
if (INJ_DEBUG_HEAVY) {
printf(" inMask=%x, outMask=%x \n", inMask, outMask);
}
return outMask;
}
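// Editorial note (worked example): for inMask = 0x05 (bits 0 and 2 set), count
// is 2, so injBIDSeed picks one of the two set bits; a seed that maps to
// injBID = 1 walks past bit 0 and returns outMask = 0x04. Only bits already set
// in inMask can therefore be selected for the flip.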
////////////////////////////////////////////////////////////////////////////////////
// Injecting errors in store instructions
////////////////////////////////////////////////////////////////////////////////////
template <typename T>
__device__ void inject_store_error_t(SASSIAfterParams* ap, SASSIMemoryParams *mp, float injBIDSeed, unsigned long long injInstID, int32_t bitwidth, uint32_t injBFM) {
uint32_t injBID = get_int_inj_id(bitwidth, injBIDSeed);
int64_t addr = mp->GetAddress();
T *memAddr = (T*) addr;
DEBUG_PRINT(INJ_DEBUG_LIGHT, "memAddr=%llx: value before=%llx\n", memAddr, *memAddr);
printf(":::Injecting: pc=%llx bbId=%d opcode=%s tid=%d instCount=%lld instType=st%d injBID=%d:::", ap->GetPUPC(), ap->GetBBID(), SASSIInstrOpcodeStrings[ap->GetOpcode()], get_flat_tid(), injInstID, bitwidth, injBID);
if (!DUMMY_INJECTION) {
if(injBFM == FLIP_SINGLE_BIT || injBFM == WARP_FLIP_SINGLE_BIT) {
*memAddr = *memAddr ^ ((T)1<<injBID); // actual error injection
} else if (injBFM == FLIP_TWO_BITS || injBFM == WARP_FLIP_TWO_BITS) {
*memAddr = *memAddr ^ ((T)3<<injBID); // actual error injection
} else if (injBFM == RANDOM_VALUE || injBFM == WARP_RANDOM_VALUE) {
*memAddr = ((T)(-1))*injBIDSeed;
} else if (injBFM == ZERO_VALUE || injBFM == WARP_ZERO_VALUE) {
*memAddr = 0;
}
}
DEBUG_PRINT(INJ_DEBUG_LIGHT, "memAddr=%llx: value after=%llx\n", memAddr, *memAddr);
}
__device__ void inject_store128_error_t(SASSIAfterParams* ap, SASSIMemoryParams *mp, float injBIDSeed, unsigned long long injInstID, int32_t bitwidth, uint32_t injBFM) {
uint32_t injBID = get_int_inj_id(bitwidth, injBIDSeed);
int64_t addr = mp->GetAddress();
uint128_t *memAddr = (uint128_t*) addr;
DEBUG_PRINT(INJ_DEBUG_LIGHT, "memAddr=%llx: value before=%llx, %llx\n", memAddr, (*memAddr).values[0], (*memAddr).values[1]);
printf(":::Injecting: pc=%llx bbId=%d opcode=%s tid=%d instCount=%lld instType=st%d injBID=%d:::", ap->GetPUPC(), ap->GetBBID(), SASSIInstrOpcodeStrings[ap->GetOpcode()], get_flat_tid(), injInstID, bitwidth, injBID);
if (!DUMMY_INJECTION) {
if (injBFM == FLIP_SINGLE_BIT || injBFM == WARP_FLIP_SINGLE_BIT) {
if (injBID < 64) {
memAddr->values[0] = memAddr->values[0] ^ ((uint64_t)1<<injBID); // actual error injection
} else {
memAddr->values[1] = memAddr->values[1] ^ ((uint64_t)1<<(injBID-64)); // actual error injection
}
} else if (injBFM == FLIP_TWO_BITS || injBFM == WARP_FLIP_TWO_BITS) {
if (injBID < 63) {
memAddr->values[0] = memAddr->values[0] ^ ((uint64_t)3<<injBID); // actual error injection
} else if (injBID == 63) {
memAddr->values[0] = memAddr->values[0] ^ ((uint64_t)1<<injBID); // actual error injection
memAddr->values[1] = memAddr->values[1] ^ ((uint64_t)1<<(injBID-64)); // actual error injection
} else {
memAddr->values[1] = memAddr->values[1] ^ ((uint64_t)3<<(injBID-64)); // actual error injection
}
} else if (injBFM == RANDOM_VALUE || injBFM == WARP_RANDOM_VALUE) {
memAddr->values[0] = ((uint64_t)(-1))*injBIDSeed;
memAddr->values[1] = ((uint64_t)(-1))*injBIDSeed;
} else if (injBFM == ZERO_VALUE || injBFM == WARP_ZERO_VALUE) {
memAddr->values[0] = 0;
memAddr->values[1] = 0;
}
}
DEBUG_PRINT(INJ_DEBUG_LIGHT, "memAddr=%llx: value before=%llx, %llx\n", memAddr, memAddr->values[0], memAddr->values[1]);
}
// Inject in store value
__device__ void inject_store_error(SASSIAfterParams* ap, SASSIMemoryParams *mp, float injBIDSeed, unsigned long long injInstID, uint32_t injBFM) {
int32_t bitwidth = 8*mp->GetWidth(); // GetWidth returns bytes
if (bitwidth == 32) { // most common case
inject_store_error_t<uint32_t>(ap, mp, injBIDSeed, injInstID, bitwidth, injBFM);
} else if (bitwidth == 8) {
inject_store_error_t<uint8_t>(ap, mp, injBIDSeed, injInstID, bitwidth, injBFM);
} else if (bitwidth == 16) {
inject_store_error_t<uint16_t>(ap, mp, injBIDSeed, injInstID, bitwidth, injBFM);
} else if (bitwidth == 64) {
inject_store_error_t<uint64_t>(ap, mp, injBIDSeed, injInstID, bitwidth, injBFM);
	} else if (bitwidth == 128) {
		inject_store128_error_t(ap, mp, injBIDSeed, injInstID, bitwidth, injBFM);
	} else {
		DEBUG_PRINT(1, "WARNING: No injection for bitwidth=%d\n", bitwidth);
	}
}
////////////////////////////////////////////////////////////////////////////////////
// Injecting errors in GPR registers
////////////////////////////////////////////////////////////////////////////////////
__device__ void inject_GPR_error(SASSICoreParams* cp, SASSIRegisterParams *rp, SASSIRegisterParams::GPRRegInfo regInfo, float injBIDSeed, unsigned long long injInstID, uint32_t injBFM, unsigned long long dyn_inst_count) {
// get the value in the register, and inject error
int32_t valueInReg = rp->GetRegValue(cp, regInfo).asInt;
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Injection candidate: register destination = %d \n", rp->GetRegNum(regInfo));
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Before Injection: register value = %x \n", valueInReg);
SASSIRegisterParams::GPRRegValue injectedVal;
injectedVal.asUint = 0;
uint32_t injBID = 0;
if (injBFM == FLIP_SINGLE_BIT || injBFM == WARP_FLIP_SINGLE_BIT) {
injBID = get_int_inj_id(32, injBIDSeed);
injectedVal.asUint = valueInReg ^ (1<<injBID); // actual error injection
} else if (injBFM == FLIP_TWO_BITS || injBFM == WARP_FLIP_TWO_BITS) {
injBID = get_int_inj_id(31, injBIDSeed);
injectedVal.asUint = valueInReg ^ (3<<injBID); // actual error injection
} else if (injBFM == RANDOM_VALUE || injBFM == WARP_RANDOM_VALUE) {
injectedVal.asUint = ((uint32_t)-1) * injBIDSeed;
} else if (injBFM == ZERO_VALUE || injBFM == WARP_ZERO_VALUE) {
injectedVal.asUint = 0;
}
printf(":::Injecting: pc=%llx bbId=%d GlobalInstCount=%lld AppDynInstCount=%lld opcode=%s tid=%d instCount=%lld instType=GPR regNum=%d injBID=%d:::",
cp->GetPUPC(), cp->GetBBID(), injCounterAllInsts, dyn_inst_count, SASSIInstrOpcodeStrings[cp->GetOpcode()], get_flat_tid(), injInstID,
rp->GetRegNum(regInfo), injBID);
if (!DUMMY_INJECTION) {
rp->SetRegValue(cp, regInfo, injectedVal);
}
int32_t valueInRegAfter = rp->GetRegValue(cp, regInfo).asInt;
DEBUG_PRINT(INJ_DEBUG_LIGHT, "After Injection: register value = %x, ", valueInRegAfter);
DEBUG_PRINT(INJ_DEBUG_LIGHT, "injectedVal = %x \n", injectedVal.asUint);
}
////////////////////////////////////////////////////////////////////////////////////
// Injecting errors in CC registers
////////////////////////////////////////////////////////////////////////////////////
__device__ void inject_CC_error(SASSIAfterParams* ap, SASSIRegisterParams *rp, float injBIDSeed, unsigned long long injInstID, uint32_t injBFM, unsigned long long dyn_inst_count) {
uint8_t valueInReg = rp->SASSIGetCCRegisterVal(ap); // read CC register value, only low 4 bits are used
uint8_t injBID = get_int_inj_id(4, injBIDSeed);
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Before Injection: CC register value = %x \n", valueInReg);
printf(":::Injecting: pc=%llx bbId=%d GlobalInstCount=%lld AppDynInstCount=%lld opcode=%s tid=%d instCount=%lld instType=CC regNum=-1 injBID=%d:::",
ap->GetPUPC(), ap->GetBBID(), injCounterAllInsts, dyn_inst_count, SASSIInstrOpcodeStrings[ap->GetOpcode()], get_flat_tid(), injInstID, injBID);
uint8_t injectedVal = 0;
if (injBFM == FLIP_SINGLE_BIT) {
injectedVal = valueInReg ^ (1<<injBID); // actual error injection
}
if (!DUMMY_INJECTION) {
rp->SASSISetCCRegisterVal(ap, injectedVal);
}
uint8_t valueInRegAfter = rp->SASSIGetCCRegisterVal(ap);
DEBUG_PRINT(INJ_DEBUG_LIGHT, "After Injection: register value = %x ", valueInRegAfter);
DEBUG_PRINT(INJ_DEBUG_LIGHT, ", injectedVal = %x \n", injectedVal);
}
////////////////////////////////////////////////////////////////////////////////////
// Injecting errors in PR registers
////////////////////////////////////////////////////////////////////////////////////
__device__ void inject_PR_error(SASSIAfterParams* ap, SASSIRegisterParams *rp, float injBIDSeed, unsigned long long injInstID, uint32_t injBFM, unsigned long long dyn_inst_count) {
uint8_t valueInReg = rp->SASSIGetPredicateRegisterVal(ap); // read PR register value
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Before Injection: PR register value = %x \n", valueInReg);
printf(":::Injecting: pc=%llx bbId=%d GlobalInstCount=%lld AppDynInstCount=%lld opcode=%s tid=%d instCount=%lld instType=PR regNum=-1 injBID=0:::",
ap->GetPUPC(), ap->GetBBID(), injCounterAllInsts, dyn_inst_count, SASSIInstrOpcodeStrings[ap->GetOpcode()], get_flat_tid(), injInstID);
uint8_t injectedVal = 0;
if (injBFM == FLIP_SINGLE_BIT) {
injectedVal = valueInReg ^ get_inj_mask(rp->GetPredicateDstMask(), injBIDSeed); // actual error injection
}
if (!DUMMY_INJECTION) {
rp->SASSISetPredicateRegisterVal(ap, injectedVal);
}
uint8_t valueInRegAfter = rp->SASSIGetPredicateRegisterVal(ap);
DEBUG_PRINT(INJ_DEBUG_LIGHT, "After Injection: register value = %x ", valueInRegAfter);
DEBUG_PRINT(INJ_DEBUG_LIGHT, ", injectedVal = %x \n", injectedVal);
}
////////////////////////////////////////////////////////////////////////////////////
// Injecting errors in any destination register
////////////////////////////////////////////////////////////////////////////////////
__device__ __noinline__ void inject_reg_error(SASSIAfterParams* ap, SASSIRegisterParams *rp, float injOpSeed, float injBIDSeed, unsigned long long injInstID, uint32_t injBFM, unsigned long long dyn_inst_count) {
int32_t numDestRegs = rp->GetNumGPRDsts(); // Get the number of destination registers assigned by this instruction.
	int32_t numDestOps = numDestRegs + rp->IsCCDefined() + (rp->GetPredicateDstMask() != 0); // num gpr regs + 1 for CC + 1 for PR
DEBUG_PRINT(INJ_DEBUG_LIGHT, "At: tid=%d instCount=%lld opcode=%s numDestOps=%d, isCCDefined=%d, isPredicateDefined=%d\n", get_flat_tid(), injInstID, SASSIInstrOpcodeStrings[ap->GetOpcode()], numDestOps, rp->IsCCDefined(), rp->GetPredicateDstMask() != 0);
if (numDestOps == 0) // cannot inject - no destination operands
return;
int32_t injOpID = get_int_inj_id(numDestOps, injOpSeed);
if (injOpID < numDestRegs) { // inject in a GPR
SASSIRegisterParams::GPRRegInfo regInfo = rp->GetGPRDst(injOpID); // get destination register info, get the value in that register, and inject error
inject_GPR_error(ap, rp, regInfo, injBIDSeed, injInstID, injBFM, dyn_inst_count);
} else if (injOpID - numDestRegs + 1 == rp->IsCCDefined()) { // inject in CC register
inject_CC_error(ap, rp, injBIDSeed, injInstID, injBFM, dyn_inst_count);
} else { // inject in PR Register
inject_PR_error(ap, rp, injBIDSeed, injInstID, injBFM, dyn_inst_count);
}
}
// return 0 if the injRegID is not found in the list of destination registers, else returns the index
__device__ int32_t is_dest_reg(SASSIRegisterParams *rp, int32_t injRegID) {
int32_t numDestRegs = rp->GetNumGPRDsts(); // Get the number of destination registers assigned by this instruction.
for (int32_t i=0; i<numDestRegs; i++) {
if (rp->GetRegNum(rp->GetGPRDst(i)) == injRegID) {
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Injection candidate found in destination: register destination = %d \n", injRegID);
return i;
}
}
return 0;
}
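// Editorial note: is_dest_reg() returns 0 both when injRegID is not a
// destination and when it matches destination index 0, so the "!= 0" check in
// sassi_before_handler() cannot distinguish those two cases. Recorded as an
// observation only; the original behaviour is left unchanged.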
// return 0 if the injRegID is not found in the list of source registers, else returns the index
__device__ int32_t is_src_reg(SASSIRegisterParams *rp, int32_t injRegID) {
int32_t numSrcRegs = rp->GetNumGPRSrcs(); // Get the number of destination registers assigned by this instruction.
for (int32_t i=0; i<numSrcRegs; i++) {
if (rp->GetRegNum(rp->GetGPRSrc(i)) == injRegID) {
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Injection candidate found in source: register destination = %d \n", injRegID);
return 1;
}
}
return 0;
}
///////////////////////////////////////////////////////////////////////////////////
// SASSI before handler: This function will be called before the instruction
// gets executed. This is used only for RF-AVF injections. This function first
// marks the register for injection. It then checks whether the register is
// used in subsequent instructions. If it is used as a destination before
// being read, the injection run terminates the run is categorized as masked.
// If the register is not found in any of the source registers before the
// thread exits, the injection run is categorized as masked.
///////////////////////////////////////////////////////////////////////////////////
__device__ void sassi_before_handler(SASSIBeforeParams* bp, SASSIMemoryParams *mp, SASSIRegisterParams *rp) {
#if EMPTY_HANDLER && INJ_MODE != RF_INJECTIONS // if you don't want to inject RF based errors, return
return;
#endif
if (!inj_info.areParamsReady) // Check if this is the kernel of interest
return; // This is not the selected kernel. No need to proceed.
unsigned long long currInstCounter = atomicAdd(&injCounterAllInsts, 1LL) + 1; // update counter, returns old value
	if (inj_info.injInstID == currInstCounter) { // the current instruction count matches injInstID, time to inject the error
// record thread number, and RF to inject.
// Note we are not injecting the error here, we are just recording it. We
// will inject the error when it is used as a source register by the
// subsequent instructions.
inj_info.injThreadID = get_flat_tid();
inj_info.readyToInject = true;
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Injection point reached: tid=%lld instCount=%lld \n", inj_info.injThreadID, inj_info.injInstID);
}
// if readyToInject is set and this is the thread that was selected, check for error injection
if (inj_info.readyToInject && inj_info.injThreadID == get_flat_tid() && !inj_info.errorInjected) {
// check if the selected register is either in source registers or destination registers
if (is_dest_reg(rp, inj_info.injOpSeed) != 0) {
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Terminating run: Write found before a read tid=%lld\n", inj_info.injThreadID);
// Record this injection as Masked and terminate
inj_info.writeBeforeRead = true;
__threadfence(); // ensure store issued before trap
asm("trap;"); // kill kernel with error
}
int32_t src_reg = is_src_reg(rp, inj_info.injOpSeed);
if (src_reg != 0) {
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Reached actual injection point tid=%lld\n", inj_info.injThreadID);
inj_info.errorInjected = true;
inject_GPR_error(bp, rp, rp->GetGPRSrc(src_reg), inj_info.injBIDSeed, inj_info.injInstID, inj_info.injBFM, currInstCounter); // Inject the error and contine
}
}
}
///////////////////////////////////////////////////////////////////////////////////
// SASSI After handler: This function is called after every SASS instruction.
// This is used for instruction output-level injections only. This function
// first checks whether the injection parameters are ready. If so, it checks
// the instruction group id of the current instruction and then proceeds to
// the respective function to check and perform error injection.
///////////////////////////////////////////////////////////////////////////////////
__device__ void sassi_after_handler(SASSIAfterParams* ap, SASSIMemoryParams *mp, SASSIRegisterParams *rp) { // order is important
/***FRITZ**/
unsigned long long dyn_inst_count = atomicAdd(&AppDynInstCount, 1LL);
#if INTERVAL_MODE_INJECTION == 1
int interval = dyn_inst_count / inj_info.injIntervalSize;
if (inj_info.injIntervalID != interval) //{
return;
#endif
atomicAdd(&injCounterAllInsts, 1LL);
#if EMPTY_HANDLER && INJ_MODE != INST_INJECTIONS // if you don't want to inject instruction level errors, return
return;
#endif
#if INTERVAL_MODE_INJECTION != 1
if (inj_info.injPC != ap->GetPUPC()) // Check if this is the kernel of interest
return; // This is not the selected kernel. No need to proceed.
#endif
switch (inj_info.injIGID) {
case GPR: {
if (has_dest_GPR(rp)) {
unsigned long long pcCounter = atomicAdd(&InjPCCount, 1LL); // update counter, return old value
#if INTERVAL_MODE_INJECTION == 1
//__threadfence();
unsigned long long currIntervalInstCount = atomicAdd(&IntervalInstCount, 1LL);
bool cond = inj_info.injInstID == currIntervalInstCount;// && inj_info.intervalModeReady && (!inj_info.intervalModeInjected);
#else
bool cond = inj_info.injPCCount == pcCounter; // the current opcode matches injIGID and injInstID matches
#endif
if (inj_info.injBFM == WARP_FLIP_SINGLE_BIT || inj_info.injBFM == WARP_FLIP_TWO_BITS || inj_info.injBFM == WARP_RANDOM_VALUE || inj_info.injBFM == ZERO_VALUE || inj_info.injBFM == WARP_ZERO_VALUE) { // For warp wide injections
cond = (__any(cond) != 0) ; // __any() evaluates cond for all active threads of the warp and return non-zero if and only if cond evaluates to non-zero for any of them.
}
if(cond) {
/* printf("--------------\n"
"injPC %lx , currentPC:%lx, pcCount=%lldi looking for %lld\n"
"----------------------------\n",
inj_info.injPC, ap->GetPUPC(), pcCounter,
inj_info.injPCCount);*/
// get destination register info, get the value in that register, and inject error
#if INTERVAL_MODE_INJECTION == 1
inj_info.intervalModeInjected = true;
#endif
SASSIRegisterParams::GPRRegInfo regInfo = rp->GetGPRDst(get_int_inj_id(rp->GetNumGPRDsts(), inj_info.injOpSeed));
inject_GPR_error(ap, rp, regInfo, inj_info.injBIDSeed, inj_info.injInstID, inj_info.injBFM, dyn_inst_count);
}
}
}
break;
case DEST_REG: {
if (has_dest_reg(rp)) {
unsigned long long pcCounter = atomicAdd(&InjPCCount, 1LL); // update counter, return old value
#if INTERVAL_MODE_INJECTION == 1
//__threadfence();
unsigned long long currIntervalInstCount = atomicAdd(&IntervalInstCount, 1LL);
bool cond = inj_info.injInstID == currIntervalInstCount;// && inj_info.intervalModeReady && (!inj_info.intervalModeInjected);
#else
bool cond = inj_info.injPCCount == pcCounter; // the current opcode matches injIGID and injInstID matches
#endif
if (inj_info.injBFM == WARP_FLIP_SINGLE_BIT || inj_info.injBFM == WARP_FLIP_TWO_BITS || inj_info.injBFM == WARP_RANDOM_VALUE || inj_info.injBFM == ZERO_VALUE || inj_info.injBFM == WARP_ZERO_VALUE) { // For warp wide injections
cond = (__any(cond) != 0) ; // __any() evaluates cond for all active threads of the warp and return non-zero if and only if cond evaluates to non-zero for any of them.
}
if(cond) {
// get destination register info, get the value in that register, and inject error
#if INTERVAL_MODE_INJECTION == 1
inj_info.intervalModeInjected = true;
#endif
inject_reg_error(ap, rp, inj_info.injOpSeed, inj_info.injBIDSeed, inj_info.injInstID, inj_info.injBFM, dyn_inst_count);
}
}
}
break;
case CC: {
if (has_dest_CC(rp)) {
if (inj_info.injInstID == atomicAdd(&injCountersInstType[CC], 1LL)) {
inject_CC_error(ap, rp, inj_info.injBIDSeed, inj_info.injInstID, inj_info.injBFM, dyn_inst_count);
}
}
}
break;
case PR: {
if (has_dest_PR(rp)) {
if (inj_info.injInstID == atomicAdd(&injCountersInstType[PR], 1LL)) {
inject_PR_error(ap, rp, inj_info.injBIDSeed, inj_info.injInstID, inj_info.injBFM, dyn_inst_count);
}
}
}
break;
case STORE_VAL: {
if (is_store_inst(ap, mp)) {
unsigned long long currInstCounter = atomicAdd(&injCountersInstType[STORE_VAL], 1LL); // update counter, return old value
bool cond = inj_info.injInstID == currInstCounter; // the current opcode matches injIGID and injInstID matches
if (inj_info.injBFM == WARP_FLIP_SINGLE_BIT || inj_info.injBFM == WARP_FLIP_TWO_BITS || inj_info.injBFM == WARP_RANDOM_VALUE || inj_info.injBFM == ZERO_VALUE || inj_info.injBFM == WARP_ZERO_VALUE) { // For warp wide injections
cond = (__any(cond) != 0) ; // __any() evaluates cond for all active threads of the warp and return non-zero if and only if cond evaluates to non-zero for any of them.
}
if(cond) {
inject_store_error(ap, mp, inj_info.injBIDSeed, inj_info.injInstID, inj_info.injBFM);
}
}
}
break;
case LD_OP:
case LDS_OP:
case IADD_IMUL_OP:
case FADD_FMUL_OP:
case MAD_OP:
case FMA_OP:
case SETP_OP: {
int32_t currInstCat = get_op_category(ap->GetOpcode());
unsigned long long currInstCounter = atomicAdd(&injCountersInstType[currInstCat], 1LL); // update counter, return old value
bool cond = inj_info.injIGID == currInstCat && inj_info.injInstID == currInstCounter; // the current opcode matches injIGID and injInstID matches
if (inj_info.injBFM == WARP_FLIP_SINGLE_BIT || inj_info.injBFM == WARP_FLIP_TWO_BITS || inj_info.injBFM == WARP_RANDOM_VALUE || inj_info.injBFM == ZERO_VALUE || inj_info.injBFM == WARP_ZERO_VALUE) { // For warp wide injections
cond = (__any(cond) != 0) ; // __any() evaluates cond for all active threads of the warp and return non-zero if and only if cond evaluates to non-zero for any of them.
}
if(cond) {
inject_reg_error(ap, rp, inj_info.injOpSeed, inj_info.injBIDSeed, inj_info.injInstID, inj_info.injBFM, dyn_inst_count);
}
}
break;
case MISC_OP: break;
}
}
//////////////////////////////////////////////////////////////////////
// SASSI initialize, finalize, and other operations to be performed
// on kernel entry and exit
//////////////////////////////////////////////////////////////////////
static void sassi_init() {
// read seeds for random error injection
parse_params(injInputFilename.c_str()); // injParams are updated based on injection seed file
AppDynInstCount = 0;
IntervalInstCount = 0;
InjPCCount = 0;
}
//////////////////////////////////////////////////////////////////////
// This function is invoked before a cuda-kernel starts executing.
// It resets profiling counters, updated knameCount (to keep track of how many
// kernels and their invocations are done), and updates injection parameters
// that are used by SASSI before and after handlers.
//////////////////////////////////////////////////////////////////////
static void onKernelEntry(const CUpti_CallbackData *cbInfo) {
reset_profiling_counters();
// update knameCount map
std::string currKernelName = cbInfo->symbolName;
if (knameCount.find(currKernelName) == knameCount.end()) {
knameCount[currKernelName] = 0;
} else {
knameCount[currKernelName] += 1;
}
std::string injKernelName = inj_info.injKernelName;
	// mark the injection parameters ready only if this is the kernel of interest
	bool is_inj_kernel_name = injKernelName.compare(cbInfo->symbolName) == 0; // true if the current kernel name matches injKernelName
bool is_inj_kernel_count = (knameCount.find(injKernelName) != knameCount.end()) ? knameCount[injKernelName] == inj_info.injKCount : false; // if kernel name is found, check if injKCount matches knameCount[injKernelName]
inj_info.areParamsReady = is_inj_kernel_name && is_inj_kernel_count; // mark the injection params ready
if (inj_info.areParamsReady)
DEBUG_PRINT(INJ_DEBUG_LIGHT, "areParamsReady=%d, injkname=%s, curr kname=%s, injKCount=%d, is_inj_kernel_count=%d \n", inj_info.areParamsReady, injKernelName.c_str(), cbInfo->symbolName, inj_info.injKCount, is_inj_kernel_count);
#if TIMING
gettimeofday(&start, NULL);
#endif
}
//////////////////////////////////////////////////////////////////////
// This function is called after every cuda-kernel execution.
//////////////////////////////////////////////////////////////////////
static void onKernelExit(const CUpti_CallbackData *cbInfo) {
hipError_t * error = (hipError_t*) cbInfo->functionReturnValue;
if ( (*error) != hipSuccess ) {
printf("Kernel Exit Error: %d", (*error));
}
#if INJ_MODE == RF_INJECTIONS
if (inj_info.areParamsReady) { // Check if this is the kernel of interest
if (inj_info.readyToInject && inj_info.writeBeforeRead) { // error was ready to be injected, but the register was overwritten before being read
printf("Masked: Write before read\n");
exit(0); // exit the simulation
} else if (inj_info.readyToInject && !inj_info.errorInjected) { // error was ready to be injected, but was never injected
printf("Masked: Error was never read\n");
exit(0); // exit the simulation
}
}
#endif
#if TIMING
gettimeofday(&end, NULL);
long seconds, useconds;
seconds = end.tv_sec - start.tv_sec;
useconds = end.tv_usec - start.tv_usec;
float mTime = ((seconds) * 1000 + useconds/1000.0);
printf("\nTime for %s: %f ms\n", cbInfo->symbolName, mTime);
mTotalTime += mTime;
#endif
}
static void sassi_finalize(sassi::lazy_allocator::device_reset_reason reason)
{
#if TIMING
printf("\nTotal kernel time: %f ms\n", mTotalTime);
#endif
}
static sassi::lazy_allocator injectorInit(sassi_init, sassi_finalize, onKernelEntry, onKernelExit);
| c7ea9142876d6bcf3a066f0f39dffb50de68cac7.cu | /*********************************************************************************** \
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\***********************************************************************************/
#define __STDC_FORMAT_MACROS
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <string>
#include <fstream>
#include <algorithm>
#include <map>
#include <sys/time.h>
#include <cupti.h>
#include <sassi/sassi-core.hpp>
#include <sassi/sassi-regs.hpp>
#include <sassi/sassi-memory.hpp>
#include <sassi/sassi-opcodes.h>
#include "sassi_intrinsics.h"
#include "sassi_dictionary.hpp"
#include "sassi_lazyallocator.hpp"
#include "error_injector.h"
// knameCount keeps track of kernel names and the number of invocations of the
// cuda kernels that executed so far during the application execution.
std::map<std::string, int> knameCount;
#if TIMING
struct timeval start, end;
float mTotalTime = 0;
#endif
//////////////////////////////////////////////////////////////////////
// Error injection parameters and related functions
//////////////////////////////////////////////////////////////////////
typedef struct {
bool areParamsReady;
bool errorInjected;
bool writeBeforeRead;
bool readyToInject;
#if INTERVAL_MODE_INJECTION == 1
bool intervalModeReady;
bool intervalModeInjected;
int injIntervalSize;
int injIntervalID;
#endif
char injKernelName[MAX_KNAME_SIZE];
long long injThreadID; // injection thread id
int32_t injKCount;
int32_t injIGID; // arch state id
uint64_t injPC;
unsigned long long injPCCount;
unsigned long long injInstID; // injection inst id
float injOpSeed; // injection operand id seed (random number between 0-1)
uint32_t injBFM; // error model
float injBIDSeed; // bit id seed (random number between 0-1)
} inj_info_t;
__managed__ inj_info_t inj_info;
void reset_inj_info() {
inj_info.areParamsReady = false;
inj_info.errorInjected = false;
inj_info.writeBeforeRead = false;
inj_info.readyToInject = false;
#if INTERVAL_MODE_INJECTION == 1
inj_info.intervalModeReady = false;
inj_info.intervalModeInjected = false;
inj_info.injIntervalSize = 0;
inj_info.injIntervalID = -1;
#endif
inj_info.injThreadID = -1;
inj_info.injKernelName[0] = '\0';
inj_info.injKCount = 0;
inj_info.injIGID = 0; // arch state id
inj_info.injInstID = 0; // instruction id
inj_info.injOpSeed = 0; // destination id seed (float, 0-1)
inj_info.injBIDSeed = 0; // bit location seed (float, 0-1)
inj_info.injBFM = 0; // fault model: single bit flip, all bit flip, random value
}
// for debugging
void print_inj_info() {
#if INTERVAL_MODE_INJECTION != 1
printf("injPC=%lx, injPCCount=%lld, ", inj_info.injPC, inj_info.injPCCount);
#endif
printf("inj_igid=%d, inj_fault_model=%d, inj_inst_id=%lld",
inj_info.injIGID, inj_info.injBFM, inj_info.injInstID);
printf("inj_destination_id=%f, inj_bit_location=%f \n", inj_info.injOpSeed, inj_info.injBIDSeed);
}
// Parse error injection site info from a file. This should be done on host side.
void parse_params(std::string filename) {
reset_inj_info();
std::ifstream ifs (filename.c_str(), std::ifstream::in);
if (ifs.is_open()) {
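    // Seed-file layout, as read below (whitespace separated, in this order):
    // injIGID (skipped for RF injections), injBFM,
    // injIntervalSize and injIntervalID (interval mode only),
    // injPC (hex), injPCCount, injOpSeed, injBIDSeed.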
#if INJ_MODE != RF_INJECTIONS
ifs >> inj_info.injIGID; // arch state id
assert(inj_info.injIGID >=0 && inj_info.injIGID < NUM_INST_TYPES); // ensure that the value is in the expected range
#endif
ifs >> inj_info.injBFM; // fault model: single bit flip, all bit flip, random value
assert(inj_info.injBFM < NUM_BFM_TYPES); // ensure that the value is in the expected range
#if INTERVAL_MODE_INJECTION != 1
// ifs >> inj_info.injKernelName;
// ifs >> inj_info.injKCount;
#else
ifs >> inj_info.injIntervalSize;
ifs >> inj_info.injIntervalID;
#endif
ifs >> std::hex >> inj_info.injPC; // PC
ifs >> std::dec >> inj_info.injPCCount;
// ifs >> inj_info.injInstID; // instruction id
ifs >> inj_info.injOpSeed; // destination id seed (float, 0-1 for inst injections and 0-256 for reg)
#if INJ_MODE != RF_INJECTIONS
assert(inj_info.injOpSeed >=0 && inj_info.injOpSeed < 1.01); // ensure that the value is in the expected range
#else
assert(inj_info.injOpSeed >=0 && inj_info.injOpSeed < 257); // ensure that the value is in the expected range
#endif
ifs >> inj_info.injBIDSeed; // bit location seed (float, 0-1)
assert(inj_info.injBIDSeed >= 0 && inj_info.injBIDSeed < 1.01); // ensure that the value is in the expected range
}
ifs.close();
if (INJ_DEBUG_LIGHT) {
print_inj_info();
}
}
//////////////////////////////////////////////////////////////////////
// Functions for actual error injection
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// Input: inMask and injBIDSeed
// Output: outMask that selects which bit to flip among the bits that are 1 in inMask
//////////////////////////////////////////////////////////////////////
__device__ uint8_t get_inj_mask(uint8_t inMask, uint8_t injBIDSeed) {
uint8_t outMask = 1;
uint8_t tempInMask = inMask;
int i, count=0;
for (i=0; i<8; i++) { // counting number of 1s in inMask
if (tempInMask & 0x1 == 1) {
count++;
}
tempInMask = tempInMask >> 1;
}
if (INJ_DEBUG_HEAVY) {
printf(" count = %d \n", count);
}
uint8_t injBID = get_int_inj_id(count, injBIDSeed);
if (INJ_DEBUG_HEAVY) {
printf(" injBID = %d \n", injBID);
}
count = 0;
tempInMask = inMask;
  for (i=0; i<8; i++) { // walk the mask again and stop at the injBID-th set bit
if (tempInMask & 0x1 == 1) {
if (count == injBID)
break;
count++;
}
tempInMask = tempInMask >> 1;
outMask = outMask << 1;
}
if (INJ_DEBUG_HEAVY) {
printf(" inMask=%x, outMask=%x \n", inMask, outMask);
}
return outMask;
}
////////////////////////////////////////////////////////////////////////////////////
// Injecting errors in store instructions
////////////////////////////////////////////////////////////////////////////////////
template <typename T>
__device__ void inject_store_error_t(SASSIAfterParams* ap, SASSIMemoryParams *mp, float injBIDSeed, unsigned long long injInstID, int32_t bitwidth, uint32_t injBFM) {
uint32_t injBID = get_int_inj_id(bitwidth, injBIDSeed);
int64_t addr = mp->GetAddress();
T *memAddr = (T*) addr;
DEBUG_PRINT(INJ_DEBUG_LIGHT, "memAddr=%llx: value before=%llx\n", memAddr, *memAddr);
printf(":::Injecting: pc=%llx bbId=%d opcode=%s tid=%d instCount=%lld instType=st%d injBID=%d:::", ap->GetPUPC(), ap->GetBBID(), SASSIInstrOpcodeStrings[ap->GetOpcode()], get_flat_tid(), injInstID, bitwidth, injBID);
if (!DUMMY_INJECTION) {
if(injBFM == FLIP_SINGLE_BIT || injBFM == WARP_FLIP_SINGLE_BIT) {
*memAddr = *memAddr ^ ((T)1<<injBID); // actual error injection
} else if (injBFM == FLIP_TWO_BITS || injBFM == WARP_FLIP_TWO_BITS) {
*memAddr = *memAddr ^ ((T)3<<injBID); // actual error injection
} else if (injBFM == RANDOM_VALUE || injBFM == WARP_RANDOM_VALUE) {
*memAddr = ((T)(-1))*injBIDSeed;
} else if (injBFM == ZERO_VALUE || injBFM == WARP_ZERO_VALUE) {
*memAddr = 0;
}
}
DEBUG_PRINT(INJ_DEBUG_LIGHT, "memAddr=%llx: value after=%llx\n", memAddr, *memAddr);
}
__device__ void inject_store128_error_t(SASSIAfterParams* ap, SASSIMemoryParams *mp, float injBIDSeed, unsigned long long injInstID, int32_t bitwidth, uint32_t injBFM) {
uint32_t injBID = get_int_inj_id(bitwidth, injBIDSeed);
int64_t addr = mp->GetAddress();
uint128_t *memAddr = (uint128_t*) addr;
DEBUG_PRINT(INJ_DEBUG_LIGHT, "memAddr=%llx: value before=%llx, %llx\n", memAddr, (*memAddr).values[0], (*memAddr).values[1]);
printf(":::Injecting: pc=%llx bbId=%d opcode=%s tid=%d instCount=%lld instType=st%d injBID=%d:::", ap->GetPUPC(), ap->GetBBID(), SASSIInstrOpcodeStrings[ap->GetOpcode()], get_flat_tid(), injInstID, bitwidth, injBID);
if (!DUMMY_INJECTION) {
if (injBFM == FLIP_SINGLE_BIT || injBFM == WARP_FLIP_SINGLE_BIT) {
if (injBID < 64) {
memAddr->values[0] = memAddr->values[0] ^ ((uint64_t)1<<injBID); // actual error injection
} else {
memAddr->values[1] = memAddr->values[1] ^ ((uint64_t)1<<(injBID-64)); // actual error injection
}
} else if (injBFM == FLIP_TWO_BITS || injBFM == WARP_FLIP_TWO_BITS) {
if (injBID < 63) {
memAddr->values[0] = memAddr->values[0] ^ ((uint64_t)3<<injBID); // actual error injection
} else if (injBID == 63) {
memAddr->values[0] = memAddr->values[0] ^ ((uint64_t)1<<injBID); // actual error injection
memAddr->values[1] = memAddr->values[1] ^ ((uint64_t)1<<(injBID-64)); // actual error injection
} else {
memAddr->values[1] = memAddr->values[1] ^ ((uint64_t)3<<(injBID-64)); // actual error injection
}
} else if (injBFM == RANDOM_VALUE || injBFM == WARP_RANDOM_VALUE) {
memAddr->values[0] = ((uint64_t)(-1))*injBIDSeed;
memAddr->values[1] = ((uint64_t)(-1))*injBIDSeed;
} else if (injBFM == ZERO_VALUE || injBFM == WARP_ZERO_VALUE) {
memAddr->values[0] = 0;
memAddr->values[1] = 0;
}
}
DEBUG_PRINT(INJ_DEBUG_LIGHT, "memAddr=%llx: value before=%llx, %llx\n", memAddr, memAddr->values[0], memAddr->values[1]);
}
// Inject in store value
__device__ void inject_store_error(SASSIAfterParams* ap, SASSIMemoryParams *mp, float injBIDSeed, unsigned long long injInstID, uint32_t injBFM) {
int32_t bitwidth = 8*mp->GetWidth(); // GetWidth returns bytes
if (bitwidth == 32) { // most common case
inject_store_error_t<uint32_t>(ap, mp, injBIDSeed, injInstID, bitwidth, injBFM);
} else if (bitwidth == 8) {
inject_store_error_t<uint8_t>(ap, mp, injBIDSeed, injInstID, bitwidth, injBFM);
} else if (bitwidth == 16) {
inject_store_error_t<uint16_t>(ap, mp, injBIDSeed, injInstID, bitwidth, injBFM);
} else if (bitwidth == 64) {
inject_store_error_t<uint64_t>(ap, mp, injBIDSeed, injInstID, bitwidth, injBFM);
  } else if (bitwidth == 128) {
    inject_store128_error_t(ap, mp, injBIDSeed, injInstID, bitwidth, injBFM);
  } else {
    DEBUG_PRINT(1, "WARNING: No injection for bitwidth=%d\n", bitwidth);
  }
}
////////////////////////////////////////////////////////////////////////////////////
// Injecting errors in GPR registers
////////////////////////////////////////////////////////////////////////////////////
__device__ void inject_GPR_error(SASSICoreParams* cp, SASSIRegisterParams *rp, SASSIRegisterParams::GPRRegInfo regInfo, float injBIDSeed, unsigned long long injInstID, uint32_t injBFM, unsigned long long dyn_inst_count) {
// get the value in the register, and inject error
int32_t valueInReg = rp->GetRegValue(cp, regInfo).asInt;
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Injection candidate: register destination = %d \n", rp->GetRegNum(regInfo));
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Before Injection: register value = %x \n", valueInReg);
SASSIRegisterParams::GPRRegValue injectedVal;
injectedVal.asUint = 0;
uint32_t injBID = 0;
if (injBFM == FLIP_SINGLE_BIT || injBFM == WARP_FLIP_SINGLE_BIT) {
injBID = get_int_inj_id(32, injBIDSeed);
injectedVal.asUint = valueInReg ^ (1<<injBID); // actual error injection
} else if (injBFM == FLIP_TWO_BITS || injBFM == WARP_FLIP_TWO_BITS) {
injBID = get_int_inj_id(31, injBIDSeed);
injectedVal.asUint = valueInReg ^ (3<<injBID); // actual error injection
} else if (injBFM == RANDOM_VALUE || injBFM == WARP_RANDOM_VALUE) {
injectedVal.asUint = ((uint32_t)-1) * injBIDSeed;
} else if (injBFM == ZERO_VALUE || injBFM == WARP_ZERO_VALUE) {
injectedVal.asUint = 0;
}
printf(":::Injecting: pc=%llx bbId=%d GlobalInstCount=%lld AppDynInstCount=%lld opcode=%s tid=%d instCount=%lld instType=GPR regNum=%d injBID=%d:::",
cp->GetPUPC(), cp->GetBBID(), injCounterAllInsts, dyn_inst_count, SASSIInstrOpcodeStrings[cp->GetOpcode()], get_flat_tid(), injInstID,
rp->GetRegNum(regInfo), injBID);
if (!DUMMY_INJECTION) {
rp->SetRegValue(cp, regInfo, injectedVal);
}
int32_t valueInRegAfter = rp->GetRegValue(cp, regInfo).asInt;
DEBUG_PRINT(INJ_DEBUG_LIGHT, "After Injection: register value = %x, ", valueInRegAfter);
DEBUG_PRINT(INJ_DEBUG_LIGHT, "injectedVal = %x \n", injectedVal.asUint);
}
////////////////////////////////////////////////////////////////////////////////////
// Injecting errors in CC registers
////////////////////////////////////////////////////////////////////////////////////
__device__ void inject_CC_error(SASSIAfterParams* ap, SASSIRegisterParams *rp, float injBIDSeed, unsigned long long injInstID, uint32_t injBFM, unsigned long long dyn_inst_count) {
uint8_t valueInReg = rp->SASSIGetCCRegisterVal(ap); // read CC register value, only low 4 bits are used
uint8_t injBID = get_int_inj_id(4, injBIDSeed);
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Before Injection: CC register value = %x \n", valueInReg);
printf(":::Injecting: pc=%llx bbId=%d GlobalInstCount=%lld AppDynInstCount=%lld opcode=%s tid=%d instCount=%lld instType=CC regNum=-1 injBID=%d:::",
ap->GetPUPC(), ap->GetBBID(), injCounterAllInsts, dyn_inst_count, SASSIInstrOpcodeStrings[ap->GetOpcode()], get_flat_tid(), injInstID, injBID);
uint8_t injectedVal = 0;
if (injBFM == FLIP_SINGLE_BIT) {
injectedVal = valueInReg ^ (1<<injBID); // actual error injection
}
if (!DUMMY_INJECTION) {
rp->SASSISetCCRegisterVal(ap, injectedVal);
}
uint8_t valueInRegAfter = rp->SASSIGetCCRegisterVal(ap);
DEBUG_PRINT(INJ_DEBUG_LIGHT, "After Injection: register value = %x ", valueInRegAfter);
DEBUG_PRINT(INJ_DEBUG_LIGHT, ", injectedVal = %x \n", injectedVal);
}
////////////////////////////////////////////////////////////////////////////////////
// Injecting errors in PR registers
////////////////////////////////////////////////////////////////////////////////////
__device__ void inject_PR_error(SASSIAfterParams* ap, SASSIRegisterParams *rp, float injBIDSeed, unsigned long long injInstID, uint32_t injBFM, unsigned long long dyn_inst_count) {
uint8_t valueInReg = rp->SASSIGetPredicateRegisterVal(ap); // read PR register value
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Before Injection: PR register value = %x \n", valueInReg);
printf(":::Injecting: pc=%llx bbId=%d GlobalInstCount=%lld AppDynInstCount=%lld opcode=%s tid=%d instCount=%lld instType=PR regNum=-1 injBID=0:::",
ap->GetPUPC(), ap->GetBBID(), injCounterAllInsts, dyn_inst_count, SASSIInstrOpcodeStrings[ap->GetOpcode()], get_flat_tid(), injInstID);
uint8_t injectedVal = 0;
if (injBFM == FLIP_SINGLE_BIT) {
injectedVal = valueInReg ^ get_inj_mask(rp->GetPredicateDstMask(), injBIDSeed); // actual error injection
}
if (!DUMMY_INJECTION) {
rp->SASSISetPredicateRegisterVal(ap, injectedVal);
}
uint8_t valueInRegAfter = rp->SASSIGetPredicateRegisterVal(ap);
DEBUG_PRINT(INJ_DEBUG_LIGHT, "After Injection: register value = %x ", valueInRegAfter);
DEBUG_PRINT(INJ_DEBUG_LIGHT, ", injectedVal = %x \n", injectedVal);
}
////////////////////////////////////////////////////////////////////////////////////
// Injecting errors in any destination register
////////////////////////////////////////////////////////////////////////////////////
__device__ __noinline__ void inject_reg_error(SASSIAfterParams* ap, SASSIRegisterParams *rp, float injOpSeed, float injBIDSeed, unsigned long long injInstID, uint32_t injBFM, unsigned long long dyn_inst_count) {
int32_t numDestRegs = rp->GetNumGPRDsts(); // Get the number of destination registers assigned by this instruction.
  int32_t numDestOps = numDestRegs + rp->IsCCDefined() + (rp->GetPredicateDstMask() != 0); // num gpr regs + 1 for CC + 1 for PR
DEBUG_PRINT(INJ_DEBUG_LIGHT, "At: tid=%d instCount=%lld opcode=%s numDestOps=%d, isCCDefined=%d, isPredicateDefined=%d\n", get_flat_tid(), injInstID, SASSIInstrOpcodeStrings[ap->GetOpcode()], numDestOps, rp->IsCCDefined(), rp->GetPredicateDstMask() != 0);
if (numDestOps == 0) // cannot inject - no destination operands
return;
int32_t injOpID = get_int_inj_id(numDestOps, injOpSeed);
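  // Operand index space: indices [0, numDestRegs) select a GPR destination, the next
  // index selects the CC register when it is written, and the last index selects the
  // predicate register.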
if (injOpID < numDestRegs) { // inject in a GPR
SASSIRegisterParams::GPRRegInfo regInfo = rp->GetGPRDst(injOpID); // get destination register info, get the value in that register, and inject error
inject_GPR_error(ap, rp, regInfo, injBIDSeed, injInstID, injBFM, dyn_inst_count);
} else if (injOpID - numDestRegs + 1 == rp->IsCCDefined()) { // inject in CC register
inject_CC_error(ap, rp, injBIDSeed, injInstID, injBFM, dyn_inst_count);
} else { // inject in PR Register
inject_PR_error(ap, rp, injBIDSeed, injInstID, injBFM, dyn_inst_count);
}
}
// return 0 if the injRegID is not found in the list of destination registers, else returns the index
__device__ int32_t is_dest_reg(SASSIRegisterParams *rp, int32_t injRegID) {
int32_t numDestRegs = rp->GetNumGPRDsts(); // Get the number of destination registers assigned by this instruction.
for (int32_t i=0; i<numDestRegs; i++) {
if (rp->GetRegNum(rp->GetGPRDst(i)) == injRegID) {
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Injection candidate found in destination: register destination = %d \n", injRegID);
return i;
}
}
return 0;
}
// return 0 if the injRegID is not found in the list of source registers, else returns the index
__device__ int32_t is_src_reg(SASSIRegisterParams *rp, int32_t injRegID) {
  int32_t numSrcRegs = rp->GetNumGPRSrcs(); // Get the number of source registers read by this instruction.
for (int32_t i=0; i<numSrcRegs; i++) {
if (rp->GetRegNum(rp->GetGPRSrc(i)) == injRegID) {
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Injection candidate found in source: register destination = %d \n", injRegID);
return 1;
}
}
return 0;
}
///////////////////////////////////////////////////////////////////////////////////
// SASSI before handler: This function will be called before the instruction
// gets executed. This is used only for RF-AVF injections. This function first
// marks the register for injection. It then checks whether the register is
// used in subsequent instructions. If it is used as a destination before
// being read, the injection run terminates and the run is categorized as masked.
// If the register is not found in any of the source registers before the
// thread exits, the injection run is categorized as masked.
///////////////////////////////////////////////////////////////////////////////////
__device__ void sassi_before_handler(SASSIBeforeParams* bp, SASSIMemoryParams *mp, SASSIRegisterParams *rp) {
#if EMPTY_HANDLER && INJ_MODE != RF_INJECTIONS // if you don't want to inject RF based errors, return
return;
#endif
if (!inj_info.areParamsReady) // Check if this is the kernel of interest
return; // This is not the selected kernel. No need to proceed.
unsigned long long currInstCounter = atomicAdd(&injCounterAllInsts, 1LL) + 1; // update counter, returns old value
  if (inj_info.injInstID == currInstCounter) { // the current instruction count matches injInstID; time to inject the error
// record thread number, and RF to inject.
// Note we are not injecting the error here, we are just recording it. We
// will inject the error when it is used as a source register by the
// subsequent instructions.
inj_info.injThreadID = get_flat_tid();
inj_info.readyToInject = true;
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Injection point reached: tid=%lld instCount=%lld \n", inj_info.injThreadID, inj_info.injInstID);
}
// if readyToInject is set and this is the thread that was selected, check for error injection
if (inj_info.readyToInject && inj_info.injThreadID == get_flat_tid() && !inj_info.errorInjected) {
// check if the selected register is either in source registers or destination registers
if (is_dest_reg(rp, inj_info.injOpSeed) != 0) {
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Terminating run: Write found before a read tid=%lld\n", inj_info.injThreadID);
// Record this injection as Masked and terminate
inj_info.writeBeforeRead = true;
__threadfence(); // ensure store issued before trap
asm("trap;"); // kill kernel with error
}
int32_t src_reg = is_src_reg(rp, inj_info.injOpSeed);
if (src_reg != 0) {
DEBUG_PRINT(INJ_DEBUG_LIGHT, "Reached actual injection point tid=%lld\n", inj_info.injThreadID);
inj_info.errorInjected = true;
inject_GPR_error(bp, rp, rp->GetGPRSrc(src_reg), inj_info.injBIDSeed, inj_info.injInstID, inj_info.injBFM, currInstCounter); // Inject the error and contine
}
}
}
///////////////////////////////////////////////////////////////////////////////////
// SASSI After handler: This function is called after every SASS instruction.
// This is used for instruction output-level injections only. This function
// first checks whether the injection parameters are ready. If so, it checks
// the instruction group id of the current instruction and then proceeds to
// the respective function to check and perform error injection.
///////////////////////////////////////////////////////////////////////////////////
__device__ void sassi_after_handler(SASSIAfterParams* ap, SASSIMemoryParams *mp, SASSIRegisterParams *rp) { // order is important
/***FRITZ**/
unsigned long long dyn_inst_count = atomicAdd(&AppDynInstCount, 1LL);
#if INTERVAL_MODE_INJECTION == 1
int interval = dyn_inst_count / inj_info.injIntervalSize;
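  // Interval mode: an instruction is an injection candidate only if its dynamic
  // instruction count falls inside the selected interval (injIntervalID).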
if (inj_info.injIntervalID != interval) //{
return;
#endif
atomicAdd(&injCounterAllInsts, 1LL);
#if EMPTY_HANDLER && INJ_MODE != INST_INJECTIONS // if you don't want to inject instruction level errors, return
return;
#endif
#if INTERVAL_MODE_INJECTION != 1
if (inj_info.injPC != ap->GetPUPC()) // Check if this is the kernel of interest
return; // This is not the selected kernel. No need to proceed.
#endif
switch (inj_info.injIGID) {
case GPR: {
if (has_dest_GPR(rp)) {
unsigned long long pcCounter = atomicAdd(&InjPCCount, 1LL); // update counter, return old value
#if INTERVAL_MODE_INJECTION == 1
//__threadfence();
unsigned long long currIntervalInstCount = atomicAdd(&IntervalInstCount, 1LL);
bool cond = inj_info.injInstID == currIntervalInstCount;// && inj_info.intervalModeReady && (!inj_info.intervalModeInjected);
#else
bool cond = inj_info.injPCCount == pcCounter; // the current opcode matches injIGID and injInstID matches
#endif
if (inj_info.injBFM == WARP_FLIP_SINGLE_BIT || inj_info.injBFM == WARP_FLIP_TWO_BITS || inj_info.injBFM == WARP_RANDOM_VALUE || inj_info.injBFM == ZERO_VALUE || inj_info.injBFM == WARP_ZERO_VALUE) { // For warp wide injections
        cond = (__any(cond) != 0) ; // __any() evaluates cond for all active threads of the warp and returns non-zero if and only if cond evaluates to non-zero for any of them.
}
if(cond) {
/* printf("--------------\n"
"injPC %lx , currentPC:%lx, pcCount=%lldi looking for %lld\n"
"----------------------------\n",
inj_info.injPC, ap->GetPUPC(), pcCounter,
inj_info.injPCCount);*/
// get destination register info, get the value in that register, and inject error
#if INTERVAL_MODE_INJECTION == 1
inj_info.intervalModeInjected = true;
#endif
SASSIRegisterParams::GPRRegInfo regInfo = rp->GetGPRDst(get_int_inj_id(rp->GetNumGPRDsts(), inj_info.injOpSeed));
inject_GPR_error(ap, rp, regInfo, inj_info.injBIDSeed, inj_info.injInstID, inj_info.injBFM, dyn_inst_count);
}
}
}
break;
case DEST_REG: {
if (has_dest_reg(rp)) {
unsigned long long pcCounter = atomicAdd(&InjPCCount, 1LL); // update counter, return old value
#if INTERVAL_MODE_INJECTION == 1
//__threadfence();
unsigned long long currIntervalInstCount = atomicAdd(&IntervalInstCount, 1LL);
bool cond = inj_info.injInstID == currIntervalInstCount;// && inj_info.intervalModeReady && (!inj_info.intervalModeInjected);
#else
bool cond = inj_info.injPCCount == pcCounter; // the current opcode matches injIGID and injInstID matches
#endif
if (inj_info.injBFM == WARP_FLIP_SINGLE_BIT || inj_info.injBFM == WARP_FLIP_TWO_BITS || inj_info.injBFM == WARP_RANDOM_VALUE || inj_info.injBFM == ZERO_VALUE || inj_info.injBFM == WARP_ZERO_VALUE) { // For warp wide injections
        cond = (__any(cond) != 0) ; // __any() evaluates cond for all active threads of the warp and returns non-zero if and only if cond evaluates to non-zero for any of them.
}
if(cond) {
// get destination register info, get the value in that register, and inject error
#if INTERVAL_MODE_INJECTION == 1
inj_info.intervalModeInjected = true;
#endif
inject_reg_error(ap, rp, inj_info.injOpSeed, inj_info.injBIDSeed, inj_info.injInstID, inj_info.injBFM, dyn_inst_count);
}
}
}
break;
case CC: {
if (has_dest_CC(rp)) {
if (inj_info.injInstID == atomicAdd(&injCountersInstType[CC], 1LL)) {
inject_CC_error(ap, rp, inj_info.injBIDSeed, inj_info.injInstID, inj_info.injBFM, dyn_inst_count);
}
}
}
break;
case PR: {
if (has_dest_PR(rp)) {
if (inj_info.injInstID == atomicAdd(&injCountersInstType[PR], 1LL)) {
inject_PR_error(ap, rp, inj_info.injBIDSeed, inj_info.injInstID, inj_info.injBFM, dyn_inst_count);
}
}
}
break;
case STORE_VAL: {
if (is_store_inst(ap, mp)) {
unsigned long long currInstCounter = atomicAdd(&injCountersInstType[STORE_VAL], 1LL); // update counter, return old value
bool cond = inj_info.injInstID == currInstCounter; // the current opcode matches injIGID and injInstID matches
if (inj_info.injBFM == WARP_FLIP_SINGLE_BIT || inj_info.injBFM == WARP_FLIP_TWO_BITS || inj_info.injBFM == WARP_RANDOM_VALUE || inj_info.injBFM == ZERO_VALUE || inj_info.injBFM == WARP_ZERO_VALUE) { // For warp wide injections
        cond = (__any(cond) != 0) ; // __any() evaluates cond for all active threads of the warp and returns non-zero if and only if cond evaluates to non-zero for any of them.
}
if(cond) {
inject_store_error(ap, mp, inj_info.injBIDSeed, inj_info.injInstID, inj_info.injBFM);
}
}
}
break;
case LD_OP:
case LDS_OP:
case IADD_IMUL_OP:
case FADD_FMUL_OP:
case MAD_OP:
case FMA_OP:
case SETP_OP: {
int32_t currInstCat = get_op_category(ap->GetOpcode());
unsigned long long currInstCounter = atomicAdd(&injCountersInstType[currInstCat], 1LL); // update counter, return old value
bool cond = inj_info.injIGID == currInstCat && inj_info.injInstID == currInstCounter; // the current opcode matches injIGID and injInstID matches
if (inj_info.injBFM == WARP_FLIP_SINGLE_BIT || inj_info.injBFM == WARP_FLIP_TWO_BITS || inj_info.injBFM == WARP_RANDOM_VALUE || inj_info.injBFM == ZERO_VALUE || inj_info.injBFM == WARP_ZERO_VALUE) { // For warp wide injections
        cond = (__any(cond) != 0) ; // __any() evaluates cond for all active threads of the warp and returns non-zero if and only if cond evaluates to non-zero for any of them.
}
if(cond) {
inject_reg_error(ap, rp, inj_info.injOpSeed, inj_info.injBIDSeed, inj_info.injInstID, inj_info.injBFM, dyn_inst_count);
}
}
break;
case MISC_OP: break;
}
}
//////////////////////////////////////////////////////////////////////
// SASSI initialize, finalize, and other operations to be performed
// on kernel entry and exit
//////////////////////////////////////////////////////////////////////
static void sassi_init() {
// read seeds for random error injection
parse_params(injInputFilename.c_str()); // injParams are updated based on injection seed file
AppDynInstCount = 0;
IntervalInstCount = 0;
InjPCCount = 0;
}
//////////////////////////////////////////////////////////////////////
// This function is invoked before a cuda-kernel starts executing.
// It resets profiling counters, updated knameCount (to keep track of how many
// kernels and their invocations are done), and updates injection parameters
// that are used by SASSI before and after handlers.
//////////////////////////////////////////////////////////////////////
static void onKernelEntry(const CUpti_CallbackData *cbInfo) {
reset_profiling_counters();
// update knameCount map
std::string currKernelName = cbInfo->symbolName;
if (knameCount.find(currKernelName) == knameCount.end()) {
knameCount[currKernelName] = 0;
} else {
knameCount[currKernelName] += 1;
}
std::string injKernelName = inj_info.injKernelName;
  // mark the injection params ready only if this is the kernel of interest
  bool is_inj_kernel_name = injKernelName.compare(cbInfo->symbolName) == 0; // true if the current kernel name matches injKernelName
bool is_inj_kernel_count = (knameCount.find(injKernelName) != knameCount.end()) ? knameCount[injKernelName] == inj_info.injKCount : false; // if kernel name is found, check if injKCount matches knameCount[injKernelName]
inj_info.areParamsReady = is_inj_kernel_name && is_inj_kernel_count; // mark the injection params ready
if (inj_info.areParamsReady)
DEBUG_PRINT(INJ_DEBUG_LIGHT, "areParamsReady=%d, injkname=%s, curr kname=%s, injKCount=%d, is_inj_kernel_count=%d \n", inj_info.areParamsReady, injKernelName.c_str(), cbInfo->symbolName, inj_info.injKCount, is_inj_kernel_count);
#if TIMING
gettimeofday(&start, NULL);
#endif
}
//////////////////////////////////////////////////////////////////////
// This function is called after every cuda-kernel execution.
//////////////////////////////////////////////////////////////////////
static void onKernelExit(const CUpti_CallbackData *cbInfo) {
cudaError_t * error = (cudaError_t*) cbInfo->functionReturnValue;
if ( (*error) != cudaSuccess ) {
printf("Kernel Exit Error: %d", (*error));
}
#if INJ_MODE == RF_INJECTIONS
if (inj_info.areParamsReady) { // Check if this is the kernel of interest
if (inj_info.readyToInject && inj_info.writeBeforeRead) { // error was ready to be injected, but the register was overwritten before being read
printf("Masked: Write before read\n");
exit(0); // exit the simulation
} else if (inj_info.readyToInject && !inj_info.errorInjected) { // error was ready to be injected, but was never injected
printf("Masked: Error was never read\n");
exit(0); // exit the simulation
}
}
#endif
#if TIMING
gettimeofday(&end, NULL);
long seconds, useconds;
seconds = end.tv_sec - start.tv_sec;
useconds = end.tv_usec - start.tv_usec;
float mTime = ((seconds) * 1000 + useconds/1000.0);
printf("\nTime for %s: %f ms\n", cbInfo->symbolName, mTime);
mTotalTime += mTime;
#endif
}
static void sassi_finalize(sassi::lazy_allocator::device_reset_reason reason)
{
#if TIMING
printf("\nTotal kernel time: %f ms\n", mTotalTime);
#endif
}
static sassi::lazy_allocator injectorInit(sassi_init, sassi_finalize, onKernelEntry, onKernelExit);
|
bfc047966851ec65d6c7c124b5be194dc3254a77.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {
__global__ void img_reverse(uchar3* d_idata, uchar3* d_odata, int width, int height){
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
int idx = yIndex * width + xIndex;
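  // Invert each RGB channel (255 - value) to produce the photographic negative of the input image.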
if (xIndex < width && yIndex < height){
uchar3 rgb = d_idata[idx];
d_odata[idx].x = 255 - rgb.x;
d_odata[idx].y = 255 - rgb.y;
d_odata[idx].z = 255 - rgb.z;
}
}
} | bfc047966851ec65d6c7c124b5be194dc3254a77.cu | extern "C" {
__global__ void img_reverse(uchar3* d_idata, uchar3* d_odata, int width, int height){
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
int idx = yIndex * width + xIndex;
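  // Invert each RGB channel (255 - value) to produce the photographic negative of the input image.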
if (xIndex < width && yIndex < height){
uchar3 rgb = d_idata[idx];
d_odata[idx].x = 255 - rgb.x;
d_odata[idx].y = 255 - rgb.y;
d_odata[idx].z = 255 - rgb.z;
}
}
} |
1e1cfd915ed056d4256d69e4ab85583d4c5b5ab2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cassert>
#include <cstring>
#include "paddle/fluid/inference/tensorrt/plugin/hard_swish_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
nvinfer1::Dims HardSwishPlugin::getOutputDimensions(
int index, const nvinfer1::Dims* in_dims, int nb_inputs) TRT_NOEXCEPT {
assert(nb_inputs == 1);
assert(index < this->getNbOutputs());
nvinfer1::Dims const& input_dims = in_dims[0];
nvinfer1::Dims output_dims = input_dims;
return output_dims;
}
template <typename T>
__device__ T kMax(T a, T b) {
return a > b ? a : b;
}
template <typename T>
__device__ T kMin(T a, T b) {
return a < b ? a : b;
}
template <typename T, unsigned TPB>
__global__ void hard_swish_kernel(float threshold, float scale, float offset,
int n, const T* input, T* output) {
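  // Computes hard-swish elementwise: output = x * min(max(x + offset, 0), threshold) / scale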
const int idx = blockIdx.x * TPB + threadIdx.x;
if (idx < n) {
const T in = input[idx];
output[idx] = in / scale * kMin<T>(kMax<T>(in + offset, 0), threshold);
}
}
int HardSwishPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
void** outputs, void*, hipStream_t stream) {
#else
void* const* outputs, void*,
hipStream_t stream) TRT_NOEXCEPT {
#endif
const auto& input_dims = this->getInputDims(0);
int num = batch_size;
for (int i = 0; i < input_dims.nbDims; i++) {
num *= input_dims.d[i];
}
float threshold = threshold_;
float scale = scale_;
float offset = offset_;
const int block_size = 256;
const int grid_size = (num + block_size - 1) / block_size;
const float* input = static_cast<const float*>(inputs[0]);
float* output = static_cast<float*>(outputs[0]);
hipLaunchKernelGGL(( hard_swish_kernel<float, block_size>), dim3(grid_size), dim3(block_size), 0, stream,
threshold, scale, offset, num, input, output);
return hipGetLastError() != hipSuccess;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 1e1cfd915ed056d4256d69e4ab85583d4c5b5ab2.cu | // Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cassert>
#include <cstring>
#include "paddle/fluid/inference/tensorrt/plugin/hard_swish_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
nvinfer1::Dims HardSwishPlugin::getOutputDimensions(
int index, const nvinfer1::Dims* in_dims, int nb_inputs) TRT_NOEXCEPT {
assert(nb_inputs == 1);
assert(index < this->getNbOutputs());
nvinfer1::Dims const& input_dims = in_dims[0];
nvinfer1::Dims output_dims = input_dims;
return output_dims;
}
template <typename T>
__device__ T kMax(T a, T b) {
return a > b ? a : b;
}
template <typename T>
__device__ T kMin(T a, T b) {
return a < b ? a : b;
}
template <typename T, unsigned TPB>
__global__ void hard_swish_kernel(float threshold, float scale, float offset,
int n, const T* input, T* output) {
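  // Computes hard-swish elementwise: output = x * min(max(x + offset, 0), threshold) / scale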
const int idx = blockIdx.x * TPB + threadIdx.x;
if (idx < n) {
const T in = input[idx];
output[idx] = in / scale * kMin<T>(kMax<T>(in + offset, 0), threshold);
}
}
int HardSwishPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
void** outputs, void*, cudaStream_t stream) {
#else
void* const* outputs, void*,
cudaStream_t stream) TRT_NOEXCEPT {
#endif
const auto& input_dims = this->getInputDims(0);
int num = batch_size;
for (int i = 0; i < input_dims.nbDims; i++) {
num *= input_dims.d[i];
}
float threshold = threshold_;
float scale = scale_;
float offset = offset_;
const int block_size = 256;
const int grid_size = (num + block_size - 1) / block_size;
const float* input = static_cast<const float*>(inputs[0]);
float* output = static_cast<float*>(outputs[0]);
hard_swish_kernel<float, block_size><<<grid_size, block_size, 0, stream>>>(
threshold, scale, offset, num, input, output);
return cudaGetLastError() != cudaSuccess;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
94dcae1e1efeb6164dc10b79b15cfd32c1965f37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* \brief Vector dot product: h_result = SUM(A * B).
*/
#include "cuda_util.h"
// Initialize the input data.
void GenArray(const int len, float *arr) {
for (int i = 0; i < len; i++) {
arr[i] = 1;//(float)rand() / RAND_MAX + (float)rand() / (RAND_MAX*RAND_MAX);
}
}
// CPU version: 965ms
// Normal version in cpu as a reference
float VectorDotProductCPU(const float *vec_a, const float *vec_b, const int len) {
float h_result = 0;
for (int i = 0; i<len; i++) {
h_result += vec_a[i] * vec_b[i];
}
return h_result;
}
// CUDA kernel v1 : 283ms
// Multiply and save to shared memory.
// Accumulate data from all of the shared memory to fewer blocks.
template <int BLOCK_SIZE>
__global__ void VectorDotProductKernelv1(const float *vec_a, const float *vec_b, const int len, float &res) {
// Prevents memory access across the border.
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < len;
i += blockDim.x * gridDim.x) {
__shared__ float smem[BLOCK_SIZE];
smem[threadIdx.x] = vec_a[i] * vec_b[i];
__syncthreads();
//// Very slow ?
//if (threadIdx.x == 0) {
// int sum = 0;
// for (int i = 0; i < BLOCK_SIZE; i++)
// sum += smem[i];
// atomicAdd(&res, sum);
//}
int count = BLOCK_SIZE / 2;
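    // Tree reduction in shared memory: each round halves the number of active threads
    // until smem[0] holds the block's partial sum, which is then atomically added to res.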
while (count >= 1) {
if(threadIdx.x < count) {
smem[threadIdx.x] += smem[count + threadIdx.x];
}
// Synchronize the threads within the block,
// then go to next round together.
__syncthreads();
count /= 2;
}
if(threadIdx.x == 0)
atomicAdd(&res, smem[0]);
}
}
// CUDA kernel v2 : 201ms
// Compute two blocks' data to the shared memory of one block.
template <int BLOCK_SIZE>
__global__ void VectorDotProductKernelv2(const float *vec_a, const float *vec_b, const int len, float &res) {
// Prevents memory access across the border.
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < len / 2;
i += blockDim.x * gridDim.x) {
__shared__ float smem[BLOCK_SIZE];
smem[threadIdx.x] = vec_a[i] * vec_b[i] + vec_a[i + len / 2] * vec_b[i + len / 2]; // Mainly in here.
__syncthreads();
int count = BLOCK_SIZE >> 1;
while (count >= 1) {
if (threadIdx.x < count) {
smem[threadIdx.x] += smem[count + threadIdx.x];
}
// Synchronize the threads within the block,
// then go to next round together.
__syncthreads();
count >>= 1;
}
if (threadIdx.x == 0)
atomicAdd(&res, smem[0]);
}
}
// CUDA kernel v3 : 179ms
// Condition: The block size should be bigger than 32
// Unroll the last warp
template <int BLOCK_SIZE>
__global__ void VectorDotProductKernelv3(const float *vec_a, const float *vec_b, const int len, float &res) {
// Prevents memory access across the border.
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < len / 2;
i += blockDim.x * gridDim.x) {
__shared__ float smem[BLOCK_SIZE];
smem[threadIdx.x] = vec_a[i] * vec_b[i] + vec_a[i + len / 2] * vec_b[i + len / 2];
__syncthreads();
for (int count = BLOCK_SIZE >> 1; count > 32; count >>= 1) {
if (threadIdx.x < count) {
smem[threadIdx.x] += smem[count + threadIdx.x];
}
__syncthreads();
}
////// Mainly in here. Unroll the last warp. (It still need __syncthreads() in a warp ?)
//if (threadIdx.x < 32) {
// smem[threadIdx.x] += smem[threadIdx.x + 32]; __syncthreads();
// smem[threadIdx.x] += smem[threadIdx.x + 16]; __syncthreads();
// smem[threadIdx.x] += smem[threadIdx.x + 8]; __syncthreads();
// smem[threadIdx.x] += smem[threadIdx.x + 4]; __syncthreads();
// smem[threadIdx.x] += smem[threadIdx.x + 2]; __syncthreads();
// smem[threadIdx.x] += smem[threadIdx.x + 1]; __syncthreads();
//}
    // Note on the unrolled last warp above, which would otherwise still need __syncthreads():
    // warp-synchronous code must access shared memory through a volatile-qualified pointer,
    // otherwise compiler optimizations may reorder the memory operations and produce incorrect results.
if (threadIdx.x < 32) {
volatile float *smem_t = smem;
if (blockDim.x > 32) {
smem_t[threadIdx.x] += smem_t[threadIdx.x + 32];
}
smem_t[threadIdx.x] += smem_t[threadIdx.x + 16];
smem_t[threadIdx.x] += smem_t[threadIdx.x + 8];
smem_t[threadIdx.x] += smem_t[threadIdx.x + 4];
smem_t[threadIdx.x] += smem_t[threadIdx.x + 2];
smem_t[threadIdx.x] += smem_t[threadIdx.x + 1];
}
if (threadIdx.x == 0)
atomicAdd(&res, smem[0]);
}
}
float VectorDotProductCUDA(const int loops, const float *vec_a, const float *vec_b, const int len, float &result) {
// Time recorder.
cjmcv_cuda_util::GpuTimer gpu_timer;
const int threads_per_block = 1024; // data_len % threads_per_block == 0
const int blocks_per_grid = (len + threads_per_block - 1) / threads_per_block;
// Warm up.
  hipLaunchKernelGGL(( VectorDotProductKernelv3<threads_per_block>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
    vec_a, vec_b, len, result);
gpu_timer.Start();
for (int i = 0; i < loops; i++) {
hipMemset(&result, 0, sizeof(float));
    hipLaunchKernelGGL(( VectorDotProductKernelv3<threads_per_block>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
      vec_a, vec_b, len, result);
}
gpu_timer.Stop();
return gpu_timer.ElapsedMillis();
}
int main() {
int ret = cjmcv_cuda_util::InitEnvironment(0);
if (ret != 0) {
printf("Failed to initialize the environment for cuda.");
return -1;
}
const int loops = 100;
const int data_len = 1024000; // data_len % threads_per_block == 0
const int data_mem_size = sizeof(float) * data_len;
float *h_vector_a = (float *)malloc(data_mem_size);
float *h_vector_b = (float *)malloc(data_mem_size);
if (h_vector_a == NULL || h_vector_b == NULL) {
printf("Fail to malloc.\n");
return -1;
}
// Initialize
srand(0);
GenArray(data_len, h_vector_a);
GenArray(data_len, h_vector_b);
// CPU
time_t t = clock();
float h_result = 0;
for (int i = 0; i < loops; i++)
h_result = VectorDotProductCPU(h_vector_a, h_vector_b, data_len);
printf("\nIn cpu, msec_total = %lld, h_result = %f\n", clock() - t, h_result);
// GPU
// Allocate memory in host.
float msec_total;
float *d_vector_a = NULL, *d_vector_b = NULL;
float *d_result = NULL;
CUDA_CHECK(hipMalloc((void **)&d_vector_a, data_mem_size));
CUDA_CHECK(hipMalloc((void **)&d_vector_b, data_mem_size));
CUDA_CHECK(hipMalloc((void **)&d_result, sizeof(float)));
// Copy host memory to device
CUDA_CHECK(hipMemcpy(d_vector_a, h_vector_a, data_mem_size, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(d_vector_b, h_vector_b, data_mem_size, hipMemcpyHostToDevice));
msec_total = VectorDotProductCUDA(loops, d_vector_a, d_vector_b, data_len, *d_result);
CUDA_CHECK(hipMemcpy(&h_result, d_result, sizeof(float), hipMemcpyDeviceToHost));
printf("\nIn gpu, msec_total = %f, h_result = %f\n", msec_total, h_result);
free(h_vector_a);
free(h_vector_b);
hipFree(d_vector_a);
hipFree(d_vector_b);
hipFree(d_result);
cjmcv_cuda_util::CleanUpEnvironment();
system("pause");
return 0;
}
| 94dcae1e1efeb6164dc10b79b15cfd32c1965f37.cu | /*!
* \brief Vector dot product: h_result = SUM(A * B).
*/
#include "cuda_util.h"
// Initialize the input data.
void GenArray(const int len, float *arr) {
for (int i = 0; i < len; i++) {
arr[i] = 1;//(float)rand() / RAND_MAX + (float)rand() / (RAND_MAX*RAND_MAX);
}
}
// CPU version: 965ms
// Normal version in cpu as a reference
float VectorDotProductCPU(const float *vec_a, const float *vec_b, const int len) {
float h_result = 0;
for (int i = 0; i<len; i++) {
h_result += vec_a[i] * vec_b[i];
}
return h_result;
}
// CUDA kernel v1 : 283ms
// Multiply and save to shared memory.
// Accumulate data from all of the shared memory to fewer blocks.
template <int BLOCK_SIZE>
__global__ void VectorDotProductKernelv1(const float *vec_a, const float *vec_b, const int len, float &res) {
// Prevents memory access across the border.
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < len;
i += blockDim.x * gridDim.x) {
__shared__ float smem[BLOCK_SIZE];
smem[threadIdx.x] = vec_a[i] * vec_b[i];
__syncthreads();
//// Very slow ?
//if (threadIdx.x == 0) {
// int sum = 0;
// for (int i = 0; i < BLOCK_SIZE; i++)
// sum += smem[i];
// atomicAdd(&res, sum);
//}
int count = BLOCK_SIZE / 2;
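    // Tree reduction in shared memory: each round halves the number of active threads
    // until smem[0] holds the block's partial sum, which is then atomically added to res.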
while (count >= 1) {
if(threadIdx.x < count) {
smem[threadIdx.x] += smem[count + threadIdx.x];
}
// Synchronize the threads within the block,
// then go to next round together.
__syncthreads();
count /= 2;
}
if(threadIdx.x == 0)
atomicAdd(&res, smem[0]);
}
}
// CUDA kernel v2 : 201ms
// Compute two blocks' data to the shared memory of one block.
template <int BLOCK_SIZE>
__global__ void VectorDotProductKernelv2(const float *vec_a, const float *vec_b, const int len, float &res) {
// Prevents memory access across the border.
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < len / 2;
i += blockDim.x * gridDim.x) {
__shared__ float smem[BLOCK_SIZE];
smem[threadIdx.x] = vec_a[i] * vec_b[i] + vec_a[i + len / 2] * vec_b[i + len / 2]; // Mainly in here.
__syncthreads();
int count = BLOCK_SIZE >> 1;
while (count >= 1) {
if (threadIdx.x < count) {
smem[threadIdx.x] += smem[count + threadIdx.x];
}
// Synchronize the threads within the block,
// then go to next round together.
__syncthreads();
count >>= 1;
}
if (threadIdx.x == 0)
atomicAdd(&res, smem[0]);
}
}
// CUDA kernel v3 : 179ms
// Condition: The block size should be bigger than 32
// Unroll the last warp
template <int BLOCK_SIZE>
__global__ void VectorDotProductKernelv3(const float *vec_a, const float *vec_b, const int len, float &res) {
// Prevents memory access across the border.
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < len / 2;
i += blockDim.x * gridDim.x) {
__shared__ float smem[BLOCK_SIZE];
smem[threadIdx.x] = vec_a[i] * vec_b[i] + vec_a[i + len / 2] * vec_b[i + len / 2];
__syncthreads();
for (int count = BLOCK_SIZE >> 1; count > 32; count >>= 1) {
if (threadIdx.x < count) {
smem[threadIdx.x] += smem[count + threadIdx.x];
}
__syncthreads();
}
////// Mainly in here. Unroll the last warp. (It still need __syncthreads() in a warp ?)
//if (threadIdx.x < 32) {
// smem[threadIdx.x] += smem[threadIdx.x + 32]; __syncthreads();
// smem[threadIdx.x] += smem[threadIdx.x + 16]; __syncthreads();
// smem[threadIdx.x] += smem[threadIdx.x + 8]; __syncthreads();
// smem[threadIdx.x] += smem[threadIdx.x + 4]; __syncthreads();
// smem[threadIdx.x] += smem[threadIdx.x + 2]; __syncthreads();
// smem[threadIdx.x] += smem[threadIdx.x + 1]; __syncthreads();
//}
    // Note on the unrolled last warp above, which would otherwise still need __syncthreads():
    // warp-synchronous code must access shared memory through a volatile-qualified pointer,
    // otherwise compiler optimizations may reorder the memory operations and produce incorrect results.
if (threadIdx.x < 32) {
volatile float *smem_t = smem;
if (blockDim.x > 32) {
smem_t[threadIdx.x] += smem_t[threadIdx.x + 32];
}
smem_t[threadIdx.x] += smem_t[threadIdx.x + 16];
smem_t[threadIdx.x] += smem_t[threadIdx.x + 8];
smem_t[threadIdx.x] += smem_t[threadIdx.x + 4];
smem_t[threadIdx.x] += smem_t[threadIdx.x + 2];
smem_t[threadIdx.x] += smem_t[threadIdx.x + 1];
}
if (threadIdx.x == 0)
atomicAdd(&res, smem[0]);
}
}
float VectorDotProductCUDA(const int loops, const float *vec_a, const float *vec_b, const int len, float &result) {
// Time recorder.
cjmcv_cuda_util::GpuTimer gpu_timer;
const int threads_per_block = 1024; // data_len % threads_per_block == 0
const int blocks_per_grid = (len + threads_per_block - 1) / threads_per_block;
// Warm up.
VectorDotProductKernelv3<threads_per_block> << <blocks_per_grid, threads_per_block >> >
(vec_a, vec_b, len, result);
gpu_timer.Start();
for (int i = 0; i < loops; i++) {
cudaMemset(&result, 0, sizeof(float));
VectorDotProductKernelv3<threads_per_block> << <blocks_per_grid, threads_per_block >> >
(vec_a, vec_b, len, result);
}
gpu_timer.Stop();
return gpu_timer.ElapsedMillis();
}
int main() {
int ret = cjmcv_cuda_util::InitEnvironment(0);
if (ret != 0) {
printf("Failed to initialize the environment for cuda.");
return -1;
}
const int loops = 100;
const int data_len = 1024000; // data_len % threads_per_block == 0
const int data_mem_size = sizeof(float) * data_len;
float *h_vector_a = (float *)malloc(data_mem_size);
float *h_vector_b = (float *)malloc(data_mem_size);
if (h_vector_a == NULL || h_vector_b == NULL) {
printf("Fail to malloc.\n");
return -1;
}
// Initialize
srand(0);
GenArray(data_len, h_vector_a);
GenArray(data_len, h_vector_b);
// CPU
time_t t = clock();
float h_result = 0;
for (int i = 0; i < loops; i++)
h_result = VectorDotProductCPU(h_vector_a, h_vector_b, data_len);
printf("\nIn cpu, msec_total = %lld, h_result = %f\n", clock() - t, h_result);
// GPU
// Allocate memory in host.
float msec_total;
float *d_vector_a = NULL, *d_vector_b = NULL;
float *d_result = NULL;
CUDA_CHECK(cudaMalloc((void **)&d_vector_a, data_mem_size));
CUDA_CHECK(cudaMalloc((void **)&d_vector_b, data_mem_size));
CUDA_CHECK(cudaMalloc((void **)&d_result, sizeof(float)));
// Copy host memory to device
CUDA_CHECK(cudaMemcpy(d_vector_a, h_vector_a, data_mem_size, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(d_vector_b, h_vector_b, data_mem_size, cudaMemcpyHostToDevice));
msec_total = VectorDotProductCUDA(loops, d_vector_a, d_vector_b, data_len, *d_result);
CUDA_CHECK(cudaMemcpy(&h_result, d_result, sizeof(float), cudaMemcpyDeviceToHost));
printf("\nIn gpu, msec_total = %f, h_result = %f\n", msec_total, h_result);
free(h_vector_a);
free(h_vector_b);
cudaFree(d_vector_a);
cudaFree(d_vector_b);
cudaFree(d_result);
cjmcv_cuda_util::CleanUpEnvironment();
system("pause");
return 0;
}
|
e1d4bae07f79f7aba426f09348ed9d4f19b2c638.hip | // !!! This is a file automatically generated by hipify!!!
/* -----------------------------------------------------------------
* Programmer(s): Slaven Peles, and Cody J. Balos @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the testing routine to check the MPIPlusX NVECTOR where
* the X is the CUDA NVECTOR.
* -----------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <sundials/sundials_types.h>
#include <nvector/cuda/Vector.hpp>
#include <nvector/nvector_cuda.h>
#include <nvector/nvector_mpiplusx.h>
#include <sundials/sundials_math.h>
#include "test_nvector.h"
#include <mpi.h>
/* CUDA vector can use unmanaged or managed memory */
enum mem_type { UNMANAGED, MANAGED };
/* ----------------------------------------------------------------------
* Main NVector Testing Routine
* --------------------------------------------------------------------*/
int main(int argc, char *argv[])
{
int fails = 0; /* counter for test failures */
int globfails = 0; /* counter for test failures */
int retval; /* function return value */
sunindextype local_length; /* local vector length */
sunindextype global_length; /* global vector length */
N_Vector U, V, X; /* local test vectors */
N_Vector plusU, plusV, plusX; /* MPIPlusX test vectors */
N_Vector plusY, plusZ; /* MPIPlusX test vectors */
int print_timing; /* turn timing on/off */
MPI_Comm comm; /* MPI Communicator */
int nprocs, myid; /* Number of procs, proc id */
int i;
/* Get processor number and total number of processes */
MPI_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
MPI_Comm_size(comm, &nprocs);
MPI_Comm_rank(comm, &myid);
/* check inputs */
if (argc < 3) {
if (myid == 0)
printf("ERROR: TWO (2) Inputs required: vector length, print timing \n");
MPI_Abort(comm, -1);
}
local_length = (sunindextype) atol(argv[1]);
if (local_length < 1) {
if (myid == 0)
printf("ERROR: local vector length must be a positive integer \n");
MPI_Abort(comm, -1);
}
print_timing = atoi(argv[2]);
SetTiming(print_timing, myid);
/* global length */
global_length = nprocs*local_length;
for (i=UNMANAGED; i<=MANAGED; ++i) {
if (myid == 0) {
if (i==UNMANAGED) {
printf("Testing CUDA N_Vector \n");
} else {
printf("\nTesting CUDA N_Vector with managed memory \n");
}
printf("Vector global length %ld \n", (long int) global_length);
printf("MPI processes %d \n", nprocs);
}
/* Create new local vectors */
X = (i==UNMANAGED) ? N_VNew_Cuda(local_length) : N_VNewManaged_Cuda(local_length);
if (X == NULL) {
if (myid == 0) printf("FAIL: Unable to create a new CUDA vector \n\n");
MPI_Abort(comm, 1);
}
/* Create the MPI+X vector */
plusX = N_VMake_MPIPlusX(comm, X);
if (plusX == NULL) {
N_VDestroy(X);
if (myid == 0) printf("FAIL: Unable to create a new MPIPlusX vector \n\n");
MPI_Abort(comm, 1);
}
/* Check vector ID */
fails += Test_N_VGetVectorID(plusX, SUNDIALS_NVEC_MPIPLUSX, myid);
/* Check vector length */
fails += Test_N_VGetLength(plusX, myid);
/* Check vector communicator */
fails += Test_N_VGetCommunicatorMPI(plusX, &comm, myid);
/* Test clone functions */
fails += Test_N_VCloneEmpty(plusX, myid);
fails += Test_N_VClone(plusX, local_length, myid);
fails += Test_N_VCloneEmptyVectorArray(5, plusX, myid);
fails += Test_N_VCloneVectorArray(5, plusX, local_length, myid);
/* Clone additional vectors for testing */
plusY = N_VClone(plusX);
if (plusY == NULL) {
N_VDestroy(X);
N_VDestroy(plusX);
if (myid == 0) printf("FAIL: Unable to create a new vector \n\n");
MPI_Abort(comm, 1);
}
plusZ = N_VClone(plusX);
if (plusZ == NULL) {
N_VDestroy(X);
N_VDestroy(plusX);
N_VDestroy(plusY);
if (myid == 0) printf("FAIL: Unable to create a new vector \n\n");
MPI_Abort(comm, 1);
}
/* Standard vector operation tests */
if (myid == 0) printf("\nTesting standard vector operations:\n\n");
fails += Test_N_VConst(plusX, local_length, myid);
fails += Test_N_VLinearSum(plusX, plusY, plusZ, local_length, myid);
fails += Test_N_VProd(plusX, plusY, plusZ, local_length, myid);
fails += Test_N_VDiv(plusX, plusY, plusZ, local_length, myid);
fails += Test_N_VScale(plusX, plusZ, local_length, myid);
fails += Test_N_VAbs(plusX, plusZ, local_length, myid);
fails += Test_N_VInv(plusX, plusZ, local_length, myid);
fails += Test_N_VAddConst(plusX, plusZ, local_length, myid);
fails += Test_N_VDotProd(plusX, plusY, local_length, myid);
fails += Test_N_VMaxNorm(plusX, local_length, myid);
fails += Test_N_VWrmsNorm(plusX, plusY, local_length, myid);
fails += Test_N_VWrmsNormMask(plusX, plusY, plusZ, local_length, myid);
fails += Test_N_VMin(plusX, local_length, myid);
fails += Test_N_VWL2Norm(plusX, plusY, local_length, myid);
fails += Test_N_VL1Norm(plusX, local_length, myid);
fails += Test_N_VCompare(plusX, plusZ, local_length, myid);
fails += Test_N_VInvTest(plusX, plusZ, local_length, myid);
fails += Test_N_VConstrMask(plusX, plusY, plusZ, local_length, myid);
fails += Test_N_VMinQuotient(plusX, plusY, local_length, myid);
/* Fused and vector array operations tests (disabled) */
if (myid == 0) printf("\nTesting fused and vector array operations (disabled):\n\n");
/* create vector and disable all fused and vector array operations */
U = (i==UNMANAGED) ? N_VNew_Cuda(local_length) : N_VNewManaged_Cuda(local_length);
retval = N_VEnableFusedOps_Cuda(U, SUNFALSE);
if (U == NULL || retval != 0) {
N_VDestroy(X);
N_VDestroy(plusX);
N_VDestroy(plusY);
N_VDestroy(plusZ);
if (myid == 0) printf("FAIL: Unable to create a new CUDA vector \n\n");
MPI_Abort(comm, 1);
}
/* create the MPIPlusX vector */
plusU = N_VMake_MPIPlusX(comm, U);
if (U == NULL || retval != 0) {
N_VDestroy(X);
N_VDestroy(U);
N_VDestroy(plusX);
N_VDestroy(plusY);
N_VDestroy(plusZ);
if (myid == 0) printf("FAIL: Unable to create a new MPIPlusX vector \n\n");
MPI_Abort(comm, 1);
}
/* fused operations */
fails += Test_N_VLinearCombination(plusU, local_length, myid);
fails += Test_N_VScaleAddMulti(plusU, local_length, myid);
fails += Test_N_VDotProdMulti(plusU, local_length, myid);
/* vector array operations */
fails += Test_N_VLinearSumVectorArray(plusU, local_length, myid);
fails += Test_N_VScaleVectorArray(plusU, local_length, myid);
fails += Test_N_VConstVectorArray(plusU, local_length, myid);
fails += Test_N_VWrmsNormVectorArray(plusU, local_length, myid);
fails += Test_N_VWrmsNormMaskVectorArray(plusU, local_length, myid);
fails += Test_N_VScaleAddMultiVectorArray(plusU, local_length, myid);
fails += Test_N_VLinearCombinationVectorArray(plusU, local_length, myid);
/* Fused and vector array operations tests (enabled) */
if (myid == 0) printf("\nTesting fused and vector array operations (enabled):\n\n");
/* create vector and enable all fused and vector array operations */
V = (i==UNMANAGED) ? N_VNew_Cuda(local_length) : N_VNewManaged_Cuda(local_length);
retval = N_VEnableFusedOps_Cuda(V, SUNTRUE);
if (V == NULL || retval != 0) {
N_VDestroy(X);
N_VDestroy(U);
N_VDestroy(plusX);
N_VDestroy(plusY);
N_VDestroy(plusZ);
N_VDestroy(plusU);
if (myid == 0) printf("FAIL: Unable to create a new CUDA vector \n\n");
MPI_Abort(comm, 1);
}
/* create the MPIPlusX vector */
plusV = N_VMake_MPIPlusX(comm, V);
if (V == NULL || retval != 0) {
N_VDestroy(X);
N_VDestroy(U);
N_VDestroy(V);
N_VDestroy(plusU);
N_VDestroy(plusX);
N_VDestroy(plusY);
N_VDestroy(plusZ);
if (myid == 0) printf("FAIL: Unable to create a new MPIPlusX vector \n\n");
MPI_Abort(comm, 1);
}
/* fused operations */
fails += Test_N_VLinearCombination(plusV, local_length, myid);
fails += Test_N_VScaleAddMulti(plusV, local_length, myid);
fails += Test_N_VDotProdMulti(plusV, local_length, myid);
/* vector array operations */
fails += Test_N_VLinearSumVectorArray(plusV, local_length, myid);
fails += Test_N_VScaleVectorArray(plusV, local_length, myid);
fails += Test_N_VConstVectorArray(plusV, local_length, myid);
fails += Test_N_VWrmsNormVectorArray(plusV, local_length, myid);
fails += Test_N_VWrmsNormMaskVectorArray(plusV, local_length, myid);
fails += Test_N_VScaleAddMultiVectorArray(plusV, local_length, myid);
fails += Test_N_VLinearCombinationVectorArray(plusV, local_length, myid);
/* local reduction operations */
printf("\nTesting local reduction operations:\n\n");
fails += Test_N_VDotProdLocal(plusX, plusY, local_length, myid);
fails += Test_N_VMaxNormLocal(plusX, local_length, myid);
fails += Test_N_VMinLocal(plusX, local_length, myid);
fails += Test_N_VL1NormLocal(plusX, local_length, myid);
fails += Test_N_VWSqrSumLocal(plusX, plusY, local_length, myid);
fails += Test_N_VWSqrSumMaskLocal(plusX, plusY, plusZ, local_length, myid);
fails += Test_N_VInvTestLocal(plusX, plusZ, local_length, myid);
fails += Test_N_VConstrMaskLocal(plusX, plusY, plusZ, local_length, myid);
fails += Test_N_VMinQuotientLocal(plusX, plusY, local_length, myid);
/* Free vectors */
N_VDestroy(X);
N_VDestroy(U);
N_VDestroy(V);
N_VDestroy(plusX);
N_VDestroy(plusY);
N_VDestroy(plusZ);
N_VDestroy(plusU);
N_VDestroy(plusV);
}
/* Print result */
if (fails) {
printf("FAIL: NVector module failed %i tests, Proc %d \n\n", fails, myid);
} else {
if (myid == 0)
printf("SUCCESS: NVector module passed all tests \n\n");
}
/* check if any other process failed */
(void) MPI_Allreduce(&fails, &globfails, 1, MPI_INT, MPI_MAX, comm);
MPI_Finalize();
return(globfails);
}
/* ----------------------------------------------------------------------
* Implementation specific utility functions for vector tests
* --------------------------------------------------------------------*/
int check_ans(realtype ans, N_Vector plusX, sunindextype local_length)
{
int failure = 0;
sunindextype i;
realtype *Xdata;
N_Vector X;
X = N_VGetLocalVector_MPIPlusX(plusX);
N_VCopyFromDevice_Cuda(X);
Xdata = N_VGetHostArrayPointer_Cuda(X);
/* check vector data */
for (i = 0; i < local_length; i++) {
failure += FNEQ(Xdata[i], ans);
}
return (failure > ZERO) ? (1) : (0);
}
booleantype has_data(N_Vector plusX)
{
return (N_VGetLocalVector_MPIPlusX(plusX)->content == NULL) ? SUNFALSE : SUNTRUE;
}
void set_element(N_Vector plusX, sunindextype i, realtype val)
{
/* set i-th element of data array */
set_element_range(plusX, i, i, val);
}
void set_element_range(N_Vector plusX, sunindextype is, sunindextype ie,
realtype val)
{
sunindextype i;
realtype* xd;
N_Vector X;
X = N_VGetLocalVector_MPIPlusX(plusX);
/* set elements [is,ie] of the data array */
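/* note: the vector data lives in device (or managed) memory, so the update is
staged through the host array: copy device -> host, modify, then copy back */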
N_VCopyFromDevice_Cuda(X);
xd = N_VGetHostArrayPointer_Cuda(X);
for(i = is; i <= ie; i++) xd[i] = val;
N_VCopyToDevice_Cuda(X);
}
realtype get_element(N_Vector plusX, sunindextype i)
{
N_Vector X = N_VGetLocalVector_MPIPlusX(plusX);
/* get i-th element of data array */
N_VCopyFromDevice_Cuda(X);
return (N_VGetHostArrayPointer_Cuda(X))[i];
}
double max_time(N_Vector plusX, double time)
{
MPI_Comm *comm;
double maxt;
comm = (MPI_Comm*) N_VGetCommunicator(plusX);
/* get max time across all MPI ranks */
(void) MPI_Reduce(&time, &maxt, 1, MPI_DOUBLE, MPI_MAX, 0, *comm);
return(maxt);
}
void sync_device()
{
/* sync with GPU */
hipDeviceSynchronize();
return;
}
| e1d4bae07f79f7aba426f09348ed9d4f19b2c638.cu | /* -----------------------------------------------------------------
* Programmer(s): Slaven Peles, and Cody J. Balos @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the testing routine to check the MPIPlusX NVECTOR where
* the X is the CUDA NVECTOR.
* -----------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <sundials/sundials_types.h>
#include <nvector/cuda/Vector.hpp>
#include <nvector/nvector_cuda.h>
#include <nvector/nvector_mpiplusx.h>
#include <sundials/sundials_math.h>
#include "test_nvector.h"
#include <mpi.h>
/* CUDA vector can use unmanaged or managed memory */
enum mem_type { UNMANAGED, MANAGED };
/* ----------------------------------------------------------------------
* Main NVector Testing Routine
* --------------------------------------------------------------------*/
int main(int argc, char *argv[])
{
int fails = 0; /* counter for test failures */
int globfails = 0; /* counter for test failures */
int retval; /* function return value */
sunindextype local_length; /* local vector length */
sunindextype global_length; /* global vector length */
N_Vector U, V, X; /* local test vectors */
N_Vector plusU, plusV, plusX; /* MPIPlusX test vectors */
N_Vector plusY, plusZ; /* MPIPlusX test vectors */
int print_timing; /* turn timing on/off */
MPI_Comm comm; /* MPI Communicator */
int nprocs, myid; /* Number of procs, proc id */
int i;
/* Get processor number and total number of processes */
MPI_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
MPI_Comm_size(comm, &nprocs);
MPI_Comm_rank(comm, &myid);
/* check inputs */
if (argc < 3) {
if (myid == 0)
printf("ERROR: TWO (2) Inputs required: vector length, print timing \n");
MPI_Abort(comm, -1);
}
local_length = (sunindextype) atol(argv[1]);
if (local_length < 1) {
if (myid == 0)
printf("ERROR: local vector length must be a positive integer \n");
MPI_Abort(comm, -1);
}
print_timing = atoi(argv[2]);
SetTiming(print_timing, myid);
/* global length */
global_length = nprocs*local_length;
for (i=UNMANAGED; i<=MANAGED; ++i) {
if (myid == 0) {
if (i==UNMANAGED) {
printf("Testing CUDA N_Vector \n");
} else {
printf("\nTesting CUDA N_Vector with managed memory \n");
}
printf("Vector global length %ld \n", (long int) global_length);
printf("MPI processes %d \n", nprocs);
}
/* Create new local vectors */
X = (i==UNMANAGED) ? N_VNew_Cuda(local_length) : N_VNewManaged_Cuda(local_length);
if (X == NULL) {
if (myid == 0) printf("FAIL: Unable to create a new CUDA vector \n\n");
MPI_Abort(comm, 1);
}
/* Create the MPI+X vector */
plusX = N_VMake_MPIPlusX(comm, X);
if (plusX == NULL) {
N_VDestroy(X);
if (myid == 0) printf("FAIL: Unable to create a new MPIPlusX vector \n\n");
MPI_Abort(comm, 1);
}
/* Check vector ID */
fails += Test_N_VGetVectorID(plusX, SUNDIALS_NVEC_MPIPLUSX, myid);
/* Check vector length */
fails += Test_N_VGetLength(plusX, myid);
/* Check vector communicator */
fails += Test_N_VGetCommunicatorMPI(plusX, &comm, myid);
/* Test clone functions */
fails += Test_N_VCloneEmpty(plusX, myid);
fails += Test_N_VClone(plusX, local_length, myid);
fails += Test_N_VCloneEmptyVectorArray(5, plusX, myid);
fails += Test_N_VCloneVectorArray(5, plusX, local_length, myid);
/* Clone additional vectors for testing */
plusY = N_VClone(plusX);
if (plusY == NULL) {
N_VDestroy(X);
N_VDestroy(plusX);
if (myid == 0) printf("FAIL: Unable to create a new vector \n\n");
MPI_Abort(comm, 1);
}
plusZ = N_VClone(plusX);
if (plusZ == NULL) {
N_VDestroy(X);
N_VDestroy(plusX);
N_VDestroy(plusY);
if (myid == 0) printf("FAIL: Unable to create a new vector \n\n");
MPI_Abort(comm, 1);
}
/* Standard vector operation tests */
if (myid == 0) printf("\nTesting standard vector operations:\n\n");
fails += Test_N_VConst(plusX, local_length, myid);
fails += Test_N_VLinearSum(plusX, plusY, plusZ, local_length, myid);
fails += Test_N_VProd(plusX, plusY, plusZ, local_length, myid);
fails += Test_N_VDiv(plusX, plusY, plusZ, local_length, myid);
fails += Test_N_VScale(plusX, plusZ, local_length, myid);
fails += Test_N_VAbs(plusX, plusZ, local_length, myid);
fails += Test_N_VInv(plusX, plusZ, local_length, myid);
fails += Test_N_VAddConst(plusX, plusZ, local_length, myid);
fails += Test_N_VDotProd(plusX, plusY, local_length, myid);
fails += Test_N_VMaxNorm(plusX, local_length, myid);
fails += Test_N_VWrmsNorm(plusX, plusY, local_length, myid);
fails += Test_N_VWrmsNormMask(plusX, plusY, plusZ, local_length, myid);
fails += Test_N_VMin(plusX, local_length, myid);
fails += Test_N_VWL2Norm(plusX, plusY, local_length, myid);
fails += Test_N_VL1Norm(plusX, local_length, myid);
fails += Test_N_VCompare(plusX, plusZ, local_length, myid);
fails += Test_N_VInvTest(plusX, plusZ, local_length, myid);
fails += Test_N_VConstrMask(plusX, plusY, plusZ, local_length, myid);
fails += Test_N_VMinQuotient(plusX, plusY, local_length, myid);
/* Fused and vector array operations tests (disabled) */
if (myid == 0) printf("\nTesting fused and vector array operations (disabled):\n\n");
/* create vector and disable all fused and vector array operations */
U = (i==UNMANAGED) ? N_VNew_Cuda(local_length) : N_VNewManaged_Cuda(local_length);
retval = N_VEnableFusedOps_Cuda(U, SUNFALSE);
if (U == NULL || retval != 0) {
N_VDestroy(X);
N_VDestroy(plusX);
N_VDestroy(plusY);
N_VDestroy(plusZ);
if (myid == 0) printf("FAIL: Unable to create a new CUDA vector \n\n");
MPI_Abort(comm, 1);
}
/* create the MPIPlusX vector */
plusU = N_VMake_MPIPlusX(comm, U);
if (plusU == NULL) {
N_VDestroy(X);
N_VDestroy(U);
N_VDestroy(plusX);
N_VDestroy(plusY);
N_VDestroy(plusZ);
if (myid == 0) printf("FAIL: Unable to create a new MPIPlusX vector \n\n");
MPI_Abort(comm, 1);
}
/* fused operations */
fails += Test_N_VLinearCombination(plusU, local_length, myid);
fails += Test_N_VScaleAddMulti(plusU, local_length, myid);
fails += Test_N_VDotProdMulti(plusU, local_length, myid);
/* vector array operations */
fails += Test_N_VLinearSumVectorArray(plusU, local_length, myid);
fails += Test_N_VScaleVectorArray(plusU, local_length, myid);
fails += Test_N_VConstVectorArray(plusU, local_length, myid);
fails += Test_N_VWrmsNormVectorArray(plusU, local_length, myid);
fails += Test_N_VWrmsNormMaskVectorArray(plusU, local_length, myid);
fails += Test_N_VScaleAddMultiVectorArray(plusU, local_length, myid);
fails += Test_N_VLinearCombinationVectorArray(plusU, local_length, myid);
/* Fused and vector array operations tests (enabled) */
if (myid == 0) printf("\nTesting fused and vector array operations (enabled):\n\n");
/* create vector and enable all fused and vector array operations */
V = (i==UNMANAGED) ? N_VNew_Cuda(local_length) : N_VNewManaged_Cuda(local_length);
retval = N_VEnableFusedOps_Cuda(V, SUNTRUE);
if (V == NULL || retval != 0) {
N_VDestroy(X);
N_VDestroy(U);
N_VDestroy(plusX);
N_VDestroy(plusY);
N_VDestroy(plusZ);
N_VDestroy(plusU);
if (myid == 0) printf("FAIL: Unable to create a new CUDA vector \n\n");
MPI_Abort(comm, 1);
}
/* create the MPIPlusX vector */
plusV = N_VMake_MPIPlusX(comm, V);
if (plusV == NULL) {
N_VDestroy(X);
N_VDestroy(U);
N_VDestroy(V);
N_VDestroy(plusU);
N_VDestroy(plusX);
N_VDestroy(plusY);
N_VDestroy(plusZ);
if (myid == 0) printf("FAIL: Unable to create a new MPIPlusX vector \n\n");
MPI_Abort(comm, 1);
}
/* fused operations */
fails += Test_N_VLinearCombination(plusV, local_length, myid);
fails += Test_N_VScaleAddMulti(plusV, local_length, myid);
fails += Test_N_VDotProdMulti(plusV, local_length, myid);
/* vector array operations */
fails += Test_N_VLinearSumVectorArray(plusV, local_length, myid);
fails += Test_N_VScaleVectorArray(plusV, local_length, myid);
fails += Test_N_VConstVectorArray(plusV, local_length, myid);
fails += Test_N_VWrmsNormVectorArray(plusV, local_length, myid);
fails += Test_N_VWrmsNormMaskVectorArray(plusV, local_length, myid);
fails += Test_N_VScaleAddMultiVectorArray(plusV, local_length, myid);
fails += Test_N_VLinearCombinationVectorArray(plusV, local_length, myid);
/* local reduction operations */
printf("\nTesting local reduction operations:\n\n");
fails += Test_N_VDotProdLocal(plusX, plusY, local_length, myid);
fails += Test_N_VMaxNormLocal(plusX, local_length, myid);
fails += Test_N_VMinLocal(plusX, local_length, myid);
fails += Test_N_VL1NormLocal(plusX, local_length, myid);
fails += Test_N_VWSqrSumLocal(plusX, plusY, local_length, myid);
fails += Test_N_VWSqrSumMaskLocal(plusX, plusY, plusZ, local_length, myid);
fails += Test_N_VInvTestLocal(plusX, plusZ, local_length, myid);
fails += Test_N_VConstrMaskLocal(plusX, plusY, plusZ, local_length, myid);
fails += Test_N_VMinQuotientLocal(plusX, plusY, local_length, myid);
/* Free vectors */
N_VDestroy(X);
N_VDestroy(U);
N_VDestroy(V);
N_VDestroy(plusX);
N_VDestroy(plusY);
N_VDestroy(plusZ);
N_VDestroy(plusU);
N_VDestroy(plusV);
}
/* Print result */
if (fails) {
printf("FAIL: NVector module failed %i tests, Proc %d \n\n", fails, myid);
} else {
if (myid == 0)
printf("SUCCESS: NVector module passed all tests \n\n");
}
/* check if any other process failed */
(void) MPI_Allreduce(&fails, &globfails, 1, MPI_INT, MPI_MAX, comm);
MPI_Finalize();
return(globfails);
}
/* ----------------------------------------------------------------------
* Implementation specific utility functions for vector tests
* --------------------------------------------------------------------*/
int check_ans(realtype ans, N_Vector plusX, sunindextype local_length)
{
int failure = 0;
sunindextype i;
realtype *Xdata;
N_Vector X;
X = N_VGetLocalVector_MPIPlusX(plusX);
N_VCopyFromDevice_Cuda(X);
Xdata = N_VGetHostArrayPointer_Cuda(X);
/* check vector data */
for (i = 0; i < local_length; i++) {
failure += FNEQ(Xdata[i], ans);
}
return (failure > ZERO) ? (1) : (0);
}
booleantype has_data(N_Vector plusX)
{
return (N_VGetLocalVector_MPIPlusX(plusX)->content == NULL) ? SUNFALSE : SUNTRUE;
}
void set_element(N_Vector plusX, sunindextype i, realtype val)
{
/* set i-th element of data array */
set_element_range(plusX, i, i, val);
}
void set_element_range(N_Vector plusX, sunindextype is, sunindextype ie,
realtype val)
{
sunindextype i;
realtype* xd;
N_Vector X;
X = N_VGetLocalVector_MPIPlusX(plusX);
/* set elements [is,ie] of the data array */
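/* note: the vector data lives in device (or managed) memory, so the update is
staged through the host array: copy device -> host, modify, then copy back */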
N_VCopyFromDevice_Cuda(X);
xd = N_VGetHostArrayPointer_Cuda(X);
for(i = is; i <= ie; i++) xd[i] = val;
N_VCopyToDevice_Cuda(X);
}
realtype get_element(N_Vector plusX, sunindextype i)
{
N_Vector X = N_VGetLocalVector_MPIPlusX(plusX);
/* get i-th element of data array */
N_VCopyFromDevice_Cuda(X);
return (N_VGetHostArrayPointer_Cuda(X))[i];
}
double max_time(N_Vector plusX, double time)
{
MPI_Comm *comm;
double maxt;
comm = (MPI_Comm*) N_VGetCommunicator(plusX);
/* get max time across all MPI ranks */
(void) MPI_Reduce(&time, &maxt, 1, MPI_DOUBLE, MPI_MAX, 0, *comm);
return(maxt);
}
void sync_device()
{
/* sync with GPU */
cudaDeviceSynchronize();
return;
}
|
4f5b409c63862d5bdda48c4a2208ab9d764e39d2.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at {
namespace native {
namespace {
const char shifted_chebyshev_polynomial_u_name[] = "shifted_chebyshev_polynomial_u_forward";
void shifted_chebyshev_polynomial_u_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
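// jiterator path: the kernel body comes from shifted_chebyshev_polynomial_u_string
// and is compiled at run time; the #else branch below is the ahead-of-time
// compiled fallback that uses a plain device lambda.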
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_u_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_u_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_u_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_u_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_u_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_u_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_u_stub, &shifted_chebyshev_polynomial_u_kernel_cuda);
} // namespace native
} // namespace at
| 4f5b409c63862d5bdda48c4a2208ab9d764e39d2.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at {
namespace native {
namespace {
const char shifted_chebyshev_polynomial_u_name[] = "shifted_chebyshev_polynomial_u_forward";
void shifted_chebyshev_polynomial_u_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
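// jiterator path: the kernel body comes from shifted_chebyshev_polynomial_u_string
// and is compiled at run time; the #else branch below is the ahead-of-time
// compiled fallback that uses a plain device lambda.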
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_u_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_u_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_u_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_u_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_u_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_u_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_u_stub, &shifted_chebyshev_polynomial_u_kernel_cuda);
} // namespace native
} // namespace at
|
61bca6ec3bf2f83e100f90fcfa2245ce8cf9ee8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
///////////////////////////////// DEVICE FUNCTIONS /////////////////////////////////
__device__ void _bicubicCoeff(float* alpha, float* psi, int pitch, int i, int j);
__device__ void _newtonStep(float* A, float& x, float& y, float& error, int xi, int yi);
__device__ int sgn(float val) {
return ((0.0 < val) ? 1 : -1 );
}
///////////////////////////////// GLOBAL GPU FUNCTIONS /////////////////////////////////
// Determines which voxels are active.
// Gets coefficients for interpolating polynomial for each active voxel.
__global__ void getVoxels(float* psi, int pitch, int* voxelList, float* alphaList, int Nx, int Ny)
{
int row = blockDim.x*blockIdx.x + threadIdx.x;
int col = blockDim.y*blockIdx.y + threadIdx.y;
if (row<Nx-1 && col<Ny-1)
{
int idx = row * pitch + col;
int parity = sgn(psi[idx]) + sgn(psi[idx+pitch]) + sgn(psi[idx+pitch+1]) + sgn(psi[idx+1]);
if (-3<parity && parity<3)
{
int old = atomicAdd(voxelList,1);
*(voxelList + old + 1) = idx;
_bicubicCoeff(alphaList+16*old, psi, pitch, row, col);
}
}
}
__global__ void reinitPhi(float* phi, int pitch, float* psi, int* voxelList, float* alphaList,
int Nx, int Ny, float dx, float thres)
{
int row = blockDim.x*blockIdx.x + threadIdx.x;
int col = blockDim.y*blockIdx.y + threadIdx.y;
if (row<Nx && col<Ny)
{
float* alpha;
int idx, r ,c;
float minDist = 7770000.0; //for error checking
float xO, yO;
float error;
for(int k = 0; k < voxelList[0]; ++k)
{
idx = voxelList[k+1];
alpha = alphaList + 16*k;
r = idx/pitch;
c = idx%pitch;
xO = .5;
yO = .5;
bool inVoxel = true;
do
{
_newtonStep(alpha, xO, yO, error, row-r, col-c);
inVoxel = (yO>=-0.1f) && (yO<=1.1f) && (xO>=-0.1f) && (xO<=1.1f);
} while (error>thres && inVoxel);
if (inVoxel){
float xdist = (row-r-xO);
float ydist = (col-c-yO);
minDist = min(minDist, dx*sqrt(ydist*ydist + xdist*xdist));
}
}
// if (minDist>100){
// printf("\nWHAT IS UP HERE? err=%3.3f, (x,y)=(%2.2f,%2.2f) [r,c]=%d,%d\n", error, xO,yO,row,col );
// }
phi[row*pitch+col] = sgn(psi[row*pitch+col]) * minDist;
}
}
///////////////////////////////// DEVICE FUNCTION IMPLEMENTATIONS /////////////////////////////////
__device__ void _bicubicCoeff(float* alpha, float* psi, int pitch, int i, int j)
{
int idx = i*pitch + j;
float f00, f10, f01, f11;
float fx00, fx10, fx01, fx11;
float fy00, fy10, fy01, fy11;
float fxy00, fxy10, fxy01, fxy11;
//f00 = psi[idx];
//f01 = psi[idx+pitch];//psi[idx+1];
//f10 = psi[idx+1];//psi[idx+N];
//f11 = psi[idx+pitch+1];//psi[idx+N+1];
//fy00 = (psi[idx+pitch]-psi[idx-pitch])/2.0;//(psi[idx+1]-psi[idx-1])/2.0;
//fy01 = (psi[idx+2*pitch]-psi[idx])/2.0;//(psi[idx+2]-psi[idx])/2.0;
//fy10 = (psi[idx+pitch+1]-psi[idx-pitch+1])/2.0;//(psi[idx+N+1]-psi[idx+N-1])/2.0;
//fy11 = (psi[idx+2*pitch+1]-psi[idx+1])/2.0;//(psi[idx+N+2]-psi[idx+N])/2.0;
//fx00 = (psi[idx+1]-psi[idx-1])/2.0;//(psi[idx+N]-psi[idx-N])/2.0;
//fx01 = (psi[idx+pitch+1]-psi[idx+pitch-1])/2.0;//(psi[idx+N+1]-psi[idx-N+1])/2.0;
//fx10 = (psi[idx+2]-psi[idx])/2.0;//(psi[idx+2*N]-psi[idx])/2.0;
//fx11 = (psi[idx+pitch+2]-psi[idx+pitch])/2.0;//(psi[idx+2*N+1]-psi[idx+1])/2.0;
//fxy00 = (psi[idx+pitch+1]-psi[idx+pitch-1]-psi[idx-pitch+1]+psi[idx-pitch-1])/4.0;
//fxy01 = (psi[idx+2*pitch+1]-psi[idx+2*pitch-1]-psi[idx+1]+psi[idx-1])/4.0;
//fxy10 = (psi[idx+pitch+2]-psi[idx+pitch]-psi[idx-pitch+2]+psi[idx-pitch])/4.0;
//fxy11 = (psi[idx+2*pitch+2]-psi[idx+2*pitch]-psi[idx+2]+psi[idx])/4.0;
f00 = psi[idx];
f01 = psi[idx+1];
f10 = psi[idx+pitch];
f11 = psi[idx+pitch+1];
fy00 = (psi[idx+1]-psi[idx-1])/2.0;
fy01 = (psi[idx+2]-psi[idx])/2.0;
fy10 = (psi[idx+pitch+1]-psi[idx+pitch-1])/2.0;
fy11 = (psi[idx+pitch+2]-psi[idx+pitch])/2.0;
fx00 = (psi[idx+pitch]-psi[idx-pitch])/2.0;
fx01 = (psi[idx+pitch+1]-psi[idx-pitch+1])/2.0;
fx10 = (psi[idx+2*pitch]-psi[idx])/2.0;
fx11 = (psi[idx+2*pitch+1]-psi[idx+1])/2.0;
fxy00 = (psi[idx+pitch+1]-psi[idx+1-pitch]-psi[idx-1+pitch]+psi[idx-pitch-1])/4.0;
fxy01 = (psi[idx+pitch+2]-psi[idx-pitch+2]-psi[idx+pitch]+psi[idx-pitch])/4.0;
fxy10 = (psi[idx+2*pitch+1]-psi[idx+1]-psi[idx+2*pitch-1]+psi[idx-1])/4.0;
fxy11 = (psi[idx+2*pitch+2]-psi[idx+2]-psi[idx+2*pitch]+psi[idx])/4.0;
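// The 16 alpha coefficients below are the standard bicubic-interpolation
// coefficients for the unit square, determined by the corner values f, the
// central-difference derivatives fx, fy and the cross-derivatives fxy above.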
alpha[0] = f00;
alpha[1] = fy00;
alpha[2] = -3*f00 + 3*f01 - 2*fy00 - fy01;
alpha[3] = 2*f00 - 2*f01 + fy00 + fy01;
alpha[4] = fx00;
alpha[5] = fxy00;
alpha[6] = -3*fx00 + 3*fx01 - 2*fxy00 - fxy10;
alpha[7] = 2*fx00 - 2*fx01 + fxy00 + fxy01;
alpha[8] = -3*f00 + 3*f10 - 2*fx00 - fx10;
alpha[9] = -3*fy00 + 3*fy10 - 2*fxy00 -fxy10;
alpha[10] = 9*f00 -9*f01 -9*f10 +9*f11 +6*fy00 +3*fy01 -6*fy10 -3*fy11
+6*fx00 -6*fx01 +3*fx10 -3*fx11 +4*fxy00 +2*fxy01 +2*fxy10 + fxy11;
alpha[11] = -6*f00 +6*f01 +6*f10 -6*f11 -3*fy00 -3*fy01 +3*fy10 +3*fy11
-4*fx00 +4*fx01 -2*fx10 +2*fx11 -2*fxy00 -2*fxy01 - fxy10 - fxy11;
alpha[12] = 2*f00 - 2*f10 + fx00 + fx10;
alpha[13] = 2*fy00 - 2*fy10 + fxy00 + fxy10;
alpha[14] = -6*f00 +6*f01 +6*f10 -6*f11 -4*fy00 -2*fy01 +4*fy10 +2*fy11
-3*fx00 +3*fx01 -3*fx10 +3*fx11 -2*fxy00 -fxy01 -2*fxy10 - fxy11;
alpha[15] = 4*f00 -4*f01 -4*f10 +4*f11 +2*fy00 +2*fy01 -2*fy10 -2*fy11
+2*fx00 -2*fx01 +2*fx10 -2*fx11 + fxy00 + fxy01 + fxy10 + fxy11;
}//_bicubicCoeff
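// Newton iteration for the point on the zero level set of the bicubic patch
// p(x,y) (coefficients A) closest to the grid node (xi,yi): it drives
// p(x,y) = 0 (the point lies on the interface) and
// d1 = p_y*(x-xi) - p_x*(y-yi) = 0 (the offset to the node is parallel to the
// gradient, i.e. the foot of the perpendicular) to zero. D is the (signed)
// Jacobian determinant of that 2x2 system and `error` is the squared residual
// the caller compares against `thres`.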
__device__ void _newtonStep(float* A, float& x, float& y, float& error, int xi, int yi)
{
float p, px, py, pxx, pyy, pxy;
float d1, d2, d3, D;
float y2 = y*y;
float y3 = y2*y;
float x2 = x*x;
float x3 = x2*x;
p = A[0] + A[1]*y + A[2]*y2 + A[3]*y3
+ (A[4] + A[5]*y + A[6]*y2 + A[7]*y3)*x
+ (A[8] + A[9]*y + A[10]*y2 + A[11]*y3)*x2
+ (A[12] + A[13]*y + A[14]*y2 + A[15]*y3)*x3;
py = A[1] + 2*A[2]*y + 3*A[3]*y2
+ (A[5] + 2*A[6]*y + 3*A[7]*y2)*x
+ (A[9] + 2*A[10]*y + 3*A[11]*y2)*x2
+ (A[13] + 2*A[14]*y + 3*A[15]*y2)*x3;
px = A[4] + 2*A[8]*x + 3*A[12]*x2
+ (A[5] + 2*A[9]*x + 3*A[13]*x2)*y
+ (A[6] + 2*A[10]*x + 3*A[14]*x2)*y2
+ (A[7] + 2*A[11]*x + 3*A[15]*x2)*y3;
pyy = 2*A[2] + 6*A[3]*y + (2*A[6] + 6*A[7]*y)*x
+ (2*A[10] + 6*A[11]*y)*x2
+ (2*A[14] + 6*A[15]*y)*x3;
pxx = 2*A[8] + 6*A[12]*x + (2*A[9] + 6*A[13]*x)*y
+ (2*A[10] + 6*A[14]*x)*y2
+ (2*A[11] + 6*A[15]*x)*y3;
pxy = A[5] + 2*A[6]*y + 3*A[7]*y2 +
(A[9] + 2*A[10]*y + 3*A[11]*y2)*2*x +
(A[13] + 2*A[14]*y + 3*A[15]*y2)*3*x2;
d1 = py*(x-xi) - px*(y-yi);
d2 = pyy*(x-xi) - pxy*(y-yi) - px;
d3 = pxy*(x-xi) - pxx*(y-yi) + py ;
D = py*d3 - px*d2;
error = p*p + d1*d1;
y -= ( p*d3 - px*d1) / D;
x -= ( py*d1 - p*d2 ) / D;
}//_newtonStep
| 61bca6ec3bf2f83e100f90fcfa2245ce8cf9ee8b.cu |
#include <stdio.h>
#include <math.h>
///////////////////////////////// DEVICE FUNCTIONS /////////////////////////////////
__device__ void _bicubicCoeff(float* alpha, float* psi, int pitch, int i, int j);
__device__ void _newtonStep(float* A, float& x, float& y, float& error, int xi, int yi);
__device__ int sgn(float val) {
return ((0.0 < val) ? 1 : -1 );
}
///////////////////////////////// GLOBAL GPU FUNCTIONS /////////////////////////////////
// Determines which voxels are active.
// Gets coefficients for interpolating polynomial for each active voxel.
__global__ void getVoxels(float* psi, int pitch, int* voxelList, float* alphaList, int Nx, int Ny)
{
int row = blockDim.x*blockIdx.x + threadIdx.x;
int col = blockDim.y*blockIdx.y + threadIdx.y;
if (row<Nx-1 && col<Ny-1)
{
int idx = row * pitch + col;
int parity = sgn(psi[idx]) + sgn(psi[idx+pitch]) + sgn(psi[idx+pitch+1]) + sgn(psi[idx+1]);
if (-3<parity && parity<3)
{
int old = atomicAdd(voxelList,1);
*(voxelList + old + 1) = idx;
_bicubicCoeff(alphaList+16*old, psi, pitch, row, col);
}
}
}
__global__ void reinitPhi(float* phi, int pitch, float* psi, int* voxelList, float* alphaList,
int Nx, int Ny, float dx, float thres)
{
int row = blockDim.x*blockIdx.x + threadIdx.x;
int col = blockDim.y*blockIdx.y + threadIdx.y;
if (row<Nx && col<Ny)
{
float* alpha;
int idx, r ,c;
float minDist = 7770000.0; //for error checking
float xO, yO;
float error;
for(int k = 0; k < voxelList[0]; ++k)
{
idx = voxelList[k+1];
alpha = alphaList + 16*k;
r = idx/pitch;
c = idx%pitch;
xO = .5;
yO = .5;
bool inVoxel = true;
do
{
_newtonStep(alpha, xO, yO, error, row-r, col-c);
inVoxel = (yO>=-0.1f) && (yO<=1.1f) && (xO>=-0.1f) && (xO<=1.1f);
} while (error>thres && inVoxel);
if (inVoxel){
float xdist = (row-r-xO);
float ydist = (col-c-yO);
minDist = min(minDist, dx*sqrt(ydist*ydist + xdist*xdist));
}
}
// if (minDist>100){
// printf("\nWHAT IS UP HERE? err=%3.3f, (x,y)=(%2.2f,%2.2f) [r,c]=%d,%d\n", error, xO,yO,row,col );
// }
phi[row*pitch+col] = sgn(psi[row*pitch+col]) * minDist;
}
}
///////////////////////////////// DEVICE FUNCTION IMPLEMENTATIONS /////////////////////////////////
__device__ void _bicubicCoeff(float* alpha, float* psi, int pitch, int i, int j)
{
int idx = i*pitch + j;
float f00, f10, f01, f11;
float fx00, fx10, fx01, fx11;
float fy00, fy10, fy01, fy11;
float fxy00, fxy10, fxy01, fxy11;
//f00 = psi[idx];
//f01 = psi[idx+pitch];//psi[idx+1];
//f10 = psi[idx+1];//psi[idx+N];
//f11 = psi[idx+pitch+1];//psi[idx+N+1];
//fy00 = (psi[idx+pitch]-psi[idx-pitch])/2.0;//(psi[idx+1]-psi[idx-1])/2.0;
//fy01 = (psi[idx+2*pitch]-psi[idx])/2.0;//(psi[idx+2]-psi[idx])/2.0;
//fy10 = (psi[idx+pitch+1]-psi[idx-pitch+1])/2.0;//(psi[idx+N+1]-psi[idx+N-1])/2.0;
//fy11 = (psi[idx+2*pitch+1]-psi[idx+1])/2.0;//(psi[idx+N+2]-psi[idx+N])/2.0;
//fx00 = (psi[idx+1]-psi[idx-1])/2.0;//(psi[idx+N]-psi[idx-N])/2.0;
//fx01 = (psi[idx+pitch+1]-psi[idx+pitch-1])/2.0;//(psi[idx+N+1]-psi[idx-N+1])/2.0;
//fx10 = (psi[idx+2]-psi[idx])/2.0;//(psi[idx+2*N]-psi[idx])/2.0;
//fx11 = (psi[idx+pitch+2]-psi[idx+pitch])/2.0;//(psi[idx+2*N+1]-psi[idx+1])/2.0;
//fxy00 = (psi[idx+pitch+1]-psi[idx+pitch-1]-psi[idx-pitch+1]+psi[idx-pitch-1])/4.0;
//fxy01 = (psi[idx+2*pitch+1]-psi[idx+2*pitch-1]-psi[idx+1]+psi[idx-1])/4.0;
//fxy10 = (psi[idx+pitch+2]-psi[idx+pitch]-psi[idx-pitch+2]+psi[idx-pitch])/4.0;
//fxy11 = (psi[idx+2*pitch+2]-psi[idx+2*pitch]-psi[idx+2]+psi[idx])/4.0;
f00 = psi[idx];
f01 = psi[idx+1];
f10 = psi[idx+pitch];
f11 = psi[idx+pitch+1];
fy00 = (psi[idx+1]-psi[idx-1])/2.0;
fy01 = (psi[idx+2]-psi[idx])/2.0;
fy10 = (psi[idx+pitch+1]-psi[idx+pitch-1])/2.0;
fy11 = (psi[idx+pitch+2]-psi[idx+pitch])/2.0;
fx00 = (psi[idx+pitch]-psi[idx-pitch])/2.0;
fx01 = (psi[idx+pitch+1]-psi[idx-pitch+1])/2.0;
fx10 = (psi[idx+2*pitch]-psi[idx])/2.0;
fx11 = (psi[idx+2*pitch+1]-psi[idx+1])/2.0;
fxy00 = (psi[idx+pitch+1]-psi[idx+1-pitch]-psi[idx-1+pitch]+psi[idx-pitch-1])/4.0;
fxy01 = (psi[idx+pitch+2]-psi[idx-pitch+2]-psi[idx+pitch]+psi[idx-pitch])/4.0;
fxy10 = (psi[idx+2*pitch+1]-psi[idx+1]-psi[idx+2*pitch-1]+psi[idx-1])/4.0;
fxy11 = (psi[idx+2*pitch+2]-psi[idx+2]-psi[idx+2*pitch]+psi[idx])/4.0;
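// The 16 alpha coefficients below are the standard bicubic-interpolation
// coefficients for the unit square, determined by the corner values f, the
// central-difference derivatives fx, fy and the cross-derivatives fxy above.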
alpha[0] = f00;
alpha[1] = fy00;
alpha[2] = -3*f00 + 3*f01 - 2*fy00 - fy01;
alpha[3] = 2*f00 - 2*f01 + fy00 + fy01;
alpha[4] = fx00;
alpha[5] = fxy00;
alpha[6] = -3*fx00 + 3*fx01 - 2*fxy00 - fxy10;
alpha[7] = 2*fx00 - 2*fx01 + fxy00 + fxy01;
alpha[8] = -3*f00 + 3*f10 - 2*fx00 - fx10;
alpha[9] = -3*fy00 + 3*fy10 - 2*fxy00 -fxy10;
alpha[10] = 9*f00 -9*f01 -9*f10 +9*f11 +6*fy00 +3*fy01 -6*fy10 -3*fy11
+6*fx00 -6*fx01 +3*fx10 -3*fx11 +4*fxy00 +2*fxy01 +2*fxy10 + fxy11;
alpha[11] = -6*f00 +6*f01 +6*f10 -6*f11 -3*fy00 -3*fy01 +3*fy10 +3*fy11
-4*fx00 +4*fx01 -2*fx10 +2*fx11 -2*fxy00 -2*fxy01 - fxy10 - fxy11;
alpha[12] = 2*f00 - 2*f10 + fx00 + fx10;
alpha[13] = 2*fy00 - 2*fy10 + fxy00 + fxy10;
alpha[14] = -6*f00 +6*f01 +6*f10 -6*f11 -4*fy00 -2*fy01 +4*fy10 +2*fy11
-3*fx00 +3*fx01 -3*fx10 +3*fx11 -2*fxy00 -fxy01 -2*fxy10 - fxy11;
alpha[15] = 4*f00 -4*f01 -4*f10 +4*f11 +2*fy00 +2*fy01 -2*fy10 -2*fy11
+2*fx00 -2*fx01 +2*fx10 -2*fx11 + fxy00 + fxy01 + fxy10 + fxy11;
}//_bicubicCoeff
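// Newton iteration for the point on the zero level set of the bicubic patch
// p(x,y) (coefficients A) closest to the grid node (xi,yi): it drives
// p(x,y) = 0 (the point lies on the interface) and
// d1 = p_y*(x-xi) - p_x*(y-yi) = 0 (the offset to the node is parallel to the
// gradient, i.e. the foot of the perpendicular) to zero. D is the (signed)
// Jacobian determinant of that 2x2 system and `error` is the squared residual
// the caller compares against `thres`.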
__device__ void _newtonStep(float* A, float& x, float& y, float& error, int xi, int yi)
{
float p, px, py, pxx, pyy, pxy;
float d1, d2, d3, D;
float y2 = y*y;
float y3 = y2*y;
float x2 = x*x;
float x3 = x2*x;
p = A[0] + A[1]*y + A[2]*y2 + A[3]*y3
+ (A[4] + A[5]*y + A[6]*y2 + A[7]*y3)*x
+ (A[8] + A[9]*y + A[10]*y2 + A[11]*y3)*x2
+ (A[12] + A[13]*y + A[14]*y2 + A[15]*y3)*x3;
py = A[1] + 2*A[2]*y + 3*A[3]*y2
+ (A[5] + 2*A[6]*y + 3*A[7]*y2)*x
+ (A[9] + 2*A[10]*y + 3*A[11]*y2)*x2
+ (A[13] + 2*A[14]*y + 3*A[15]*y2)*x3;
px = A[4] + 2*A[8]*x + 3*A[12]*x2
+ (A[5] + 2*A[9]*x + 3*A[13]*x2)*y
+ (A[6] + 2*A[10]*x + 3*A[14]*x2)*y2
+ (A[7] + 2*A[11]*x + 3*A[15]*x2)*y3;
pyy = 2*A[2] + 6*A[3]*y + (2*A[6] + 6*A[7]*y)*x
+ (2*A[10] + 6*A[11]*y)*x2
+ (2*A[14] + 6*A[15]*y)*x3;
pxx = 2*A[8] + 6*A[12]*x + (2*A[9] + 6*A[13]*x)*y
+ (2*A[10] + 6*A[14]*x)*y2
+ (2*A[11] + 6*A[15]*x)*y3;
pxy = A[5] + 2*A[6]*y + 3*A[7]*y2 +
(A[9] + 2*A[10]*y + 3*A[11]*y2)*2*x +
(A[13] + 2*A[14]*y + 3*A[15]*y2)*3*x2;
d1 = py*(x-xi) - px*(y-yi);
d2 = pyy*(x-xi) - pxy*(y-yi) - px;
d3 = pxy*(x-xi) - pxx*(y-yi) + py ;
D = py*d3 - px*d2;
error = p*p + d1*d1;
y -= ( p*d3 - px*d1) / D;
x -= ( py*d1 - p*d2 ) / D;
}//_newtonStep
|
1e2a78c76b7543893ac1b97e4a7e48074736b658.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
/*
* Description:
* this function avg-pools an input 3D tensor along dimensions 1 and 2
* 3D input, 3D output
*/
__global__ void subsample(float *input, float *output,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
output = output + o*output_w*output_h;
input = input + i*input_w*input_h;
// For all output pixels...
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Sum the input values over the kH x kW pooling window
// (note: the sum is not divided by the window area here)
float *ptr_input = input + yy*dH*input_w + xx*dW;
float *ptr_output = output + yy*output_w + xx;
float sum = 0;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++)
sum += ptr_input[kx];
ptr_input += input_w; // next input line
}
// Update output
*ptr_output = sum;
}
}
}
static int cunn_SpatialAveragePooling_updateOutput(lua_State *L)
{
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
float *output_data;
float *input_data;
luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
if (input->nDimension == 3) {
long nInputCols = input->size[2];
long nInputRows = input->size[1];
long nOutputCols = (nInputCols - kW) / dW + 1;
long nOutputRows = (nInputRows - kH) / dH + 1;
long nInputPlane = input->size[0];
luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
input = THCudaTensor_newContiguous(input);
input_data = THCudaTensor_data(input);
THCudaTensor_resize3d(output, nInputPlane, nOutputRows, nOutputCols);
output_data = THCudaTensor_data(output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
// run subsample kernel
hipLaunchKernelGGL(( subsample) , dim3(blocks), dim3(threads), 0, 0, input_data, output_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
} else {
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nbatch = input->size[0];
long nOutputCols = (nInputCols - kW) / dW + 1;
long nOutputRows = (nInputRows - kH) / dH + 1;
long nInputPlane = input->size[1];
luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
input = THCudaTensor_newContiguous(input);
input_data = THCudaTensor_data(input);
THCudaTensor_resize4d(output, nbatch, nInputPlane, nOutputRows, nOutputCols);
output_data = THCudaTensor_data(output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
// run subsample kernel
hipLaunchKernelGGL(( subsample) , dim3(blocks), dim3(threads), 0, 0, input_data, output_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
// clean
THCudaTensor_free(input);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in SpatialSubsampling.updateOutput: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
/*
* Description:
* this function computes the gradInput from gradOutput
*/
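// Note: each gradOutput element is accumulated into its kH x kW input window
// with a plain += (no atomics); this is only safe because updateGradInput
// requires dW == kW and dH == kH (checked below), so the windows never overlap.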
__global__ void subgradinput(float *gradInput, float *gradOutput,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
gradOutput = gradOutput + o*output_w*output_h;
gradInput = gradInput + i*input_w*input_h;
// compute gradInput
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
float *ptr_gradOutput = gradOutput + yy*output_w + xx;
float z = *ptr_gradOutput;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++)
ptr_gradInput[kx] += z;
ptr_gradInput += input_w;
}
}
}
}
static int cunn_SpatialAveragePooling_updateGradInput(lua_State *L)
{
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
luaL_argcheck(L, dW == kW, 1, "dW and kW must be equal (this will be fixed soon)");
luaL_argcheck(L, dH == kH, 1, "dH and kH must be equal (this will be fixed soon)");
THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
if (input->nDimension == 3) {
long nInputCols = input->size[2];
long nInputRows = input->size[1];
long nInputPlane = input->size[0];
float *gradOutput_data = THCudaTensor_data(gradOutput);
float *gradInput_data;
THCudaTensor_resizeAs(gradInput, input);
THCudaTensor_zero(gradInput);
gradInput_data = THCudaTensor_data(gradInput);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
// run updateGradInput kernel
hipLaunchKernelGGL(( subgradinput) , dim3(blocks), dim3(threads), 0, 0, gradInput_data, gradOutput_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
} else {
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nInputPlane = input->size[1];
long nbatch = input->size[0];
float *gradOutput_data = THCudaTensor_data(gradOutput);
float *gradInput_data;
THCudaTensor_resizeAs(gradInput, input);
THCudaTensor_zero(gradInput);
gradInput_data = THCudaTensor_data(gradInput);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
// run updateGradInput kernel
hipLaunchKernelGGL(( subgradinput) , dim3(blocks), dim3(threads), 0, 0, gradInput_data, gradOutput_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in SpatialSubsampling.updateGradInput: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
static const struct luaL_Reg cunn_SpatialAveragePooling__ [] = {
{"SpatialAveragePooling_updateOutput", cunn_SpatialAveragePooling_updateOutput},
{"SpatialAveragePooling_updateGradInput", cunn_SpatialAveragePooling_updateGradInput},
{NULL, NULL}
};
static void cunn_SpatialAveragePooling_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_SpatialAveragePooling__, "nn");
lua_pop(L,1);
}
| 1e2a78c76b7543893ac1b97e4a7e48074736b658.cu |
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
/*
* Description:
* this function avg-pools an input 3D tensor along dimensions 1 and 2
* 3D input, 3D output
*/
__global__ void subsample(float *input, float *output,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
output = output + o*output_w*output_h;
input = input + i*input_w*input_h;
// For all output pixels...
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Sum the input values over the kH x kW pooling window
// (note: the sum is not divided by the window area here)
float *ptr_input = input + yy*dH*input_w + xx*dW;
float *ptr_output = output + yy*output_w + xx;
float sum = 0;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++)
sum += ptr_input[kx];
ptr_input += input_w; // next input line
}
// Update output
*ptr_output = sum;
}
}
}
static int cunn_SpatialAveragePooling_updateOutput(lua_State *L)
{
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
float *output_data;
float *input_data;
luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
if (input->nDimension == 3) {
long nInputCols = input->size[2];
long nInputRows = input->size[1];
long nOutputCols = (nInputCols - kW) / dW + 1;
long nOutputRows = (nInputRows - kH) / dH + 1;
long nInputPlane = input->size[0];
luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
input = THCudaTensor_newContiguous(input);
input_data = THCudaTensor_data(input);
THCudaTensor_resize3d(output, nInputPlane, nOutputRows, nOutputCols);
output_data = THCudaTensor_data(output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
// run subsample kernel
subsample <<<blocks, threads>>> (input_data, output_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
} else {
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nbatch = input->size[0];
long nOutputCols = (nInputCols - kW) / dW + 1;
long nOutputRows = (nInputRows - kH) / dH + 1;
long nInputPlane = input->size[1];
luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
input = THCudaTensor_newContiguous(input);
input_data = THCudaTensor_data(input);
THCudaTensor_resize4d(output, nbatch, nInputPlane, nOutputRows, nOutputCols);
output_data = THCudaTensor_data(output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
// run subsample kernel
subsample <<<blocks, threads>>> (input_data, output_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
// clean
THCudaTensor_free(input);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in SpatialSubsampling.updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
/*
* Description:
* this function computes the gradInput from gradOutput
*/
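// Note: each gradOutput element is accumulated into its kH x kW input window
// with a plain += (no atomics); this is only safe because updateGradInput
// requires dW == kW and dH == kH (checked below), so the windows never overlap.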
__global__ void subgradinput(float *gradInput, float *gradOutput,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
gradOutput = gradOutput + o*output_w*output_h;
gradInput = gradInput + i*input_w*input_h;
// compute gradInput
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
float *ptr_gradOutput = gradOutput + yy*output_w + xx;
float z = *ptr_gradOutput;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++)
ptr_gradInput[kx] += z;
ptr_gradInput += input_w;
}
}
}
}
static int cunn_SpatialAveragePooling_updateGradInput(lua_State *L)
{
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
luaL_argcheck(L, dW == kW, 1, "dW and kW must be equal (this will be fixed soon)");
luaL_argcheck(L, dH == kH, 1, "dH and kH must be equal (this will be fixed soon)");
THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
if (input->nDimension == 3) {
long nInputCols = input->size[2];
long nInputRows = input->size[1];
long nInputPlane = input->size[0];
float *gradOutput_data = THCudaTensor_data(gradOutput);
float *gradInput_data;
THCudaTensor_resizeAs(gradInput, input);
THCudaTensor_zero(gradInput);
gradInput_data = THCudaTensor_data(gradInput);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
// run updateGradInput kernel
subgradinput <<<blocks, threads>>> (gradInput_data, gradOutput_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
} else {
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nInputPlane = input->size[1];
long nbatch = input->size[0];
float *gradOutput_data = THCudaTensor_data(gradOutput);
float *gradInput_data;
THCudaTensor_resizeAs(gradInput, input);
THCudaTensor_zero(gradInput);
gradInput_data = THCudaTensor_data(gradInput);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
// run updateGradInput kernel
subgradinput <<<blocks, threads>>> (gradInput_data, gradOutput_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in SpatialSubsampling.updateGradInput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
static const struct luaL_Reg cunn_SpatialAveragePooling__ [] = {
{"SpatialAveragePooling_updateOutput", cunn_SpatialAveragePooling_updateOutput},
{"SpatialAveragePooling_updateGradInput", cunn_SpatialAveragePooling_updateGradInput},
{NULL, NULL}
};
static void cunn_SpatialAveragePooling_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_SpatialAveragePooling__, "nn");
lua_pop(L,1);
}
|
46cf4534048060c79d577c9b21bb96733be2db63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hiprand/hiprand_kernel.h>
#include <locale.h>
#include <iostream>
int n = 3; // dimension of the search space
double a[] = { 0, 0, 0 }; // lower bounds of the variables
double b[] = { 100, 100, 100 }; // upper bounds of the variables
int m[] = { 10, 10, 10 }; // number of grid elements per dimension
int R = 10000; // number of iterations
double p[] = { 10, 20, 30 };
// The function under study
// after the call, the elements of the array s have to be summed
__global__ void f(double *fx, double *x, double *p, int n, int total)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id<total; id += gridDim.x*blockDim.x)
{
fx[id] = 0;
for (int i = 0; i<n; i++)
{
fx[id] += (x[n*id + i] - p[i])*(x[n*id + i] - p[i]);
}
}
}
// Initialize the pseudorandom number generator
__global__ void setuprand(hiprandState_t *state, int total)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id<total; id += gridDim.x*blockDim.x)
hiprand_init(1234, id, 0, &state[id]);
}
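// fillindex decomposes each linear cell id into per-dimension grid indices
// k[0..n-1] (a mixed-radix expansion with radices m[i]); randvector then draws
// one uniform point inside that cell, i.e. one stratified sample per grid cell.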
__global__ void fillindex(int *k, int *m, int n, int total)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id<total; id += gridDim.x*blockDim.x)
{
for (int i = 0, j = id; i < n; i++)
{
k[n*id + i] = j%m[i];
j /= m[i];
}
}
}
// Pseudorandom vector generator
__global__ void randvector(double *x, double *a, double *b, int *k, int *m, int n, hiprandState_t *state, int total)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < total; id += gridDim.x*blockDim.x)
{
hiprandState_t localState = state[id];
for (int i = 0; i < n; i++)
{
double p = hiprand_uniform_double(&localState);
x[n*id + i] = ((m[i] - k[n*id + i])*a[i] + k[n*id + i] * b[i] + p*(b[i] - a[i])) / m[i];
}
state[id] = localState;
}
}
__global__ void getminimal(double *fx, double *fx1, double *x, double *x1, int n, int total)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < total; id += gridDim.x*blockDim.x)
{
if (fx1[id] < fx[id])
{
fx[id] = fx1[id];
for (int i = 0; i < n; i++) x[n*id + i] = x1[n*id + i];
}
}
}
int main()
{
// Cyrillic support in the Windows console
// The setlocale() function takes two parameters: the first is the locale category type, in our case LC_TYPE - the character set; the second is the locale value.
// Instead of the second argument you can write "Russian", or leave empty double quotes, in which case the character set will be the same as in the OS.
setlocale(LC_ALL, "");
int total = 1; for (auto i = 0; i < n; i++) total *= m[i];
int N = (1 + sqrt(total)>255) ? 255 : (int)(1 + sqrt(total));
hiprandState_t *devStates;
hipMalloc((void **)&devStates, total*sizeof(hiprandState_t));
hipLaunchKernelGGL(( setuprand) , dim3(1), dim3(N), 0, 0, devStates, total);
double *devA, *devB, *devX, *devX1, *devP, *devFX, *devFX1;
double *x, *fx;
int *devK, *devM;
hipMalloc((void **)&devA, n*sizeof(double));
hipMalloc((void **)&devB, n*sizeof(double));
hipMalloc((void **)&devP, n*sizeof(double));
hipMalloc((void **)&devM, n*sizeof(int));
hipMalloc((void **)&devK, n*total*sizeof(int));
hipMalloc((void **)&devX, n*total*sizeof(double));
hipMalloc((void **)&devX1, n*total*sizeof(double));
hipMalloc((void **)&devFX, total*sizeof(double));
hipMalloc((void **)&devFX1, total*sizeof(double));
fx = (double *)malloc(total*sizeof(double));
x = (double *)malloc(n*sizeof(double));
hipMemcpy(devA, a, n*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(devB, b, n*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(devP, p, n*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(devM, m, n*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( fillindex) , dim3(1), dim3(N), 0, 0, devK, devM, n, total);
// Choose the starting point
hipLaunchKernelGGL(( randvector) , dim3(1), dim3(N), 0, 0, devX, devA, devB, devK, devM, n, devStates, total);
hipLaunchKernelGGL(( f) , dim3(1), dim3(N), 0, 0, devFX, devX, devP, n, total);
for (auto r = 0; r < R; r++)
{
//std::clog << " " << std::endl;
hipLaunchKernelGGL(( randvector) , dim3(1), dim3(N), 0, 0, devX1, devA, devB, devK, devM, n, devStates, total);
hipLaunchKernelGGL(( f) , dim3(1), dim3(N), 0, 0, devFX1, devX1, devP, n, total);
hipLaunchKernelGGL(( getminimal) , dim3(1), dim3(N), 0, 0, devFX, devFX1, devX, devX1, n, total);
}
hipMemcpy(fx, devFX, total*sizeof(double), hipMemcpyDeviceToHost);
// Find the smallest value
int index = 0;
for (int id = 1; id < total; id++)
{
if (fx[id] < fx[index])
index = id;
}
hipMemcpy(x, &devX[n*index], n*sizeof(double), hipMemcpyDeviceToHost);
// Print the results
std::cout << "Minimum point : ";
for (auto i = 0; i < n; i++)
{
std::cout << x[i];
if (i < n - 1) std::cout << ",";
}
std::cout << std::endl;
std::cout << " : " << fx[index] << std::endl;
free(x);
free(fx);
hipFree(devX);
hipFree(devX1);
hipFree(devFX);
hipFree(devFX1);
hipFree(devA);
hipFree(devB);
hipFree(devP);
hipFree(devM);
hipFree(devK);
hipFree(devStates);
getchar(); // Wait for keyboard input before the program finishes
return 0;
}
| 46cf4534048060c79d577c9b21bb96733be2db63.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand_kernel.h>
#include <locale.h>
#include <iostream>
int n = 3; // dimension of the search space
double a[] = { 0, 0, 0 }; // lower bounds of the variables
double b[] = { 100, 100, 100 }; // upper bounds of the variables
int m[] = { 10, 10, 10 }; // number of grid elements per dimension
int R = 10000; // number of iterations
double p[] = { 10, 20, 30 };
// The function under study
// after the call, the elements of the array s have to be summed
__global__ void f(double *fx, double *x, double *p, int n, int total)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id<total; id += gridDim.x*blockDim.x)
{
fx[id] = 0;
for (int i = 0; i<n; i++)
{
fx[id] += (x[n*id + i] - p[i])*(x[n*id + i] - p[i]);
}
}
}
// Initialize the pseudorandom number generator
__global__ void setuprand(curandState *state, int total)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id<total; id += gridDim.x*blockDim.x)
curand_init(1234, id, 0, &state[id]);
}
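// fillindex decomposes each linear cell id into per-dimension grid indices
// k[0..n-1] (a mixed-radix expansion with radices m[i]); randvector then draws
// one uniform point inside that cell, i.e. one stratified sample per grid cell.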
__global__ void fillindex(int *k, int *m, int n, int total)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id<total; id += gridDim.x*blockDim.x)
{
for (int i = 0, j = id; i < n; i++)
{
k[n*id + i] = j%m[i];
j /= m[i];
}
}
}
// Pseudorandom vector generator
__global__ void randvector(double *x, double *a, double *b, int *k, int *m, int n, curandState *state, int total)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < total; id += gridDim.x*blockDim.x)
{
curandState localState = state[id];
for (int i = 0; i < n; i++)
{
double p = curand_uniform_double(&localState);
x[n*id + i] = ((m[i] - k[n*id + i])*a[i] + k[n*id + i] * b[i] + p*(b[i] - a[i])) / m[i];
}
state[id] = localState;
}
}
__global__ void getminimal(double *fx, double *fx1, double *x, double *x1, int n, int total)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < total; id += gridDim.x*blockDim.x)
{
if (fx1[id] < fx[id])
{
fx[id] = fx1[id];
for (int i = 0; i < n; i++) x[n*id + i] = x1[n*id + i];
}
}
}
int main()
{
// Cyrillic support in the Windows console
// The setlocale() function takes two parameters: the first is the locale category type, in our case LC_TYPE - the character set; the second is the locale value.
// Instead of the second argument you can write "Russian", or leave empty double quotes, in which case the character set will be the same as in the OS.
setlocale(LC_ALL, "");
int total = 1; for (auto i = 0; i < n; i++) total *= m[i];
int N = (1 + sqrt(total)>255) ? 255 : (int)(1 + sqrt(total));
curandState *devStates;
cudaMalloc((void **)&devStates, total*sizeof(curandState));
setuprand <<<1, N>>>(devStates, total);
double *devA, *devB, *devX, *devX1, *devP, *devFX, *devFX1;
double *x, *fx;
int *devK, *devM;
cudaMalloc((void **)&devA, n*sizeof(double));
cudaMalloc((void **)&devB, n*sizeof(double));
cudaMalloc((void **)&devP, n*sizeof(double));
cudaMalloc((void **)&devM, n*sizeof(int));
cudaMalloc((void **)&devK, n*total*sizeof(int));
cudaMalloc((void **)&devX, n*total*sizeof(double));
cudaMalloc((void **)&devX1, n*total*sizeof(double));
cudaMalloc((void **)&devFX, total*sizeof(double));
cudaMalloc((void **)&devFX1, total*sizeof(double));
fx = (double *)malloc(total*sizeof(double));
x = (double *)malloc(n*sizeof(double));
cudaMemcpy(devA, a, n*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(devB, b, n*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(devP, p, n*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(devM, m, n*sizeof(int), cudaMemcpyHostToDevice);
fillindex <<<1, N>>>(devK, devM, n, total);
// Choose the starting point
randvector <<<1, N>>>(devX, devA, devB, devK, devM, n, devStates, total);
f <<<1, N>>>(devFX, devX, devP, n, total);
for (auto r = 0; r < R; r++)
{
//std::clog << "Выбор следующей точки" << std::endl;
randvector <<<1, N>>>(devX1, devA, devB, devK, devM, n, devStates, total);
f <<<1, N>>>(devFX1, devX1, devP, n, total);
getminimal <<<1, N>>>(devFX, devFX1, devX, devX1, n, total);
}
cudaMemcpy(fx, devFX, total*sizeof(double), cudaMemcpyDeviceToHost);
// Find the smallest value
int index = 0;
for (int id = 1; id < total; id++)
{
if (fx[id] < fx[index])
index = id;
}
cudaMemcpy(x, &devX[n*index], n*sizeof(double), cudaMemcpyDeviceToHost);
// Print the results
std::cout << "Minimum point : ";
for (auto i = 0; i < n; i++)
{
std::cout << x[i];
if (i < n - 1) std::cout << ",";
}
std::cout << std::endl;
std::cout << "Значение минимума : " << fx[index] << std::endl;
free(x);
free(fx);
cudaFree(devX);
cudaFree(devX1);
cudaFree(devFX);
cudaFree(devFX1);
cudaFree(devA);
cudaFree(devB);
cudaFree(devP);
cudaFree(devM);
cudaFree(devK);
cudaFree(devStates);
getchar(); // Wait for keyboard input before the program finishes
return 0;
}
|
e6091e4495507f10e79a8c9105353c433c3f96f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__ void add(int n, float *a, float *b, float *sum)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<n)
{
sum[i] = a[i] + b[i];
}
} | e6091e4495507f10e79a8c9105353c433c3f96f3.cu | extern "C"
__global__ void add(int n, float *a, float *b, float *sum)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<n)
{
sum[i] = a[i] + b[i];
}
} |
b98f84b28e325d4802e178efbacb949193a719de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2012-2013, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "telescope/station/oskar_evaluate_element_weights_dft_cuda.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Kernel wrappers. ======================================================== */
/* Single precision. */
void oskar_evaluate_element_weights_dft_cuda_f(float2* d_weights,
int num_elements, float wavenumber, const float* d_x,
const float* d_y, const float* d_z, float x_beam, float y_beam,
float z_beam)
{
int num_blocks, num_threads = 256;
num_blocks = (num_elements + num_threads - 1) / num_threads;
oskar_evaluate_element_weights_dft_cudak_f
OSKAR_CUDAK_CONF(num_blocks, num_threads) (d_weights, num_elements,
wavenumber, d_x, d_y, d_z, x_beam, y_beam, z_beam);
}
/* Double precision. */
void oskar_evaluate_element_weights_dft_cuda_d(double2* d_weights,
int num_elements, double wavenumber, const double* d_x,
const double* d_y, const double* d_z, double x_beam, double y_beam,
double z_beam)
{
int num_blocks, num_threads = 256;
num_blocks = (num_elements + num_threads - 1) / num_threads;
oskar_evaluate_element_weights_dft_cudak_d
OSKAR_CUDAK_CONF(num_blocks, num_threads) (d_weights, num_elements,
wavenumber, d_x, d_y, d_z, x_beam, y_beam, z_beam);
}
/* Kernels. ================================================================ */
/* Single precision. */
__global__
void oskar_evaluate_element_weights_dft_cudak_f(float2* weights,
const int n_in, const float wavenumber, const float* x_in,
const float* y_in, const float* z_in, const float x_out,
const float y_out, const float z_out)
{
float cxi, cyi, czi, phase;
float2 weight;
/* Get input index. */
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= n_in) return;
/* Cache input data from global memory. */
cxi = wavenumber * x_in[i];
cyi = wavenumber * y_in[i];
czi = wavenumber * z_in[i];
/* Compute the geometric phase of the output direction. */
phase = cxi * x_out;
phase += cyi * y_out;
phase += czi * z_out;
sincosf(-phase, &weight.y, &weight.x);
/* Write result to global memory. */
weights[i] = weight;
}
/* Double precision. */
__global__
void oskar_evaluate_element_weights_dft_cudak_d(double2* weights,
const int n_in, const double wavenumber, const double* x_in,
const double* y_in, const double* z_in, const double x_out,
const double y_out, const double z_out)
{
double cxi, cyi, czi, phase;
double2 weight;
/* Get input index. */
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= n_in) return;
/* Cache input data from global memory. */
cxi = wavenumber * x_in[i];
cyi = wavenumber * y_in[i];
czi = wavenumber * z_in[i];
/* Compute the geometric phase of the output direction. */
phase = cxi * x_out;
phase += cyi * y_out;
phase += czi * z_out;
sincos(-phase, &weight.y, &weight.x);
/* Write result to global memory. */
weights[i] = weight;
}
#ifdef __cplusplus
}
#endif
| b98f84b28e325d4802e178efbacb949193a719de.cu | /*
* Copyright (c) 2012-2013, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "telescope/station/oskar_evaluate_element_weights_dft_cuda.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Kernel wrappers. ======================================================== */
/* Single precision. */
void oskar_evaluate_element_weights_dft_cuda_f(float2* d_weights,
int num_elements, float wavenumber, const float* d_x,
const float* d_y, const float* d_z, float x_beam, float y_beam,
float z_beam)
{
int num_blocks, num_threads = 256;
num_blocks = (num_elements + num_threads - 1) / num_threads;
oskar_evaluate_element_weights_dft_cudak_f
OSKAR_CUDAK_CONF(num_blocks, num_threads) (d_weights, num_elements,
wavenumber, d_x, d_y, d_z, x_beam, y_beam, z_beam);
}
/* Double precision. */
void oskar_evaluate_element_weights_dft_cuda_d(double2* d_weights,
int num_elements, double wavenumber, const double* d_x,
const double* d_y, const double* d_z, double x_beam, double y_beam,
double z_beam)
{
int num_blocks, num_threads = 256;
num_blocks = (num_elements + num_threads - 1) / num_threads;
oskar_evaluate_element_weights_dft_cudak_d
OSKAR_CUDAK_CONF(num_blocks, num_threads) (d_weights, num_elements,
wavenumber, d_x, d_y, d_z, x_beam, y_beam, z_beam);
}
/* Kernels. ================================================================ */
/* Single precision. */
__global__
void oskar_evaluate_element_weights_dft_cudak_f(float2* weights,
const int n_in, const float wavenumber, const float* x_in,
const float* y_in, const float* z_in, const float x_out,
const float y_out, const float z_out)
{
float cxi, cyi, czi, phase;
float2 weight;
/* Get input index. */
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= n_in) return;
/* Cache input data from global memory. */
cxi = wavenumber * x_in[i];
cyi = wavenumber * y_in[i];
czi = wavenumber * z_in[i];
/* Compute the geometric phase of the output direction. */
phase = cxi * x_out;
phase += cyi * y_out;
phase += czi * z_out;
sincosf(-phase, &weight.y, &weight.x);
/* Write result to global memory. */
weights[i] = weight;
}
/* Double precision. */
__global__
void oskar_evaluate_element_weights_dft_cudak_d(double2* weights,
const int n_in, const double wavenumber, const double* x_in,
const double* y_in, const double* z_in, const double x_out,
const double y_out, const double z_out)
{
double cxi, cyi, czi, phase;
double2 weight;
/* Get input index. */
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= n_in) return;
/* Cache input data from global memory. */
cxi = wavenumber * x_in[i];
cyi = wavenumber * y_in[i];
czi = wavenumber * z_in[i];
/* Compute the geometric phase of the output direction. */
phase = cxi * x_out;
phase += cyi * y_out;
phase += czi * z_out;
sincos(-phase, &weight.y, &weight.x);
/* Write result to global memory. */
weights[i] = weight;
}
#ifdef __cplusplus
}
#endif
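/* Editor's sketch (not part of the original file): one possible host-side call of the
   single-precision wrapper above. The element count, wavenumber and beam direction are
   made-up values for illustration only; nvcc pulls in cuda_runtime.h for .cu files, so
   cudaMalloc/cudaFree and float2 are available here. */
static void demo_element_weights(void)
{
    const int n = 256;                                        /* assumed number of elements */
    const float wavenumber = 2.0f * 3.14159265358979f / 2.0f; /* 2*pi/lambda, lambda = 2 m (assumed) */
    float *d_x = 0, *d_y = 0, *d_z = 0;
    float2 *d_weights = 0;
    cudaMalloc((void**)&d_x, n * sizeof(float));
    cudaMalloc((void**)&d_y, n * sizeof(float));
    cudaMalloc((void**)&d_z, n * sizeof(float));
    cudaMalloc((void**)&d_weights, n * sizeof(float2));
    /* ... fill d_x, d_y, d_z with element coordinates (metres) ... */
    oskar_evaluate_element_weights_dft_cuda_f(d_weights, n, wavenumber,
            d_x, d_y, d_z, 0.0f, 0.0f, 1.0f /* assumed zenith-pointing beam */);
    cudaFree(d_x); cudaFree(d_y); cudaFree(d_z); cudaFree(d_weights);
}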
|
feb8769cfa6777cfa56c0d9eebceb2d640706a81.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <iterator>
#include <vector>
using std::begin;
using std::copy;
using std::cout;
using std::endl;
using std::end;
using std::generate;
using std::vector;
__global__ void add(int* out, int* a, int* b, int n) {
out[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
/* for (int i = 0; i < 10; i++) {
out[i] = a[i] + b[i];
}*/
}
int main() {
int* a, * b, * out;
int N = 100;
int size = sizeof(int) * N;
a = (int*)malloc(size);
b = (int*)malloc(size);
out = (int*)malloc(size);
for (int i = 0; i < N; i++) {
a[i] = i;
b[i] = N;
out[i] = 0;
}
int* d_a, * d_b, * d_out;
hipMalloc(&d_a, size);
hipMalloc(&d_b, size);
hipMalloc(&d_out, size);
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
hipMemcpy(d_out, out, size, hipMemcpyHostToDevice);
cout << " COMPLETED SUCCESSFULLY\n";
// blocks, threads per block. 1024 threads per block
// add <<<N/256 + 1, 256>>
add<<<N, 1>>>(d_out, d_a, d_b, N);
hipMemcpy(out, d_out, size, hipMemcpyDeviceToHost);
// not &out
for (int i = 0; i < N; i++) {
cout << out[i] << endl;
}
cout << " COMPLETED SUCCESSFULLY\n";
// //// Cleanup after kernel execution
hipFree(d_a); hipFree(d_b); hipFree(d_out);
free(a); free(b); free(out);
return 0;
}
| feb8769cfa6777cfa56c0d9eebceb2d640706a81.cu | #include <cuda_runtime.h>
#include <cuda.h>
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <iterator>
#include <vector>
using std::begin;
using std::copy;
using std::cout;
using std::endl;
using std::end;
using std::generate;
using std::vector;
__global__ void add(int* out, int* a, int* b, int n) {
out[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
/* for (int i = 0; i < 10; i++) {
out[i] = a[i] + b[i];
}*/
}
int main() {
int* a, * b, * out;
int N = 100;
int size = sizeof(int) * N;
a = (int*)malloc(size);
b = (int*)malloc(size);
out = (int*)malloc(size);
for (int i = 0; i < N; i++) {
a[i] = i;
b[i] = N;
out[i] = 0;
}
int* d_a, * d_b, * d_out;
cudaMalloc(&d_a, size);
cudaMalloc(&d_b, size);
cudaMalloc(&d_out, size);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_out, out, size, cudaMemcpyHostToDevice);
cout << " COMPLETED SUCCESSFULLY\n";
// blocks, threads per block. 1024 threads per block
// add <<<N/256 + 1, 256>>
add<<<N, 1>>>(d_out, d_a, d_b, N);
cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
// not &out
for (int i = 0; i < N; i++) {
cout << out[i] << endl;
}
cout << " COMPLETED SUCCESSFULLY\n";
// //// Cleanup after kernel execution
cudaFree(d_a); cudaFree(d_b); cudaFree(d_out);
free(a); free(b); free(out);
return 0;
}
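/* Editor's sketch (not part of the original file): the calls above ignore CUDA return
   codes, so failures surface only as wrong output. A minimal checking pattern such as
   the macro below could be wrapped around each runtime call; the macro name is made up
   here and everything it uses is already included above. */
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            std::cerr << "CUDA error: " << cudaGetErrorString(err_)           \
                      << " at " << __FILE__ << ":" << __LINE__ << std::endl;  \
            std::exit(EXIT_FAILURE);                                          \
        }                                                                     \
    } while (0)
/* Example usage:
       CUDA_CHECK(cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost));
   Kernel launches return void, so they are checked indirectly:
       add<<<N, 1>>>(d_out, d_a, d_b, N);
       CUDA_CHECK(cudaGetLastError());        // launch/configuration errors
       CUDA_CHECK(cudaDeviceSynchronize());   // errors raised during execution */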
|
28000723064ac2b100f9ec29650ba0f2c9a3799b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/top_k_function_cuda.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/funcs/gather.cu.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/top_k_kernel.h"
namespace phi {
namespace ops = paddle::operators;
#define FIXED_BLOCK_DIM_BASE(dim, ...) \
case (dim): { \
constexpr auto kBlockDim = (dim); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM(...) \
FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__)
template <typename T, typename Context>
void TopkKernel(const Context& dev_ctx,
const DenseTensor& x,
const Scalar& k_scalar,
int axis,
bool largest,
bool sorted,
DenseTensor* out,
DenseTensor* indices) {
const auto* input = &x;
// get the input dims
const auto& in_dims = input->dims();
// calculate the real axis
if (axis < 0) axis += in_dims.size();
int k = k_scalar.to<int>();
if (k_scalar.FromTensor()) {
phi::DDim out_dims = out->dims();
out_dims[axis] = k;
out->Resize(out_dims);
indices->Resize(out_dims);
}
const auto& out_dims = out->dims();
const T* input_data = input->data<T>();
T* output_data = dev_ctx.template Alloc<T>(out);
int64_t* indices_data = dev_ctx.template Alloc<int64_t>(indices);
if (axis == in_dims.size() - 1) {
// if get the topK from the last axis
const int64_t& input_height =
phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1));
const int64_t& input_width = in_dims[in_dims.size() - 1];
if (k > input_width) {
k = input_width;
}
// The conclusion is drawn from the data through multiple sets of
// statistics
if (input_width >= 128 && k >= input_width * 0.75) {
auto* ctx = reinterpret_cast<const paddle::platform::CUDADeviceContext*>(
&dev_ctx);
if (ops::SortTopk<T>(*ctx,
input,
input_width,
input_height,
k,
out,
indices,
largest)) {
// Succeeded, return.
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
#if defined(PADDLE_WITH_CUDA) && TORCH_HIP_VERSION >= 9000
if (input_width >= 1024 && in_dims.size() == 1) {
// 1. Gather TopK, but without sorting
constexpr int max_num_threads = 1024;
if (largest) {
hipLaunchKernelGGL(( ops::RadixTopK<T, true>)
, dim3(input_height), dim3(max_num_threads), 0, dev_ctx.stream(),
input_data,
k,
input_height,
input_width,
output_data,
indices_data);
} else {
hipLaunchKernelGGL(( ops::RadixTopK<T, false>)
, dim3(input_height), dim3(max_num_threads), 0, dev_ctx.stream(),
input_data,
k,
input_height,
input_width,
output_data,
indices_data);
}
// 2. Sort if needed
if (sorted) {
DenseTensor sorted_output;
DenseTensor sorted_indices;
DenseTensor gather_indices;
sorted_output.Resize(out->dims());
sorted_indices.Resize(indices->dims());
gather_indices.Resize(indices->dims());
dev_ctx.template Alloc<T>(&sorted_output);
dev_ctx.template Alloc<int64_t>(&sorted_indices);
dev_ctx.template Alloc<int64_t>(&gather_indices);
auto* ctx =
reinterpret_cast<const paddle::platform::CUDADeviceContext*>(
&dev_ctx);
if (ops::SortTopk<T>(*ctx,
out,
k,
input_height,
k,
&sorted_output,
&sorted_indices,
largest)) {
funcs::GPUGather<int64_t, int64_t>(
dev_ctx, *indices, sorted_indices, &gather_indices);
Copy(dev_ctx, gather_indices, indices->place(), false, indices);
Copy(dev_ctx, sorted_output, out->place(), false, out);
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
} else {
return;
}
}
#endif
// NOTE: pass lds and dim same to input width.
// NOTE: old matrix implementation of stride is different to eigen.
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
switch (ops::GetDesiredBlockDim(input_width)) {
#ifdef PADDLE_WITH_HIP
FIXED_BLOCK_DIM(
hipLaunchKernelGGL(( ops::KeMatrixTopK<T, 20, kBlockDim>)
, dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), output_data,
k,
indices_data,
input_data,
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#else
FIXED_BLOCK_DIM(
hipLaunchKernelGGL(( ops::KeMatrixTopK<T, 5, kBlockDim>)
, dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), output_data,
k,
indices_data,
input_data,
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#endif
default:
PADDLE_THROW(errors::Fatal(
"the input data shape has error in the topk cuda kernel."));
}
} else {
// if topK is not taken from the last axis, transpose the tensor and get
// TopK
// first step, prepare the trans args for the transpose
std::vector<int> trans;
for (int i = 0; i < axis; i++) {
trans.emplace_back(i);
}
trans.emplace_back(in_dims.size() - 1);
for (int i = axis + 1; i < in_dims.size() - 1; i++) {
trans.emplace_back(i);
}
trans.emplace_back(axis);
phi::DDim trans_dims(in_dims);
phi::DDim trans_out_dims(out->dims());
for (int i = 0; i < trans.size(); i++) {
trans_dims[i] = in_dims[trans[i]];
trans_out_dims[i] = out_dims[trans[i]];
}
// second step, transpose the input
DenseTensor trans_input;
trans_input.Resize(trans_dims);
dev_ctx.template Alloc<T>(&trans_input);
int ndims = trans.size();
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, *input, &trans_input, trans);
// third step, calculate the topk
// allocate the tmp cuda memory for the tmp result
DenseTensor trans_ind;
DenseTensor trans_out;
trans_ind.Resize(trans_out_dims);
trans_out.Resize(trans_out_dims);
dev_ctx.template Alloc<int64_t>(&trans_ind);
dev_ctx.template Alloc<T>(&trans_out);
const int64_t input_height =
phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
const int64_t input_width = trans_dims[trans_dims.size() - 1];
if (k > input_width) k = input_width;
// The conclusion is drawn from the data through multiple sets of
// statistics
if (input_width >= 128 && k >= input_width * 0.75) {
auto* ctx = reinterpret_cast<const paddle::platform::CUDADeviceContext*>(
&dev_ctx);
if (ops::SortTopk<T>(*ctx,
&trans_input,
input_width,
input_height,
k,
&trans_out,
&trans_ind,
largest)) {
// last step, transpose back the indices and output
funcs::TransCompute<phi::GPUContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, trans_out, out, trans);
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
switch (ops::GetDesiredBlockDim(input_width)) {
#ifdef PADDLE_WITH_HIP
FIXED_BLOCK_DIM(
hipLaunchKernelGGL(( ops::KeMatrixTopK<T, 20, kBlockDim>)
, dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), trans_out.data<T>(),
k,
trans_ind.data<int64_t>(),
trans_input.data<T>(),
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#else
FIXED_BLOCK_DIM(
hipLaunchKernelGGL(( ops::KeMatrixTopK<T, 5, kBlockDim>)
, dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), trans_out.data<T>(),
k,
trans_ind.data<int64_t>(),
trans_input.data<T>(),
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#endif
default:
PADDLE_THROW(errors::Fatal(
"the input data shape has error in the topk cuda kernel."));
}
// last step, transpose back the indices and output
funcs::TransCompute<phi::GPUContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, trans_out, out, trans);
}
}
#undef FIXED_BLOCK_DIM_BASE
#undef FIXED_BLOCK_DIM
} // namespace phi
PD_REGISTER_KERNEL(top_k,
GPU,
ALL_LAYOUT,
phi::TopkKernel,
float,
double,
int,
int64_t,
phi::dtype::float16) {}
| 28000723064ac2b100f9ec29650ba0f2c9a3799b.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/top_k_function_cuda.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/funcs/gather.cu.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/top_k_kernel.h"
namespace phi {
namespace ops = paddle::operators;
#define FIXED_BLOCK_DIM_BASE(dim, ...) \
case (dim): { \
constexpr auto kBlockDim = (dim); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM(...) \
FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__)
template <typename T, typename Context>
void TopkKernel(const Context& dev_ctx,
const DenseTensor& x,
const Scalar& k_scalar,
int axis,
bool largest,
bool sorted,
DenseTensor* out,
DenseTensor* indices) {
const auto* input = &x;
// get the input dims
const auto& in_dims = input->dims();
// calculate the real axis
if (axis < 0) axis += in_dims.size();
int k = k_scalar.to<int>();
if (k_scalar.FromTensor()) {
phi::DDim out_dims = out->dims();
out_dims[axis] = k;
out->Resize(out_dims);
indices->Resize(out_dims);
}
const auto& out_dims = out->dims();
const T* input_data = input->data<T>();
T* output_data = dev_ctx.template Alloc<T>(out);
int64_t* indices_data = dev_ctx.template Alloc<int64_t>(indices);
if (axis == in_dims.size() - 1) {
// if get the topK from the last axis
const int64_t& input_height =
phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1));
const int64_t& input_width = in_dims[in_dims.size() - 1];
if (k > input_width) {
k = input_width;
}
// The conclusion is drawn from the data through multiple sets of
// statistics
if (input_width >= 128 && k >= input_width * 0.75) {
auto* ctx = reinterpret_cast<const paddle::platform::CUDADeviceContext*>(
&dev_ctx);
if (ops::SortTopk<T>(*ctx,
input,
input_width,
input_height,
k,
out,
indices,
largest)) {
// Succeeded, return.
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
#if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 9000
if (input_width >= 1024 && in_dims.size() == 1) {
// 1. Gather TopK, but without sorting
constexpr int max_num_threads = 1024;
if (largest) {
ops::RadixTopK<T, true>
<<<input_height, max_num_threads, 0, dev_ctx.stream()>>>(
input_data,
k,
input_height,
input_width,
output_data,
indices_data);
} else {
ops::RadixTopK<T, false>
<<<input_height, max_num_threads, 0, dev_ctx.stream()>>>(
input_data,
k,
input_height,
input_width,
output_data,
indices_data);
}
// 2. Sort if needed
if (sorted) {
DenseTensor sorted_output;
DenseTensor sorted_indices;
DenseTensor gather_indices;
sorted_output.Resize(out->dims());
sorted_indices.Resize(indices->dims());
gather_indices.Resize(indices->dims());
dev_ctx.template Alloc<T>(&sorted_output);
dev_ctx.template Alloc<int64_t>(&sorted_indices);
dev_ctx.template Alloc<int64_t>(&gather_indices);
auto* ctx =
reinterpret_cast<const paddle::platform::CUDADeviceContext*>(
&dev_ctx);
if (ops::SortTopk<T>(*ctx,
out,
k,
input_height,
k,
&sorted_output,
&sorted_indices,
largest)) {
funcs::GPUGather<int64_t, int64_t>(
dev_ctx, *indices, sorted_indices, &gather_indices);
Copy(dev_ctx, gather_indices, indices->place(), false, indices);
Copy(dev_ctx, sorted_output, out->place(), false, out);
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
} else {
return;
}
}
#endif
// NOTE: pass lds and dim same to input width.
// NOTE: old matrix implementation of stride is different to eigen.
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
switch (ops::GetDesiredBlockDim(input_width)) {
#ifdef PADDLE_WITH_HIP
FIXED_BLOCK_DIM(
ops::KeMatrixTopK<T, 20, kBlockDim>
<<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(output_data,
k,
indices_data,
input_data,
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#else
FIXED_BLOCK_DIM(
ops::KeMatrixTopK<T, 5, kBlockDim>
<<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(output_data,
k,
indices_data,
input_data,
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#endif
default:
PADDLE_THROW(errors::Fatal(
"the input data shape has error in the topk cuda kernel."));
}
} else {
// if topK is not taken from the last axis, transpose the tensor and get
// TopK
// first step, prepare the trans args for the transpose
std::vector<int> trans;
for (int i = 0; i < axis; i++) {
trans.emplace_back(i);
}
trans.emplace_back(in_dims.size() - 1);
for (int i = axis + 1; i < in_dims.size() - 1; i++) {
trans.emplace_back(i);
}
trans.emplace_back(axis);
phi::DDim trans_dims(in_dims);
phi::DDim trans_out_dims(out->dims());
for (int i = 0; i < trans.size(); i++) {
trans_dims[i] = in_dims[trans[i]];
trans_out_dims[i] = out_dims[trans[i]];
}
// second step, transpose the input
DenseTensor trans_input;
trans_input.Resize(trans_dims);
dev_ctx.template Alloc<T>(&trans_input);
int ndims = trans.size();
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, *input, &trans_input, trans);
// third step, calculate the topk
// allocate the tmp cuda memory for the tmp result
DenseTensor trans_ind;
DenseTensor trans_out;
trans_ind.Resize(trans_out_dims);
trans_out.Resize(trans_out_dims);
dev_ctx.template Alloc<int64_t>(&trans_ind);
dev_ctx.template Alloc<T>(&trans_out);
const int64_t input_height =
phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
const int64_t input_width = trans_dims[trans_dims.size() - 1];
if (k > input_width) k = input_width;
// The conclusion is drawn from the data through multiple sets of
// statistics
if (input_width >= 128 && k >= input_width * 0.75) {
auto* ctx = reinterpret_cast<const paddle::platform::CUDADeviceContext*>(
&dev_ctx);
if (ops::SortTopk<T>(*ctx,
&trans_input,
input_width,
input_height,
k,
&trans_out,
&trans_ind,
largest)) {
// last step, transpose back the indices and output
funcs::TransCompute<phi::GPUContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, trans_out, out, trans);
return;
} else {
VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
switch (ops::GetDesiredBlockDim(input_width)) {
#ifdef PADDLE_WITH_HIP
FIXED_BLOCK_DIM(
ops::KeMatrixTopK<T, 20, kBlockDim>
<<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(trans_out.data<T>(),
k,
trans_ind.data<int64_t>(),
trans_input.data<T>(),
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#else
FIXED_BLOCK_DIM(
ops::KeMatrixTopK<T, 5, kBlockDim>
<<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(trans_out.data<T>(),
k,
trans_ind.data<int64_t>(),
trans_input.data<T>(),
input_width,
input_width,
static_cast<int>(k),
gridx,
input_height,
largest));
#endif
default:
PADDLE_THROW(errors::Fatal(
"the input data shape has error in the topk cuda kernel."));
}
// last step, transpose back the indices and output
funcs::TransCompute<phi::GPUContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, trans_out, out, trans);
}
}
#undef FIXED_BLOCK_DIM_BASE
#undef FIXED_BLOCK_DIM
} // namespace phi
PD_REGISTER_KERNEL(top_k,
GPU,
ALL_LAYOUT,
phi::TopkKernel,
float,
double,
int,
int64_t,
phi::dtype::float16) {}
|
5371b6a851cf68e3040f2ca236c1acfe8c36d1c6.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/core/core.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "kernel.h"
#include "helperfunctions.h"
int main(int argc, char** argv)
{
//If the program is started with the "help" argument, describe how the program works.
if (argc == 2 && strcmp("help", argv[1]) == 0) {
printHelpMessage(stdout);
return 0;
}
//Check whether a suitable GPU is available
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "You don't have a CUDA capable GPU. Buy one! Sorry.\n");
return NO_DEVICE_ERROR;
}
hipSetDevice(0); //TODO: check the error code?????
float sigma_s, sigma_r; //parameters of the corresponding Gaussian functions
int r, threads; //r: radius of the spatial kernel, threads: number of threads per block in a given dimension
int returnValue = readConfigParameters(argc, argv, sigma_s, sigma_r, r, threads);
if (returnValue != 0) {
return returnValue;
}
cv::Mat image; //the image is read with an OpenCV function.
image = cv::imread(argv[1], 0); //read the image and convert it to 8-bit grayscale
if (!image.data) {
fprintf(stderr, "Could not open or find the input image\n\n");
return NO_IMAGE_ERROR;
}
int width = image.cols;
int height = image.rows;
int imageSize = width * height;
int n = 2 * r + 1; //side length of the spatial kernel
int spatialKernelSize = n * n;
int rangeKernelSize = 511;
float *d_spatialKernel = NULL, *d_rangeKernel = NULL; //everything is set to NULL so that, if an error occurs, a given pointer
unsigned char *d_inputImage = NULL, *d_outputImage = NULL; //can be freed with the freeEverything function even before allocation and use.
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//Allocate all the device memory used. If there is an error, exit.
if (!doAllMallocs(d_spatialKernel, d_rangeKernel, d_inputImage, d_outputImage, spatialKernelSize, rangeKernelSize, imageSize)) {
fprintf(stderr, "hipMalloc failed!\n\n");
hipEventDestroy(start);
hipEventDestroy(stop);
return CUDA_MALLOC_ERROR;
}
//fill the spatialKernel and rangeKernel arrays on the GPU side.
//Note: these are small arrays, so with this little data the GPU may not actually pay off.
//However, computing them on the CPU would also require a host --> device copy, which is slow as well.
//Note: streams could be used to run the two kernels concurrently, but the gain would be small because the bilateral filter kernel dominates.
//Moreover, older GPUs can only overlap a hipMemcpy with a kernel launch.
hipLaunchKernelGGL(( createSpatialKernel), dim3(dim3(n, n)), dim3(1), 0, 0, d_spatialKernel, r, sigma_s);
hipLaunchKernelGGL(( createRangeKernel), dim3(1), dim3(rangeKernelSize), 0, 0, d_rangeKernel, sigma_r);
//copy the image to the device.
if (hipMemcpy(d_inputImage, image.data, imageSize * sizeof(unsigned char), hipMemcpyHostToDevice) != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!\n\n");
freeEverything(d_spatialKernel, d_rangeKernel, d_inputImage, d_outputImage);
hipEventDestroy(start);
hipEventDestroy(stop);
return CUDA_MEMCPY_ERROR;
}
//the number of threads is fixed (either specified by the user or 32 by default).
//the number of blocks in a given dimension (gridDim.x, gridDim.y) is chosen as follows:
//e.g. the X-direction block count (blocksX) is the smallest number for which threads * blocksX >= width holds.
//this guarantees that every pixel gets processed
int blocksX = (width + threads - 1) / threads;
int blocksY = (height + threads - 1) / threads;
//both the spatialKernel and the rangeKernel array are stored in shared memory, so the sum of the two sizes must be passed at kernel launch.
int sharedMemSize = (spatialKernelSize + rangeKernelSize) * sizeof(float);
hipLaunchKernelGGL(( bilateralFilter), dim3(dim3(blocksX, blocksY)), dim3(dim3(threads, threads)), sharedMemSize , 0,
d_inputImage, d_outputImage, d_spatialKernel, d_rangeKernel, r, width, height);
if (hipMemcpy(image.data, d_outputImage, imageSize * sizeof(unsigned char), hipMemcpyDeviceToHost) != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!\n\n");
freeEverything(d_spatialKernel, d_rangeKernel, d_inputImage, d_outputImage);
hipEventDestroy(start);
hipEventDestroy(stop);
return CUDA_MEMCPY_ERROR;
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime; //Measure how long the GPU-specific instructions took to execute.
hipEventElapsedTime(&elapsedTime, start, stop);
printf("Execution time: %3.1f ms\n"
"with parameters: sigma_s = %3.1f, sigma_r = %3.1f, spatial kernel radius = %d, number of threads per block dim = %d\n\n",
elapsedTime, sigma_s, sigma_r, r, threads);
hipEventDestroy(start);
hipEventDestroy(stop);
if (!cv::imwrite(argv[2], image)) {
fprintf(stderr, "Failed to save the processed image.\n\n");
freeEverything(d_spatialKernel, d_rangeKernel, d_inputImage, d_outputImage);
return NO_IMAGE_ERROR;
}
freeEverything(d_spatialKernel, d_rangeKernel, d_inputImage, d_outputImage);
return 0; //we only return 0 if everything went fine
} | 5371b6a851cf68e3040f2ca236c1acfe8c36d1c6.cu |
#include <opencv2/core/core.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "kernel.h"
#include "helperfunctions.h"
int main(int argc, char** argv)
{
//If the program is started with the "help" argument, describe how the program works.
if (argc == 2 && strcmp("help", argv[1]) == 0) {
printHelpMessage(stdout);
return 0;
}
//Check whether a suitable GPU is available
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "You don't have a CUDA capable GPU. Buy one! Sorry.\n");
return NO_DEVICE_ERROR;
}
cudaSetDevice(0); //TODO: check the error code?????
float sigma_s, sigma_r; //parameters of the corresponding Gaussian functions
int r, threads; //r: radius of the spatial kernel, threads: number of threads per block in a given dimension
int returnValue = readConfigParameters(argc, argv, sigma_s, sigma_r, r, threads);
if (returnValue != 0) {
return returnValue;
}
cv::Mat image; //the image is read with an OpenCV function.
image = cv::imread(argv[1], 0); //read the image and convert it to 8-bit grayscale
if (!image.data) {
fprintf(stderr, "Could not open or find the input image\n\n");
return NO_IMAGE_ERROR;
}
int width = image.cols;
int height = image.rows;
int imageSize = width * height;
int n = 2 * r + 1; //side length of the spatial kernel
int spatialKernelSize = n * n;
int rangeKernelSize = 511;
float *d_spatialKernel = NULL, *d_rangeKernel = NULL; //everything is set to NULL so that, if an error occurs, a given pointer
unsigned char *d_inputImage = NULL, *d_outputImage = NULL; //can be freed with the freeEverything function even before allocation and use.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//Allocate all the device memory used. If there is an error, exit.
if (!doAllMallocs(d_spatialKernel, d_rangeKernel, d_inputImage, d_outputImage, spatialKernelSize, rangeKernelSize, imageSize)) {
fprintf(stderr, "cudaMalloc failed!\n\n");
cudaEventDestroy(start);
cudaEventDestroy(stop);
return CUDA_MALLOC_ERROR;
}
//fill the spatialKernel and rangeKernel arrays on the GPU side.
//Note: these are small arrays, so with this little data the GPU may not actually pay off.
//However, computing them on the CPU would also require a host --> device copy, which is slow as well.
//Note: streams could be used to run the two kernels concurrently, but the gain would be small because the bilateral filter kernel dominates.
//Moreover, older GPUs can only overlap a cudaMemcpy with a kernel launch.
createSpatialKernel<<<dim3(n, n), 1>>>(d_spatialKernel, r, sigma_s);
createRangeKernel<<<1, rangeKernelSize>>>(d_rangeKernel, sigma_r);
//copy the image to the device.
if (cudaMemcpy(d_inputImage, image.data, imageSize * sizeof(unsigned char), cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!\n\n");
freeEverything(d_spatialKernel, d_rangeKernel, d_inputImage, d_outputImage);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return CUDA_MEMCPY_ERROR;
}
//the number of threads is fixed (either specified by the user or 32 by default).
//the number of blocks in a given dimension (gridDim.x, gridDim.y) is chosen as follows:
//e.g. the X-direction block count (blocksX) is the smallest number for which threads * blocksX >= width holds.
//this guarantees that every pixel gets processed
int blocksX = (width + threads - 1) / threads;
int blocksY = (height + threads - 1) / threads;
//both the spatialKernel and the rangeKernel array are stored in shared memory, so the sum of the two sizes must be passed at kernel launch.
int sharedMemSize = (spatialKernelSize + rangeKernelSize) * sizeof(float);
bilateralFilter<<<dim3(blocksX, blocksY), dim3(threads, threads), sharedMemSize >>>
(d_inputImage, d_outputImage, d_spatialKernel, d_rangeKernel, r, width, height);
if (cudaMemcpy(image.data, d_outputImage, imageSize * sizeof(unsigned char), cudaMemcpyDeviceToHost) != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!\n\n");
freeEverything(d_spatialKernel, d_rangeKernel, d_inputImage, d_outputImage);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return CUDA_MEMCPY_ERROR;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime; //Measure how long the GPU-specific instructions took to execute.
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Execution time: %3.1f ms\n"
"with parameters: sigma_s = %3.1f, sigma_r = %3.1f, spatial kernel radius = %d, number of threads per block dim = %d\n\n",
elapsedTime, sigma_s, sigma_r, r, threads);
cudaEventDestroy(start);
cudaEventDestroy(stop);
if (!cv::imwrite(argv[2], image)) {
fprintf(stderr, "Failed to save the processed image.\n\n");
freeEverything(d_spatialKernel, d_rangeKernel, d_inputImage, d_outputImage);
return NO_IMAGE_ERROR;
}
freeEverything(d_spatialKernel, d_rangeKernel, d_inputImage, d_outputImage);
return 0; //we only return 0 if everything went fine
} |
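/* Editor's sketch (not part of the original file pair above): kernel.h is not included
   in this pair, so the bilateralFilter kernel body is unknown. The fragment below only
   illustrates, with hypothetical names, how a kernel can carve up the dynamically sized
   shared buffer whose byte count is passed as the third <<<...>>> launch argument above:
   the first spatialKernelSize floats hold the spatial weights, the rest the range weights. */
__global__ void dynamicSharedSplitSketch(const float* spatialKernel, const float* rangeKernel,
                                         int spatialKernelSize, int rangeKernelSize)
{
    extern __shared__ float s_buf[];                 /* sized by the launch parameter */
    float* s_spatial = s_buf;                        /* first spatialKernelSize floats */
    float* s_range   = s_buf + spatialKernelSize;    /* remaining rangeKernelSize floats */
    const int total = spatialKernelSize + rangeKernelSize;
    for (int i = threadIdx.y * blockDim.x + threadIdx.x; i < total; i += blockDim.x * blockDim.y)
    {
        if (i < spatialKernelSize)
            s_spatial[i] = spatialKernel[i];
        else
            s_range[i - spatialKernelSize] = rangeKernel[i - spatialKernelSize];
    }
    __syncthreads();
    /* ... the actual filtering would read s_spatial / s_range from here on ... */
}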
6bda5af0060e8043496fa5fe42de073ba6b550a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.h"
#define SIZE (100*1024*1024)
__global__ void histo_kernel(unsigned char *buffer,long size, unsigned int *histo){
__shared__ unsigned int temp[256];
temp[threadIdx.x]=0;
__syncthreads(); // wait until the shared histogram is fully zeroed
int i = threadIdx.x + blockIdx.x *blockDim.x;
int offset = blockDim.x *gridDim.x;
while(i<size){
atomicAdd(&(temp[buffer[i]]),1); // accumulate into the per-block shared histogram
i+=offset;
}
__syncthreads();
atomicAdd( &(histo[threadIdx.x]), temp[threadIdx.x] ); // merge this block's partial histogram into global memory
}
int main(void){
unsigned char *buffer = (unsigned char *) big_random_block(SIZE);
hipEvent_t start, stop;
HANDLE_ERROR( hipEventCreate( &start ));
HANDLE_ERROR( hipEventCreate( &stop ));
HANDLE_ERROR( hipEventRecord( start, 0));
unsigned char *dev_buffer;
unsigned int *dev_histo;
HANDLE_ERROR( hipMalloc( (void**)&dev_buffer, SIZE));
HANDLE_ERROR( hipMemcpy( dev_buffer, buffer, SIZE, hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_histo, 256 * sizeof( long )));
HANDLE_ERROR( hipMemset( dev_histo, 0, 256 * sizeof( int )));
hipDeviceProp_t prop;
HANDLE_ERROR( hipGetDeviceProperties( &prop, 0 ) );
int blocks = prop.multiProcessorCount;
hipLaunchKernelGGL(( histo_kernel), dim3(blocks*2),dim3(256), 0, 0, dev_buffer, SIZE, dev_histo );
unsigned int histo[256];
HANDLE_ERROR( hipMemcpy( histo, dev_histo,256 * sizeof( int ),hipMemcpyDeviceToHost));
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) );
printf( "Time to generate: %3.1f ms\n", elapsedTime );
long histoCount = 0;
for (int i=0; i<256; i++) {
histoCount += histo[i];
}
printf( "Histogram Sum: %ld\n", histoCount );
// verify that we have the same counts via CPU
for (int i=0; i<SIZE; i++)
histo[buffer[i]]--;
for (int i=0; i<256; i++)
{
if (histo[i] != 0)
printf( "Failure at %d!\n", i );
}
HANDLE_ERROR( hipEventDestroy( start ) );
HANDLE_ERROR( hipEventDestroy( stop ) );
hipFree( dev_histo );
hipFree( dev_buffer );
free( buffer );
return 0;
}
| 6bda5af0060e8043496fa5fe42de073ba6b550a4.cu | #include "util.h"
#define SIZE (100*1024*1024)
__global__ void histo_kernel(unsigned char *buffer,long size, unsigned int *histo){
__shared__ unsigned int temp[256];
temp[threadIdx.x]=0;
__syncthreads(); // wait until the shared histogram is fully zeroed
int i = threadIdx.x + blockIdx.x *blockDim.x;
int offset = blockDim.x *gridDim.x;
while(i<size){
atomicAdd(&(temp[buffer[i]]),1); // accumulate into the per-block shared histogram
i+=offset;
}
__syncthreads();
atomicAdd( &(histo[threadIdx.x]), temp[threadIdx.x] ); // merge this block's partial histogram into global memory
}
int main(void){
unsigned char *buffer = (unsigned char *) big_random_block(SIZE);
cudaEvent_t start, stop;
HANDLE_ERROR( cudaEventCreate( &start ));
HANDLE_ERROR( cudaEventCreate( &stop ));
HANDLE_ERROR( cudaEventRecord( start, 0));
unsigned char *dev_buffer;
unsigned int *dev_histo;
HANDLE_ERROR( cudaMalloc( (void**)&dev_buffer, SIZE));
HANDLE_ERROR( cudaMemcpy( dev_buffer, buffer, SIZE, cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_histo, 256 * sizeof( long )));
HANDLE_ERROR( cudaMemset( dev_histo, 0, 256 * sizeof( int )));
cudaDeviceProp prop;
HANDLE_ERROR( cudaGetDeviceProperties( &prop, 0 ) );
int blocks = prop.multiProcessorCount;
histo_kernel<<<blocks*2,256>>>( dev_buffer, SIZE, dev_histo );
unsigned int histo[256];
HANDLE_ERROR( cudaMemcpy( histo, dev_histo,256 * sizeof( int ),cudaMemcpyDeviceToHost));
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) );
printf( "Time to generate: %3.1f ms\n", elapsedTime );
long histoCount = 0;
for (int i=0; i<256; i++) {
histoCount += histo[i];
}
printf( "Histogram Sum: %ld\n", histoCount );
// verify that we have the same counts via CPU
for (int i=0; i<SIZE; i++)
histo[buffer[i]]--;
for (int i=0; i<256; i++)
{
if (histo[i] != 0)
printf( "Failure at %d!\n", i );
}
HANDLE_ERROR( cudaEventDestroy( start ) );
HANDLE_ERROR( cudaEventDestroy( stop ) );
cudaFree( dev_histo );
cudaFree( dev_buffer );
free( buffer );
return 0;
}
|
4a4a53f62f509faa680b530f2e02c6816b2be269.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*********************************************************************
* Copyright (c) Marwan Abdellah. All rights reserved.
* This code is part of my Master's Thesis Project entitled "High
* Performance Fourier Volume Rendering on Graphics Processing Units
* (GPUs)" and submitted to the Systems & Biomedical Engineering
* Department, Faculty of Engineering, Cairo University.
* Please, don't use or distribute without authors' permission.
* File : Volume
* Author(s) : Marwan Abdellah <[email protected]>
* Created : April 2011
* Description :
* Note(s) :
*********************************************************************/
#ifndef _MAXSUBARRAY_KERNEL_CU_
#define _MAXSUBARRAY_KERNEL_CU_
#include <cutil_inline.h>
#include "Shared.h"
__global__
void findMax(int numRows, int numCols, Max* dev_maxValues, int* dev_inputArray)
{
// Calculating the correct index from the configuration
int index = blockIdx.x * blockDim.x + threadIdx.x;
// In between parameters
// TODO: Salah to indicate what are the parameters
int tempMaxSum = 0;
int candMaxSubArr = 0;
int j = 0;
int prefSum [1024];
dev_maxValues[index].val = 0;
// Resetting the prefix sum array
for(int iCtr = 0; iCtr < numCols; iCtr++)
prefSum[iCtr] = 0;
for(int i = index; i < numRows; i++)
{
tempMaxSum = 0;
j = 0;
for(int h = 0; h < numCols; h++)
{
prefSum[h] = prefSum[h] + dev_inputArray[i * numRows + h];
tempMaxSum = tempMaxSum + prefSum[h]; // t is the prefix sum of the strip start at row z to row x
if( tempMaxSum > candMaxSubArr)
{
candMaxSubArr = tempMaxSum;
dev_maxValues[index].val = candMaxSubArr;
dev_maxValues[index].x1 = index;
dev_maxValues[index].y1 = j;
dev_maxValues[index].x2 = i;
dev_maxValues[index].y2 = h;
}
if( tempMaxSum < 0 )
{
tempMaxSum = 0;
j = h + 1;
}
}
}
}
/*
__global__ void reduction(Max* g_data, const int blockSize, int& idxMax)
{
//allocate shared memory
__shared__ float gs_data[blockSize];
//thread index
int tx = threadIdx.x;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int indOfMax=0;
//copy data to shared memory
gs_data[tx] = g_data[tid].val;
__syncthreads();
//working on the left half of the array to prevent divergence
for(int i = (blockDim.x/2); i>0; i/=2 )
{
if(tx < i)
if(gs_data[tx+i]>gs_data[tx])
{
gs_data[tx] = gs_data[tx+i];
indOfMax = tid;
}
__syncthreads();
}
if (tx==0)
idxMax = indOfMax;
}
*/
#endif //_MAXSUBARRAY_KERNEL_CU_
| 4a4a53f62f509faa680b530f2e02c6816b2be269.cu | /*********************************************************************
* Copyright (c) Marwan Abdellah. All rights reserved.
* This code is part of my Master's Thesis Project entitled "High
* Performance Fourier Volume Rendering on Graphics Processing Units
* (GPUs)" and submitted to the Systems & Biomedical Engineering
* Department, Faculty of Engineering, Cairo University.
* Please, don't use or distribute without authors' permission.
* File : Volume
* Author(s) : Marwan Abdellah <[email protected]>
* Created : April 2011
* Description :
* Note(s) :
*********************************************************************/
#ifndef _MAXSUBARRAY_KERNEL_CU_
#define _MAXSUBARRAY_KERNEL_CU_
#include <cutil_inline.h>
#include "Shared.h"
__global__
void findMax(int numRows, int numCols, Max* dev_maxValues, int* dev_inputArray)
{
// Calculating the correct index from the configuration
int index = blockIdx.x * blockDim.x + threadIdx.x;
// In between parameters
// TODO: Salah to indicate what are the parameters
int tempMaxSum = 0;
int candMaxSubArr = 0;
int j = 0;
int prefSum [1024];
dev_maxValues[index].val = 0;
// Resetting the prefix sum array
for(int iCtr = 0; iCtr < numCols; iCtr++)
prefSum[iCtr] = 0;
for(int i = index; i < numRows; i++)
{
tempMaxSum = 0;
j = 0;
for(int h = 0; h < numCols; h++)
{
prefSum[h] = prefSum[h] + dev_inputArray[i * numRows + h];
tempMaxSum = tempMaxSum + prefSum[h]; // t is the prefix sum of the strip start at row z to row x
if( tempMaxSum > candMaxSubArr)
{
candMaxSubArr = tempMaxSum;
dev_maxValues[index].val = candMaxSubArr;
dev_maxValues[index].x1 = index;
dev_maxValues[index].y1 = j;
dev_maxValues[index].x2 = i;
dev_maxValues[index].y2 = h;
}
if( tempMaxSum < 0 )
{
tempMaxSum = 0;
j = h + 1;
}
}
}
}
/*
__global__ void reduction(Max* g_data, const int blockSize, int& idxMax)
{
//allocate shared memory
__shared__ float gs_data[blockSize];
//thread index
int tx = threadIdx.x;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int indOfMax=0;
//copy data to shared memory
gs_data[tx] = g_data[tid].val;
__syncthreads();
//working on the left half of the array to prevent divergence
for(int i = (blockDim.x/2); i>0; i/=2 )
{
if(tx < i)
if(gs_data[tx+i]>gs_data[tx])
{
gs_data[tx] = gs_data[tx+i];
indOfMax = tid;
}
__syncthreads();
}
if (tx==0)
idxMax = indOfMax;
}
*/
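/* Editor's sketch (not part of the original file): with the device-side reduction above
   left commented out, the per-row results written by findMax can simply be scanned on
   the host after a cudaMemcpy back. The helper below is hypothetical; it only assumes
   the Max struct from Shared.h has the val field used by findMax. */
inline int pickBestRow(const Max* maxValues, int numRows)
{
    int best = 0;
    for (int i = 1; i < numRows; ++i)       /* linear scan; one candidate per starting row */
        if (maxValues[i].val > maxValues[best].val)
            best = i;
    return best;                            /* index of the strip with the largest subarray sum */
}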
#endif //_MAXSUBARRAY_KERNEL_CU_
|
7e0db595a9440543c3256b59f090ed79d07ecd52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "darknet/convolutional_layer.h"
#include "darknet/batchnorm_layer.h"
#include "darknet/gemm.h"
#include "darknet/blas.h"
#include "darknet/im2col.h"
#include "darknet/col2im.h"
#include "darknet/utils.h"
#include "darknet/cuda.h"
}
__global__ void binarize_kernel(float *x, int n, float *binary)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= n) return;
binary[i] = (x[i] >= 0) ? 1 : -1;
}
void binarize_gpu(float *x, int n, float *binary)
{
hipLaunchKernelGGL(( binarize_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, binary);
check_error(hipPeekAtLastError());
}
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (s >= size) return;
int i = 0;
float mean = 0;
for(i = 0; i < n; ++i){
mean += abs(input[i*size + s]);
}
mean = mean / n;
for(i = 0; i < n; ++i){
binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
}
}
void binarize_input_gpu(float *input, int n, int size, float *binary)
{
hipLaunchKernelGGL(( binarize_input_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, input, n, size, binary);
check_error(hipPeekAtLastError());
}
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (f >= n) return;
int i = 0;
float mean = 0;
for(i = 0; i < size; ++i){
mean += abs(weights[f*size + i]);
}
mean = mean / size;
for(i = 0; i < size; ++i){
binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
//binary[f*size + i] = weights[f*size + i];
}
}
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
hipLaunchKernelGGL(( binarize_weights_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, weights, n, size, binary);
check_error(hipPeekAtLastError());
}
void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
if(l.binary){
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if(l.xnor){
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
state.input = l.binary_input_gpu;
}
#ifdef CUDNN
float one = 1;
cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
#else
int i;
int m = l.n;
int k = l.size*l.size*l.c;
int n = l.out_w*l.out_h;
for(i = 0; i < l.batch; ++i){
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
float * a = l.weights_gpu;
float * b = state.workspace;
float * c = l.output_gpu;
gemm_ongpu(0,0,m,n,k,1.,a,k,b,n,1.,c+i*m*n,n);
}
#endif
if (l.batch_normalize) {
forward_batchnorm_layer_gpu(l, state);
}
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if(l.binary || l.xnor) swap_binary(&l);
}
void backward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
if(l.batch_normalize){
backward_batchnorm_layer_gpu(l, state);
//axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.x_gpu, 1, l.delta_gpu, 1);
} else {
//axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.output_gpu, 1, l.delta_gpu, 1);
}
float *original_input = state.input;
if(l.xnor) state.input = l.binary_input_gpu;
#ifdef CUDNN
float one = 1;
cudnnConvolutionBackwardFilter(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bf_algo,
state.workspace,
l.workspace_size,
&one,
l.dweightDesc,
l.weight_updates_gpu);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
cudnnConvolutionBackwardData(cudnn_handle(),
&one,
l.weightDesc,
l.weights_gpu,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bd_algo,
state.workspace,
l.workspace_size,
&one,
l.dsrcTensorDesc,
state.delta);
if(l.binary || l.xnor) swap_binary(&l);
if(l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
}
#else
int m = l.n;
int n = l.size*l.size*l.c;
int k = l.out_w*l.out_h;
int i;
for(i = 0; i < l.batch; ++i){
float * a = l.delta_gpu;
float * b = state.workspace;
float * c = l.weight_updates_gpu;
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
gemm_ongpu(0,1,m,n,k,1,a + i*m*k,k,b,k,1,c,n);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
float * a = l.weights_gpu;
float * b = l.delta_gpu;
float * c = state.workspace;
gemm_ongpu(1,0,n,k,m,1,a,n,b + i*k*m,k,0,c,k);
col2im_ongpu(state.workspace, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta + i*l.c*l.h*l.w);
if(l.binary || l.xnor) {
swap_binary(&l);
}
if(l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w);
}
}
#endif
}
void pull_convolutional_layer(convolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_pull_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void push_convolutional_layer(convolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_push_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
if(layer.scales_gpu){
axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1);
scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1);
}
if(layer.adam){
scal_ongpu(size, layer.B1, layer.m_gpu, 1);
scal_ongpu(size, layer.B2, layer.v_gpu, 1);
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1);
mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1);
adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1);
fill_ongpu(size, 0, layer.weight_updates_gpu, 1);
}else{
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
}
}
| 7e0db595a9440543c3256b59f090ed79d07ecd52.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "darknet/convolutional_layer.h"
#include "darknet/batchnorm_layer.h"
#include "darknet/gemm.h"
#include "darknet/blas.h"
#include "darknet/im2col.h"
#include "darknet/col2im.h"
#include "darknet/utils.h"
#include "darknet/cuda.h"
}
__global__ void binarize_kernel(float *x, int n, float *binary)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= n) return;
binary[i] = (x[i] >= 0) ? 1 : -1;
}
void binarize_gpu(float *x, int n, float *binary)
{
binarize_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, binary);
check_error(cudaPeekAtLastError());
}
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (s >= size) return;
int i = 0;
float mean = 0;
for(i = 0; i < n; ++i){
mean += abs(input[i*size + s]);
}
mean = mean / n;
for(i = 0; i < n; ++i){
binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
}
}
void binarize_input_gpu(float *input, int n, int size, float *binary)
{
binarize_input_kernel<<<cuda_gridsize(size), BLOCK>>>(input, n, size, binary);
check_error(cudaPeekAtLastError());
}
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (f >= n) return;
int i = 0;
float mean = 0;
for(i = 0; i < size; ++i){
mean += abs(weights[f*size + i]);
}
mean = mean / size;
for(i = 0; i < size; ++i){
binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
//binary[f*size + i] = weights[f*size + i];
}
}
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
binarize_weights_kernel<<<cuda_gridsize(n), BLOCK>>>(weights, n, size, binary);
check_error(cudaPeekAtLastError());
}
void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
if(l.binary){
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if(l.xnor){
binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
state.input = l.binary_input_gpu;
}
#ifdef CUDNN
float one = 1;
cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
state.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
#else
int i;
int m = l.n;
int k = l.size*l.size*l.c;
int n = l.out_w*l.out_h;
for(i = 0; i < l.batch; ++i){
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
float * a = l.weights_gpu;
float * b = state.workspace;
float * c = l.output_gpu;
gemm_ongpu(0,0,m,n,k,1.,a,k,b,n,1.,c+i*m*n,n);
}
#endif
if (l.batch_normalize) {
forward_batchnorm_layer_gpu(l, state);
}
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if(l.binary || l.xnor) swap_binary(&l);
}
void backward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
if(l.batch_normalize){
backward_batchnorm_layer_gpu(l, state);
//axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.x_gpu, 1, l.delta_gpu, 1);
} else {
//axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.output_gpu, 1, l.delta_gpu, 1);
}
float *original_input = state.input;
if(l.xnor) state.input = l.binary_input_gpu;
#ifdef CUDNN
float one = 1;
cudnnConvolutionBackwardFilter(cudnn_handle(),
&one,
l.srcTensorDesc,
state.input,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bf_algo,
state.workspace,
l.workspace_size,
&one,
l.dweightDesc,
l.weight_updates_gpu);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
cudnnConvolutionBackwardData(cudnn_handle(),
&one,
l.weightDesc,
l.weights_gpu,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bd_algo,
state.workspace,
l.workspace_size,
&one,
l.dsrcTensorDesc,
state.delta);
if(l.binary || l.xnor) swap_binary(&l);
if(l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
}
#else
int m = l.n;
int n = l.size*l.size*l.c;
int k = l.out_w*l.out_h;
int i;
for(i = 0; i < l.batch; ++i){
float * a = l.delta_gpu;
float * b = state.workspace;
float * c = l.weight_updates_gpu;
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
gemm_ongpu(0,1,m,n,k,1,a + i*m*k,k,b,k,1,c,n);
if(state.delta){
if(l.binary || l.xnor) swap_binary(&l);
float * a = l.weights_gpu;
float * b = l.delta_gpu;
float * c = state.workspace;
gemm_ongpu(1,0,n,k,m,1,a,n,b + i*k*m,k,0,c,k);
col2im_ongpu(state.workspace, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta + i*l.c*l.h*l.w);
if(l.binary || l.xnor) {
swap_binary(&l);
}
if(l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w);
}
}
#endif
}
void pull_convolutional_layer(convolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_pull_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void push_convolutional_layer(convolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
if (layer.batch_normalize){
cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
if (layer.adam){
cuda_push_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
}
}
void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
if(layer.scales_gpu){
axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1);
scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1);
}
if(layer.adam){
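// Adam branch: m and v are decayed by B1 and B2, L2 weight decay is folded into
// weight_updates_gpu, the (1-B1)- and (1-B2)-weighted update and its square are accumulated
// into m and v (mul_ongpu squares weight_updates_gpu in place), and adam_gpu then applies the
// bias-corrected step before the update buffer is zeroed.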
scal_ongpu(size, layer.B1, layer.m_gpu, 1);
scal_ongpu(size, layer.B2, layer.v_gpu, 1);
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1);
mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1);
adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1);
fill_ongpu(size, 0, layer.weight_updates_gpu, 1);
}else{
axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
}
}
|
b6daebe0efe859477aa0a94c324dbe8ffab301ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "CUDA error: ", hipGetErrorString(err)); \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
//@@ Define any useful program-wide constants here
#define MASK_WIDTH 3
#define TILE_WIDTH 8
#define O_TILE_WIDTH 6
//@@ Define constant memory for device kernel here
__constant__ float deviceKernel[MASK_WIDTH*MASK_WIDTH*MASK_WIDTH];
__global__ void conv3d(float *input, float *output, const int z_size,
const int y_size, const int x_size) {
//@@ Insert kernel code here
int tx = threadIdx.x;
int ty = threadIdx.y;
int tz = threadIdx.z;
int row_o = blockIdx.y*O_TILE_WIDTH + ty;
int col_o = blockIdx.x*O_TILE_WIDTH + tx;
int dep_o = blockIdx.z*O_TILE_WIDTH + tz;
int row_i = row_o - (MASK_WIDTH/2);
int col_i = col_o - (MASK_WIDTH/2);
int dep_i = dep_o - (MASK_WIDTH/2);
__shared__ float N_ds[O_TILE_WIDTH+MASK_WIDTH-1][O_TILE_WIDTH+MASK_WIDTH-1][O_TILE_WIDTH+MASK_WIDTH-1];
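// N_ds holds one input tile plus its halo: the block is TILE_WIDTH^3 = (O_TILE_WIDTH + MASK_WIDTH - 1)^3
// threads, so every thread loads exactly one (zero-padded) input element, while only the first
// O_TILE_WIDTH^3 threads go on to compute an output element below.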
if( (row_i >= 0) && (row_i < y_size) && (col_i >= 0) && (col_i < x_size) && (dep_i >= 0) && (dep_i < z_size) ) {
N_ds[tz][ty][tx] = input[col_i + (row_i * x_size) + (dep_i * y_size * x_size)];
} else {
N_ds[tz][ty][tx] = 0.0f;
}
__syncthreads();
float Pvalue = 0.0f;
if(ty < O_TILE_WIDTH && tx < O_TILE_WIDTH && tz < O_TILE_WIDTH) {
for(int z=0; z<MASK_WIDTH; z++) {
for(int y=0; y<MASK_WIDTH; y++) {
for(int x=0; x<MASK_WIDTH; x++) {
Pvalue += N_ds[z+tz][y+ty][x+tx] * deviceKernel[x + (y*MASK_WIDTH) + (z*MASK_WIDTH*MASK_WIDTH)];
}
}
}
if(row_o < y_size && col_o < x_size && dep_o < z_size )
output[col_o + (row_o * x_size) + (dep_o * y_size * x_size)] = Pvalue;
}
__syncthreads();
}
int main(int argc, char *argv[]) {
wbArg_t args;
int z_size;
int y_size;
int x_size;
int inputLength, kernelLength;
float *hostInput;
float *hostKernel;
float *hostOutput;
float *deviceInput;
float *deviceOutput;
args = wbArg_read(argc, argv);
// Import data
hostInput = (float *)wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostKernel =
(float *)wbImport(wbArg_getInputFile(args, 1), &kernelLength);
hostOutput = (float *)malloc(inputLength * sizeof(float));
// First three elements are the input dimensions
z_size = hostInput[0];
y_size = hostInput[1];
x_size = hostInput[2];
wbLog(TRACE, "The input size is ", z_size, "x", y_size, "x", x_size);
assert(z_size * y_size * x_size == inputLength - 3);
assert(kernelLength == 27);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
//@@ Allocate GPU memory here
hipMalloc((void **) &deviceInput, (inputLength-3)*sizeof(float));
hipMalloc((void **) &deviceOutput, (inputLength-3)*sizeof(float));
// Recall that inputLength is 3 elements longer than the input data
// because the first three elements were the dimensions
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
//@@ Copy input and kernel to GPU here
hipMemcpy(deviceInput, &hostInput[3], (inputLength-3)*sizeof(float), hipMemcpyHostToDevice);
hipMemcpyToSymbol(deviceKernel, hostKernel, kernelLength*sizeof(float));
// Recall that the first three elements of hostInput are dimensions and
// do
// not need to be copied to the gpu
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ Initialize grid and block dimensions here
dim3 dimGrid(ceil(x_size/(1.0*O_TILE_WIDTH)), ceil(y_size/(1.0*O_TILE_WIDTH)), ceil(z_size/(1.0*O_TILE_WIDTH)));
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, TILE_WIDTH);
//@@ Launch the GPU kernel here
hipLaunchKernelGGL(( conv3d), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceInput, deviceOutput, z_size, y_size, x_size);
hipDeviceSynchronize();
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
//@@ Copy the device memory back to the host here
hipMemcpy(&hostOutput[3], deviceOutput, (inputLength-3)*sizeof(float), hipMemcpyDeviceToHost);
// Recall that the first three elements of the output are the dimensions
// and should not be set here (they are set below)
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
// Set the output dimensions for correctness checking
hostOutput[0] = z_size;
hostOutput[1] = y_size;
hostOutput[2] = x_size;
wbSolution(args, hostOutput, inputLength);
// Free device memory
hipFree(deviceInput);
hipFree(deviceOutput);
// Free host memory
free(hostInput);
free(hostOutput);
return 0;
}
| b6daebe0efe859477aa0a94c324dbe8ffab301ae.cu | #include <wb.h>
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "CUDA error: ", cudaGetErrorString(err)); \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
//@@ Define any useful program-wide constants here
#define MASK_WIDTH 3
#define TILE_WIDTH 8
#define O_TILE_WIDTH 6
//@@ Define constant memory for device kernel here
__constant__ float deviceKernel[MASK_WIDTH*MASK_WIDTH*MASK_WIDTH];
__global__ void conv3d(float *input, float *output, const int z_size,
const int y_size, const int x_size) {
//@@ Insert kernel code here
int tx = threadIdx.x;
int ty = threadIdx.y;
int tz = threadIdx.z;
int row_o = blockIdx.y*O_TILE_WIDTH + ty;
int col_o = blockIdx.x*O_TILE_WIDTH + tx;
int dep_o = blockIdx.z*O_TILE_WIDTH + tz;
int row_i = row_o - (MASK_WIDTH/2);
int col_i = col_o - (MASK_WIDTH/2);
int dep_i = dep_o - (MASK_WIDTH/2);
__shared__ float N_ds[O_TILE_WIDTH+MASK_WIDTH-1][O_TILE_WIDTH+MASK_WIDTH-1][O_TILE_WIDTH+MASK_WIDTH-1];
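// N_ds holds one input tile plus its halo: the block is TILE_WIDTH^3 = (O_TILE_WIDTH + MASK_WIDTH - 1)^3
// threads, so every thread loads exactly one (zero-padded) input element, while only the first
// O_TILE_WIDTH^3 threads go on to compute an output element below.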
if( (row_i >= 0) && (row_i < y_size) && (col_i >= 0) && (col_i < x_size) && (dep_i >= 0) && (dep_i < z_size) ) {
N_ds[tz][ty][tx] = input[col_i + (row_i * x_size) + (dep_i * y_size * x_size)];
} else {
N_ds[tz][ty][tx] = 0.0f;
}
__syncthreads();
float Pvalue = 0.0f;
if(ty < O_TILE_WIDTH && tx < O_TILE_WIDTH && tz < O_TILE_WIDTH) {
for(int z=0; z<MASK_WIDTH; z++) {
for(int y=0; y<MASK_WIDTH; y++) {
for(int x=0; x<MASK_WIDTH; x++) {
Pvalue += N_ds[z+tz][y+ty][x+tx] * deviceKernel[x + (y*MASK_WIDTH) + (z*MASK_WIDTH*MASK_WIDTH)];
}
}
}
if(row_o < y_size && col_o < x_size && dep_o < z_size )
output[col_o + (row_o * x_size) + (dep_o * y_size * x_size)] = Pvalue;
}
__syncthreads();
}
int main(int argc, char *argv[]) {
wbArg_t args;
int z_size;
int y_size;
int x_size;
int inputLength, kernelLength;
float *hostInput;
float *hostKernel;
float *hostOutput;
float *deviceInput;
float *deviceOutput;
args = wbArg_read(argc, argv);
// Import data
hostInput = (float *)wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostKernel =
(float *)wbImport(wbArg_getInputFile(args, 1), &kernelLength);
hostOutput = (float *)malloc(inputLength * sizeof(float));
// First three elements are the input dimensions
z_size = hostInput[0];
y_size = hostInput[1];
x_size = hostInput[2];
wbLog(TRACE, "The input size is ", z_size, "x", y_size, "x", x_size);
assert(z_size * y_size * x_size == inputLength - 3);
assert(kernelLength == 27);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
//@@ Allocate GPU memory here
cudaMalloc((void **) &deviceInput, (inputLength-3)*sizeof(float));
cudaMalloc((void **) &deviceOutput, (inputLength-3)*sizeof(float));
// Recall that inputLength is 3 elements longer than the input data
// because the first three elements were the dimensions
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
//@@ Copy input and kernel to GPU here
cudaMemcpy(deviceInput, &hostInput[3], (inputLength-3)*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(deviceKernel, hostKernel, kernelLength*sizeof(float));
// Recall that the first three elements of hostInput are dimensions and
// do
// not need to be copied to the gpu
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ Initialize grid and block dimensions here
dim3 dimGrid(ceil(x_size/(1.0*O_TILE_WIDTH)), ceil(y_size/(1.0*O_TILE_WIDTH)), ceil(z_size/(1.0*O_TILE_WIDTH)));
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, TILE_WIDTH);
//@@ Launch the GPU kernel here
conv3d<<<dimGrid, dimBlock>>>(deviceInput, deviceOutput, z_size, y_size, x_size);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
//@@ Copy the device memory back to the host here
cudaMemcpy(&hostOutput[3], deviceOutput, (inputLength-3)*sizeof(float), cudaMemcpyDeviceToHost);
// Recall that the first three elements of the output are the dimensions
// and should not be set here (they are set below)
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
// Set the output dimensions for correctness checking
hostOutput[0] = z_size;
hostOutput[1] = y_size;
hostOutput[2] = x_size;
wbSolution(args, hostOutput, inputLength);
// Free device memory
cudaFree(deviceInput);
cudaFree(deviceOutput);
// Free host memory
free(hostInput);
free(hostOutput);
return 0;
}
|
f3c1595bbe95b2fbce9445813d327f4e75d605a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "delta_afl_gpu.cuh"
#include "core/cuda_macros.cuh"
#include "core/macros.h"
#include <stdio.h>
template <typename T, char CWARP_SIZE>
__device__ void delta_afl_compress_base_gpu(
const unsigned int bit_length,
unsigned long data_id,
unsigned long comp_data_id,
T *data,
T *compressed_data,
T* compressed_data_block_start,
unsigned long length
)
{
if (data_id >= length) return;
// TODO: Compressed data should always be unsigned, fix that later
T v1;
unsigned int uv1;
unsigned int value = 0;
unsigned int v1_pos=0, v1_len;
unsigned long pos=comp_data_id, pos_data=data_id;
unsigned int sgn = 0;
T zeroLaneValue, v2;
const unsigned long lane = get_lane_id();
char neighborId = lane - 1;
const unsigned long data_block = ( blockIdx.x * blockDim.x) / CWARP_SIZE + threadIdx.x / CWARP_SIZE;
if (lane == 0 ) {
neighborId = 31;
zeroLaneValue = data[pos_data];
compressed_data_block_start[data_block] = zeroLaneValue;
}
for (unsigned int i = 0; i < CWORD_SIZE(T) && pos_data < length; ++i)
{
v1 = data[pos_data];
pos_data += CWARP_SIZE;
v2 = shfl_get_value(v1, neighborId);
if (lane == 0)
{
// Lane 0 uses data from previous iteration
v1 = zeroLaneValue - v1;
zeroLaneValue = v2;
} else {
v1 = v2 - v1;
}
//TODO: ugly hack, fix that with correct bfe calls
sgn = ((unsigned int) v1) >> 31;
uv1 = max(v1, -v1);
// END: ugly hack
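// Deltas are stored in sign/magnitude form: uv1 carries the absolute value and sgn ends up in
// the top bit of each bit_length-wide field, which is exactly what the decompressor's
// sgn_multiply / NBITSTOMASK(bit_length-1) logic undoes.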
if (v1_pos >= CWORD_SIZE(T) - bit_length){
v1_len = CWORD_SIZE(T) - v1_pos;
value = value | (GETNBITS(uv1, v1_len) << v1_pos);
if (v1_pos == CWORD_SIZE(T) - bit_length) // whole word
value |= (GETNBITS(uv1, v1_len - 1) | (sgn << (v1_len - 1))) << (v1_pos);
else // beginning of the word
value |= GETNBITS(uv1, v1_len) << (v1_pos);
compressed_data[pos] = reinterpret_cast<int&>(value);
v1_pos = bit_length - v1_len;
value = 0;
// the if is necessary, as otherwise we could end up with a negative bit shift
if (v1_pos > 0) // The last part of the word
value = (GETNPBITS(uv1, v1_pos - 1, v1_len)) | (sgn << (v1_pos - 1));
pos += CWARP_SIZE;
} else {
v1_len = bit_length;
value |= (GETNBITS(uv1, v1_len-1) | (sgn << (v1_len-1))) << v1_pos;
v1_pos += v1_len;
}
}
if (pos_data >= length && pos_data < length + CWARP_SIZE)
{
compressed_data[pos] = reinterpret_cast<int&>(value);
}
}
template <typename T, char CWARP_SIZE>
__device__ void delta_afl_decompress_base_gpu(
const unsigned int bit_length,
unsigned long comp_data_id,
unsigned long data_id,
T *compressed_data,
T* compressed_data_block_start,
T *data,
unsigned long length
)
{
unsigned long pos = comp_data_id, pos_decomp = data_id;
unsigned int v1_pos = 0, v1_len;
unsigned int v1;
unsigned int ret;
int sret;
const unsigned long lane = get_lane_id();
if (pos_decomp >= length ) // Decompress no more elements than length
return;
v1 = reinterpret_cast<unsigned int &>(compressed_data[pos]);
T zeroLaneValue = 0, v2 = 0;
const unsigned long data_block = (blockIdx.x * blockDim.x) / CWARP_SIZE + threadIdx.x / CWARP_SIZE;
if (lane == 0) {
zeroLaneValue = compressed_data_block_start[data_block];
}
for (unsigned int i = 0; i < CWORD_SIZE(T) && pos_decomp < length; ++i)
{
if (v1_pos >= CWORD_SIZE(T) - bit_length){
v1_len = CWORD_SIZE(T) - v1_pos;
ret = GETNPBITS(v1, v1_len, v1_pos);
pos += CWARP_SIZE;
v1 = reinterpret_cast<unsigned int &>(compressed_data[pos]);
v1_pos = bit_length - v1_len;
ret = ret | ((GETNBITS(v1, v1_pos))<< v1_len);
} else {
v1_len = bit_length;
ret = GETNPBITS(v1, v1_len, v1_pos);
v1_pos += v1_len;
}
// TODO: dirty hack
int sgn_multiply = (ret >> (bit_length-1)) ? -1 : 1;
// END
ret &= NBITSTOMASK(bit_length-1);
sret = sgn_multiply * (int)(ret);
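// Rebuild absolute values: a warp-level inclusive prefix sum combines the per-lane deltas,
// the block's stored start value (broadcast from lane 0) anchors them as start - running sum,
// and lane 31's result is carried over as the start value for the next iteration.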
sret = shfl_prefix_sum(sret); // prefix sum deltas
v2 = shfl_get_value(zeroLaneValue, 0);
sret = v2 - sret;
data[pos_decomp] = sret;
pos_decomp += CWARP_SIZE;
v2 = shfl_get_value(sret, 31);
if(lane == 0)
zeroLaneValue = v2;
}
}
template < typename T, char CWARP_SIZE >
__global__ void delta_afl_compress_gpu (const unsigned int bit_length, T *data, T *compressed_data, T* compressed_data_block_start, unsigned long length)
{
const unsigned int warp_lane = get_lane_id();
const unsigned long data_block = blockIdx.x * blockDim.x + threadIdx.x - warp_lane;
const unsigned long data_id = data_block * CWORD_SIZE(T) + warp_lane;
const unsigned long cdata_id = data_block * bit_length + warp_lane;
delta_afl_compress_base_gpu <T, CWARP_SIZE> (bit_length, data_id, cdata_id, data, compressed_data, compressed_data_block_start, length);
}
template < typename T, char CWARP_SIZE >
__global__ void delta_afl_decompress_gpu (const unsigned int bit_length, T *compressed_data, T* compressed_data_block_start, T * decompress_data, unsigned long length)
{
const unsigned int warp_lane = get_lane_id();
const unsigned long data_block = blockIdx.x * blockDim.x + threadIdx.x - warp_lane;
const unsigned long data_id = data_block * CWORD_SIZE(T) + warp_lane;
const unsigned long cdata_id = data_block * bit_length + warp_lane;
delta_afl_decompress_base_gpu <T, CWARP_SIZE> (bit_length, cdata_id, data_id, compressed_data, compressed_data_block_start, decompress_data, length);
}
template < typename T, char CWARP_SIZE >
__host__ void run_delta_afl_compress_gpu(const unsigned int bit_length, T *data, T *compressed_data, T* compressed_data_block_start, unsigned long length)
{
const unsigned int block_size = CWARP_SIZE * 8; // better occupancy
const unsigned long block_number = (length + block_size * CWORD_SIZE(T) - 1) / (block_size * CWORD_SIZE(T));
hipLaunchKernelGGL(( delta_afl_compress_gpu <T, CWARP_SIZE>) , dim3(block_number), dim3(block_size), 0, 0, bit_length, data, compressed_data, compressed_data_block_start,length);
}
template < typename T, char CWARP_SIZE >
__host__ void run_delta_afl_decompress_gpu(const unsigned int bit_length, T *compressed_data, T* compressed_data_block_start, T *data, unsigned long length)
{
const unsigned int block_size = CWARP_SIZE * 8; // better occupancy
const unsigned long block_number = (length + block_size * CWORD_SIZE(T) - 1) / (block_size * CWORD_SIZE(T));
hipLaunchKernelGGL(( delta_afl_decompress_gpu <T, CWARP_SIZE>) , dim3(block_number), dim3(block_size), 0, 0, bit_length, compressed_data, compressed_data_block_start,data, length);
}
#define DELTA_GFL_SPEC(X, A) \
template __host__ void run_delta_afl_decompress_gpu<X, A> (const unsigned int bit_length, X *compressed_data, X* compressed_data_block_start, X *data, unsigned long length);\
template __host__ void run_delta_afl_compress_gpu<X, A> (const unsigned int bit_length, X *data, X *compressed_data, X* compressed_data_block_start, unsigned long length);\
template __global__ void delta_afl_decompress_gpu <X, A> (const unsigned int bit_length, X *compressed_data, X* compressed_data_block_start, X * decompress_data, unsigned long length);\
template __global__ void delta_afl_compress_gpu <X, A> (const unsigned int bit_length, X *data, X *compressed_data, X* compressed_data_block_start, unsigned long length);\
template __device__ void delta_afl_decompress_base_gpu <X, A> ( const unsigned int bit_length, unsigned long comp_data_id, unsigned long data_id, X *compressed_data, X* compressed_data_block_start, X *data, unsigned long length);\
template __device__ void delta_afl_compress_base_gpu <X, A> (const unsigned int bit_length, unsigned long data_id, unsigned long comp_data_id, X *data, X *compressed_data, X* compressed_data_block_start, unsigned long length);
#define DELTA_AFL_SPEC(X) DELTA_GFL_SPEC(X, 32)
FOR_EACH(DELTA_AFL_SPEC, char, short, int, long, unsigned int)
#define DELTA_FL_SPEC(X) DELTA_GFL_SPEC(X, 1)
FOR_EACH(DELTA_FL_SPEC, char, short, int, long, unsigned int)
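// Illustrative host-side usage (a sketch only, not part of the original sources; it assumes the
// caller has already allocated and sized the device buffers d_in, d_comp, d_block_start, d_out):
// run_delta_afl_compress_gpu<int, 32>(bit_length, d_in, d_comp, d_block_start, length);
// run_delta_afl_decompress_gpu<int, 32>(bit_length, d_comp, d_block_start, d_out, length);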
| f3c1595bbe95b2fbce9445813d327f4e75d605a4.cu | #include "delta_afl_gpu.cuh"
#include "core/cuda_macros.cuh"
#include "core/macros.h"
#include <stdio.h>
template <typename T, char CWARP_SIZE>
__device__ void delta_afl_compress_base_gpu(
const unsigned int bit_length,
unsigned long data_id,
unsigned long comp_data_id,
T *data,
T *compressed_data,
T* compressed_data_block_start,
unsigned long length
)
{
if (data_id >= length) return;
// TODO: Compressed data should always be unsigned, fix that later
T v1;
unsigned int uv1;
unsigned int value = 0;
unsigned int v1_pos=0, v1_len;
unsigned long pos=comp_data_id, pos_data=data_id;
unsigned int sgn = 0;
T zeroLaneValue, v2;
const unsigned long lane = get_lane_id();
char neighborId = lane - 1;
const unsigned long data_block = ( blockIdx.x * blockDim.x) / CWARP_SIZE + threadIdx.x / CWARP_SIZE;
if (lane == 0 ) {
neighborId = 31;
zeroLaneValue = data[pos_data];
compressed_data_block_start[data_block] = zeroLaneValue;
}
for (unsigned int i = 0; i < CWORD_SIZE(T) && pos_data < length; ++i)
{
v1 = data[pos_data];
pos_data += CWARP_SIZE;
v2 = shfl_get_value(v1, neighborId);
if (lane == 0)
{
// Lane 0 uses data from previous iteration
v1 = zeroLaneValue - v1;
zeroLaneValue = v2;
} else {
v1 = v2 - v1;
}
//TODO: ugly hack, fix that with correct bfe calls
sgn = ((unsigned int) v1) >> 31;
uv1 = max(v1, -v1);
// END: ugly hack
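// Deltas are stored in sign/magnitude form: uv1 carries the absolute value and sgn ends up in
// the top bit of each bit_length-wide field, which is exactly what the decompressor's
// sgn_multiply / NBITSTOMASK(bit_length-1) logic undoes.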
if (v1_pos >= CWORD_SIZE(T) - bit_length){
v1_len = CWORD_SIZE(T) - v1_pos;
value = value | (GETNBITS(uv1, v1_len) << v1_pos);
if (v1_pos == CWORD_SIZE(T) - bit_length) // whole word
value |= (GETNBITS(uv1, v1_len - 1) | (sgn << (v1_len - 1))) << (v1_pos);
else // beginning of the word
value |= GETNBITS(uv1, v1_len) << (v1_pos);
compressed_data[pos] = reinterpret_cast<int&>(value);
v1_pos = bit_length - v1_len;
value = 0;
// the if is necessary, as otherwise we could end up with a negative bit shift
if (v1_pos > 0) // The last part of the word
value = (GETNPBITS(uv1, v1_pos - 1, v1_len)) | (sgn << (v1_pos - 1));
pos += CWARP_SIZE;
} else {
v1_len = bit_length;
value |= (GETNBITS(uv1, v1_len-1) | (sgn << (v1_len-1))) << v1_pos;
v1_pos += v1_len;
}
}
if (pos_data >= length && pos_data < length + CWARP_SIZE)
{
compressed_data[pos] = reinterpret_cast<int&>(value);
}
}
template <typename T, char CWARP_SIZE>
__device__ void delta_afl_decompress_base_gpu(
const unsigned int bit_length,
unsigned long comp_data_id,
unsigned long data_id,
T *compressed_data,
T* compressed_data_block_start,
T *data,
unsigned long length
)
{
unsigned long pos = comp_data_id, pos_decomp = data_id;
unsigned int v1_pos = 0, v1_len;
unsigned int v1;
unsigned int ret;
int sret;
const unsigned long lane = get_lane_id();
if (pos_decomp >= length ) // Decompress no more elements than length
return;
v1 = reinterpret_cast<unsigned int &>(compressed_data[pos]);
T zeroLaneValue = 0, v2 = 0;
const unsigned long data_block = (blockIdx.x * blockDim.x) / CWARP_SIZE + threadIdx.x / CWARP_SIZE;
if (lane == 0) {
zeroLaneValue = compressed_data_block_start[data_block];
}
for (unsigned int i = 0; i < CWORD_SIZE(T) && pos_decomp < length; ++i)
{
if (v1_pos >= CWORD_SIZE(T) - bit_length){
v1_len = CWORD_SIZE(T) - v1_pos;
ret = GETNPBITS(v1, v1_len, v1_pos);
pos += CWARP_SIZE;
v1 = reinterpret_cast<unsigned int &>(compressed_data[pos]);
v1_pos = bit_length - v1_len;
ret = ret | ((GETNBITS(v1, v1_pos))<< v1_len);
} else {
v1_len = bit_length;
ret = GETNPBITS(v1, v1_len, v1_pos);
v1_pos += v1_len;
}
// TODO: dirty hack
int sgn_multiply = (ret >> (bit_length-1)) ? -1 : 1;
// END
ret &= NBITSTOMASK(bit_length-1);
sret = sgn_multiply * (int)(ret);
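// Rebuild absolute values: a warp-level inclusive prefix sum combines the per-lane deltas,
// the block's stored start value (broadcast from lane 0) anchors them as start - running sum,
// and lane 31's result is carried over as the start value for the next iteration.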
sret = shfl_prefix_sum(sret); // prefix sum deltas
v2 = shfl_get_value(zeroLaneValue, 0);
sret = v2 - sret;
data[pos_decomp] = sret;
pos_decomp += CWARP_SIZE;
v2 = shfl_get_value(sret, 31);
if(lane == 0)
zeroLaneValue = v2;
}
}
template < typename T, char CWARP_SIZE >
__global__ void delta_afl_compress_gpu (const unsigned int bit_length, T *data, T *compressed_data, T* compressed_data_block_start, unsigned long length)
{
const unsigned int warp_lane = get_lane_id();
const unsigned long data_block = blockIdx.x * blockDim.x + threadIdx.x - warp_lane;
const unsigned long data_id = data_block * CWORD_SIZE(T) + warp_lane;
const unsigned long cdata_id = data_block * bit_length + warp_lane;
delta_afl_compress_base_gpu <T, CWARP_SIZE> (bit_length, data_id, cdata_id, data, compressed_data, compressed_data_block_start, length);
}
template < typename T, char CWARP_SIZE >
__global__ void delta_afl_decompress_gpu (const unsigned int bit_length, T *compressed_data, T* compressed_data_block_start, T * decompress_data, unsigned long length)
{
const unsigned int warp_lane = get_lane_id();
const unsigned long data_block = blockIdx.x * blockDim.x + threadIdx.x - warp_lane;
const unsigned long data_id = data_block * CWORD_SIZE(T) + warp_lane;
const unsigned long cdata_id = data_block * bit_length + warp_lane;
delta_afl_decompress_base_gpu <T, CWARP_SIZE> (bit_length, cdata_id, data_id, compressed_data, compressed_data_block_start, decompress_data, length);
}
template < typename T, char CWARP_SIZE >
__host__ void run_delta_afl_compress_gpu(const unsigned int bit_length, T *data, T *compressed_data, T* compressed_data_block_start, unsigned long length)
{
const unsigned int block_size = CWARP_SIZE * 8; // better occupancy
const unsigned long block_number = (length + block_size * CWORD_SIZE(T) - 1) / (block_size * CWORD_SIZE(T));
delta_afl_compress_gpu <T, CWARP_SIZE> <<<block_number, block_size>>> (bit_length, data, compressed_data, compressed_data_block_start,length);
}
template < typename T, char CWARP_SIZE >
__host__ void run_delta_afl_decompress_gpu(const unsigned int bit_length, T *compressed_data, T* compressed_data_block_start, T *data, unsigned long length)
{
const unsigned int block_size = CWARP_SIZE * 8; // better occupancy
const unsigned long block_number = (length + block_size * CWORD_SIZE(T) - 1) / (block_size * CWORD_SIZE(T));
delta_afl_decompress_gpu <T, CWARP_SIZE> <<<block_number, block_size>>> (bit_length, compressed_data, compressed_data_block_start,data, length);
}
#define DELTA_GFL_SPEC(X, A) \
template __host__ void run_delta_afl_decompress_gpu<X, A> (const unsigned int bit_length, X *compressed_data, X* compressed_data_block_start, X *data, unsigned long length);\
template __host__ void run_delta_afl_compress_gpu<X, A> (const unsigned int bit_length, X *data, X *compressed_data, X* compressed_data_block_start, unsigned long length);\
template __global__ void delta_afl_decompress_gpu <X, A> (const unsigned int bit_length, X *compressed_data, X* compressed_data_block_start, X * decompress_data, unsigned long length);\
template __global__ void delta_afl_compress_gpu <X, A> (const unsigned int bit_length, X *data, X *compressed_data, X* compressed_data_block_start, unsigned long length);\
template __device__ void delta_afl_decompress_base_gpu <X, A> ( const unsigned int bit_length, unsigned long comp_data_id, unsigned long data_id, X *compressed_data, X* compressed_data_block_start, X *data, unsigned long length);\
template __device__ void delta_afl_compress_base_gpu <X, A> (const unsigned int bit_length, unsigned long data_id, unsigned long comp_data_id, X *data, X *compressed_data, X* compressed_data_block_start, unsigned long length);
#define DELTA_AFL_SPEC(X) DELTA_GFL_SPEC(X, 32)
FOR_EACH(DELTA_AFL_SPEC, char, short, int, long, unsigned int)
#define DELTA_FL_SPEC(X) DELTA_GFL_SPEC(X, 1)
FOR_EACH(DELTA_FL_SPEC, char, short, int, long, unsigned int)
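// Illustrative host-side usage (a sketch only, not part of the original sources; it assumes the
// caller has already allocated and sized the device buffers d_in, d_comp, d_block_start, d_out):
// run_delta_afl_compress_gpu<int, 32>(bit_length, d_in, d_comp, d_block_start, length);
// run_delta_afl_decompress_gpu<int, 32>(bit_length, d_comp, d_block_start, d_out, length);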
|
92019b4abaa8a18f945bdf801b06457840499a54.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/conv_bias/matmul/im2col_nhwc_int8.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "src/cuda/conv_bias/matmul/im2col_nhwc_int8.cuh"
#include "src/cuda/utils.cuh"
namespace {
template <bool flip>
__global__ void im2col_kern(const int8_t* __restrict src,
int8_t* __restrict unrolled, uint32_t N,
uint32_t IH, uint32_t IW, uint32_t IC, uint32_t IWS,
uint32_t OH, uint32_t OW, uint32_t OC, uint32_t OWS,
uint32_t FH, uint32_t FW, uint32_t PH, uint32_t PW,
uint32_t SH, uint32_t SW, uint32_t DH, uint32_t DW,
uint32_t LD) {
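// NHWC im2col: each thread owns one (oh, ow, ic) triple and writes the FH*FW filter taps for
// that channel into the unrolled matrix with row stride LD. 'flip' mirrors the filter order;
// out-of-range taps rely on unsigned wrap-around (ih/iw become huge, failing the bounds check)
// and are written as zero.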
uint32_t ic = blockIdx.x * 32 + threadIdx.x;
uint32_t ow = blockIdx.y * 4 + threadIdx.y;
uint32_t oh = blockIdx.z * 4 + threadIdx.z;
uint32_t offset = (oh * OW + ow) * LD + ic;
if (ic < IC && ow < OW && oh < OH) {
for (uint32_t fh = 0; fh < FH; ++fh) {
for (size_t fw = 0; fw < FW; ++fw) {
uint32_t ih = -PH + oh * SH + (flip ? FH - fh - 1 : fh) * DH;
uint32_t iw = -PW + ow * SW + (flip ? FW - fw - 1 : fw) * DW;
uint32_t i = offset + (fh * FW + fw) * IC;
if (ih < IH && iw < IW) {
unrolled[i] = src[(ih * IW + iw) * IWS + ic];
} else {
unrolled[i] = 0;
}
}
}
}
}
} // anonymous namespace
void megdnn::cuda::im2col_nhwc_int8(const int8_t* src, int8_t* unrolled,
uint32_t N, uint32_t IH, uint32_t IW,
uint32_t IC, uint32_t IWS, uint32_t OH,
uint32_t OW, uint32_t OC, uint32_t OWS,
uint32_t FH, uint32_t FW, uint32_t PH,
uint32_t PW, uint32_t SH, uint32_t SW,
uint32_t DH, uint32_t DW, uint32_t LD,
bool flip, hipStream_t stream) {
dim3 nthreads = dim3(32, 4, 4);
dim3 nblocks = dim3(DIVUP(IC, 32), DIVUP(OW, 4), DIVUP(OH, 4));
void (*kern_ptr)(const int8_t* __restrict src, int8_t* __restrict unrolled,
uint32_t N, uint32_t IH, uint32_t IW, uint32_t IC,
uint32_t IWS, uint32_t OH, uint32_t OW, uint32_t OC,
uint32_t OWS, uint32_t FH, uint32_t FW, uint32_t PH,
uint32_t PW, uint32_t SH, uint32_t SW, uint32_t DH,
uint32_t DW, uint32_t LD);
if (flip) {
kern_ptr = im2col_kern<true>;
} else {
kern_ptr = im2col_kern<false>;
}
for (size_t n = 0; n < N; ++n) {
hipLaunchKernelGGL(( kern_ptr), dim3(nblocks), dim3(nthreads), 0, stream,
src + n * IH * IW * IWS, unrolled + n * OH * OW * LD, N, IH, IW,
IC, IWS, OH, OW, OC, OWS, FH, FW, PH, PW, SH, SW, DH, DW, LD);
}
after_kernel_launch();
}
// vim: syntax=cpp.doxygen
| 92019b4abaa8a18f945bdf801b06457840499a54.cu | /**
* \file dnn/src/cuda/conv_bias/matmul/im2col_nhwc_int8.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "src/cuda/conv_bias/matmul/im2col_nhwc_int8.cuh"
#include "src/cuda/utils.cuh"
namespace {
template <bool flip>
__global__ void im2col_kern(const int8_t* __restrict src,
int8_t* __restrict unrolled, uint32_t N,
uint32_t IH, uint32_t IW, uint32_t IC, uint32_t IWS,
uint32_t OH, uint32_t OW, uint32_t OC, uint32_t OWS,
uint32_t FH, uint32_t FW, uint32_t PH, uint32_t PW,
uint32_t SH, uint32_t SW, uint32_t DH, uint32_t DW,
uint32_t LD) {
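// NHWC im2col: each thread owns one (oh, ow, ic) triple and writes the FH*FW filter taps for
// that channel into the unrolled matrix with row stride LD. 'flip' mirrors the filter order;
// out-of-range taps rely on unsigned wrap-around (ih/iw become huge, failing the bounds check)
// and are written as zero.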
uint32_t ic = blockIdx.x * 32 + threadIdx.x;
uint32_t ow = blockIdx.y * 4 + threadIdx.y;
uint32_t oh = blockIdx.z * 4 + threadIdx.z;
uint32_t offset = (oh * OW + ow) * LD + ic;
if (ic < IC && ow < OW && oh < OH) {
for (uint32_t fh = 0; fh < FH; ++fh) {
for (size_t fw = 0; fw < FW; ++fw) {
uint32_t ih = -PH + oh * SH + (flip ? FH - fh - 1 : fh) * DH;
uint32_t iw = -PW + ow * SW + (flip ? FW - fw - 1 : fw) * DW;
uint32_t i = offset + (fh * FW + fw) * IC;
if (ih < IH && iw < IW) {
unrolled[i] = src[(ih * IW + iw) * IWS + ic];
} else {
unrolled[i] = 0;
}
}
}
}
}
} // anonymous namespace
void megdnn::cuda::im2col_nhwc_int8(const int8_t* src, int8_t* unrolled,
uint32_t N, uint32_t IH, uint32_t IW,
uint32_t IC, uint32_t IWS, uint32_t OH,
uint32_t OW, uint32_t OC, uint32_t OWS,
uint32_t FH, uint32_t FW, uint32_t PH,
uint32_t PW, uint32_t SH, uint32_t SW,
uint32_t DH, uint32_t DW, uint32_t LD,
bool flip, cudaStream_t stream) {
dim3 nthreads = dim3(32, 4, 4);
dim3 nblocks = dim3(DIVUP(IC, 32), DIVUP(OW, 4), DIVUP(OH, 4));
void (*kern_ptr)(const int8_t* __restrict src, int8_t* __restrict unrolled,
uint32_t N, uint32_t IH, uint32_t IW, uint32_t IC,
uint32_t IWS, uint32_t OH, uint32_t OW, uint32_t OC,
uint32_t OWS, uint32_t FH, uint32_t FW, uint32_t PH,
uint32_t PW, uint32_t SH, uint32_t SW, uint32_t DH,
uint32_t DW, uint32_t LD);
if (flip) {
kern_ptr = im2col_kern<true>;
} else {
kern_ptr = im2col_kern<false>;
}
for (size_t n = 0; n < N; ++n) {
kern_ptr<<<nblocks, nthreads, 0, stream>>>(
src + n * IH * IW * IWS, unrolled + n * OH * OW * LD, N, IH, IW,
IC, IWS, OH, OW, OC, OWS, FH, FW, PH, PW, SH, SW, DH, DW, LD);
}
after_kernel_launch();
}
// vim: syntax=cpp.doxygen
|
d1ca2dbec7dcb7d268ea0674ed1be90c5b53bf06.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//----------------------------------------------------------------------------------
//
// FLUIDS v.3 - SPH Fluid Simulator for CPU and GPU
// Copyright (C) 2012-2013. Rama Hoetzlein, http://fluids3.com
//
// BSD 3-clause:
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this
// list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
// 3. Neither the name of the copyright holder nor the names of its contributors may
// be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
// OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
// TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//----------------------------------------------------------------------------------
#define CUDA_KERNEL
#include "point_fusion_cuda.cuh"
#include "cutil_math.h" // cutil32.lib
#include <string.h>
#include <assert.h>
struct ALIGN(16) Obj {
float3 pos;
float3 size;
float3 loc;
uint clr;
};
struct ALIGN(16) ScanInfo {
int* objGrid;
int* objCnts;
Obj* objList;
float3* pntList;
uint* pntClrs;
int3 gridRes;
float3 gridSize;
float3 cams;
float3 camu;
float3 camv;
uint* rnd_seeds;
};
__device__ ScanInfo scan;
__device__ int pntout;
// Generate random unsigned int in [0, 2^24)
static __host__ __device__ __inline__ unsigned int lcg(unsigned int &prev)
{
const unsigned int LCG_A = 1664525u;
const unsigned int LCG_C = 1013904223u;
prev = (LCG_A * prev + LCG_C);
return prev & 0x00FFFFFF;
}
static __host__ __device__ __inline__ unsigned int lcg2(unsigned int &prev)
{
prev = (prev*8121 + 28411) % 134456;
return prev;
}
static __host__ __device__ __inline__ float rnd(unsigned int &prev)
{
return ((float) lcg(prev) / (float) 0x01000000);
}
// Get view ray
inline __device__ float3 getViewRay ( float x, float y )
{
float3 v = x*scan.camu + y*scan.camv + scan.cams;
return normalize(v);
}
#define NOHIT 1.0e10f
// Ray box intersection
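// Slab test: ht[6] is the entry distance (clamped to 0 when the origin is inside the box),
// ht[7] the exit distance; the returned .z component is NOHIT when the exit precedes the entry
// or the whole box lies behind the ray origin.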
inline __device__ float3 rayBoxIntersect ( float3 rpos, float3 rdir, float3 vmin, float3 vmax )
{
register float ht[8];
ht[0] = (vmin.x - rpos.x)/rdir.x;
ht[1] = (vmax.x - rpos.x)/rdir.x;
ht[2] = (vmin.y - rpos.y)/rdir.y;
ht[3] = (vmax.y - rpos.y)/rdir.y;
ht[4] = (vmin.z - rpos.z)/rdir.z;
ht[5] = (vmax.z - rpos.z)/rdir.z;
ht[6] = fmax(fmax(fmin(ht[0], ht[1]), fmin(ht[2], ht[3])), fmin(ht[4], ht[5]));
ht[7] = fmin(fmin(fmax(ht[0], ht[1]), fmax(ht[2], ht[3])), fmax(ht[4], ht[5]));
ht[6] = (ht[6] < 0 ) ? 0.0 : ht[6];
return make_float3( ht[6], ht[7], (ht[7]<ht[6] || ht[7]<0) ? NOHIT : 0 );
}
#define COLOR(r,g,b) ( (uint((b)*255.0f)<<16) | (uint((g)*255.0f)<<8) | uint((r)*255.0f) )
float3 __device__ __inline__ jitter_sample ()
{
uint index = (threadIdx.y % 128) * 128 + (threadIdx.x % 128);
unsigned int seed = scan.rnd_seeds[ index ];
float uu = rnd( seed );
float vv = rnd( seed );
float ww = rnd( seed );
scan.rnd_seeds[ index ] = seed;
return make_float3(uu,vv,ww);
}
extern "C" __global__ void scanBuildings ( float3 pos, int3 res, int num_obj, float tmax )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= res.x || y >= res.y ) return;
float3 jit = jitter_sample();
float3 dir = getViewRay( float(x+jit.x)/float(res.x), float(y+jit.y)/float(res.y) );
int gcell = int(pos.z/scan.gridSize.y) * scan.gridRes.x + int(pos.x/scan.gridSize.x);
if ( gcell < 0 || gcell > scan.gridRes.x*scan.gridRes.y) return;
Obj* bldg;
float3 t, tnearest;
uint clr = 0;
tnearest.x = NOHIT;
//for (int n=0; n < scan.objCnts[gcell]; n++) {
// bldg = scan.objList + (scan.objGrid[gcell] + n);
for (int n=0; n < num_obj; n++) {
bldg = scan.objList + n;
if ( bldg != 0 ) {
t = rayBoxIntersect ( pos, dir, bldg->pos, bldg->pos + bldg->size );
if ( t.x < tnearest.x && t.x < tmax && t.z != NOHIT ) {
tnearest = t;
clr = bldg->clr;
}
}
}
if ( tnearest.x == NOHIT) { scan.pntList[ y*res.x + x] = make_float3(0,0,0); return; }
atomicAdd(&pntout, 1);
scan.pntList[ y*res.x + x] = pos + tnearest.x * dir;
scan.pntClrs[ y*res.x + x] = clr;
}
| d1ca2dbec7dcb7d268ea0674ed1be90c5b53bf06.cu | //----------------------------------------------------------------------------------
//
// FLUIDS v.3 - SPH Fluid Simulator for CPU and GPU
// Copyright (C) 2012-2013. Rama Hoetzlein, http://fluids3.com
//
// BSD 3-clause:
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this
// list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
// 3. Neither the name of the copyright holder nor the names of its contributors may
// be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
// OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
// TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//----------------------------------------------------------------------------------
#define CUDA_KERNEL
#include "point_fusion_cuda.cuh"
#include "cutil_math.h" // cutil32.lib
#include <string.h>
#include <assert.h>
struct ALIGN(16) Obj {
float3 pos;
float3 size;
float3 loc;
uint clr;
};
struct ALIGN(16) ScanInfo {
int* objGrid;
int* objCnts;
Obj* objList;
float3* pntList;
uint* pntClrs;
int3 gridRes;
float3 gridSize;
float3 cams;
float3 camu;
float3 camv;
uint* rnd_seeds;
};
__device__ ScanInfo scan;
__device__ int pntout;
// Generate random unsigned int in [0, 2^24)
static __host__ __device__ __inline__ unsigned int lcg(unsigned int &prev)
{
const unsigned int LCG_A = 1664525u;
const unsigned int LCG_C = 1013904223u;
prev = (LCG_A * prev + LCG_C);
return prev & 0x00FFFFFF;
}
static __host__ __device__ __inline__ unsigned int lcg2(unsigned int &prev)
{
prev = (prev*8121 + 28411) % 134456;
return prev;
}
static __host__ __device__ __inline__ float rnd(unsigned int &prev)
{
return ((float) lcg(prev) / (float) 0x01000000);
}
// Get view ray
inline __device__ float3 getViewRay ( float x, float y )
{
float3 v = x*scan.camu + y*scan.camv + scan.cams;
return normalize(v);
}
#define NOHIT 1.0e10f
// Ray box intersection
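// Slab test: ht[6] is the entry distance (clamped to 0 when the origin is inside the box),
// ht[7] the exit distance; the returned .z component is NOHIT when the exit precedes the entry
// or the whole box lies behind the ray origin.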
inline __device__ float3 rayBoxIntersect ( float3 rpos, float3 rdir, float3 vmin, float3 vmax )
{
register float ht[8];
ht[0] = (vmin.x - rpos.x)/rdir.x;
ht[1] = (vmax.x - rpos.x)/rdir.x;
ht[2] = (vmin.y - rpos.y)/rdir.y;
ht[3] = (vmax.y - rpos.y)/rdir.y;
ht[4] = (vmin.z - rpos.z)/rdir.z;
ht[5] = (vmax.z - rpos.z)/rdir.z;
ht[6] = fmax(fmax(fmin(ht[0], ht[1]), fmin(ht[2], ht[3])), fmin(ht[4], ht[5]));
ht[7] = fmin(fmin(fmax(ht[0], ht[1]), fmax(ht[2], ht[3])), fmax(ht[4], ht[5]));
ht[6] = (ht[6] < 0 ) ? 0.0 : ht[6];
return make_float3( ht[6], ht[7], (ht[7]<ht[6] || ht[7]<0) ? NOHIT : 0 );
}
#define COLOR(r,g,b) ( (uint((b)*255.0f)<<16) | (uint((g)*255.0f)<<8) | uint((r)*255.0f) )
float3 __device__ __inline__ jitter_sample ()
{
uint index = (threadIdx.y % 128) * 128 + (threadIdx.x % 128);
unsigned int seed = scan.rnd_seeds[ index ];
float uu = rnd( seed );
float vv = rnd( seed );
float ww = rnd( seed );
scan.rnd_seeds[ index ] = seed;
return make_float3(uu,vv,ww);
}
extern "C" __global__ void scanBuildings ( float3 pos, int3 res, int num_obj, float tmax )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= res.x || y >= res.y ) return;
float3 jit = jitter_sample();
float3 dir = getViewRay( float(x+jit.x)/float(res.x), float(y+jit.y)/float(res.y) );
int gcell = int(pos.z/scan.gridSize.y) * scan.gridRes.x + int(pos.x/scan.gridSize.x);
if ( gcell < 0 || gcell > scan.gridRes.x*scan.gridRes.y) return;
Obj* bldg;
float3 t, tnearest;
uint clr = 0;
tnearest.x = NOHIT;
//for (int n=0; n < scan.objCnts[gcell]; n++) {
// bldg = scan.objList + (scan.objGrid[gcell] + n);
for (int n=0; n < num_obj; n++) {
bldg = scan.objList + n;
if ( bldg != 0 ) {
t = rayBoxIntersect ( pos, dir, bldg->pos, bldg->pos + bldg->size );
if ( t.x < tnearest.x && t.x < tmax && t.z != NOHIT ) {
tnearest = t;
clr = bldg->clr;
}
}
}
if ( tnearest.x == NOHIT) { scan.pntList[ y*res.x + x] = make_float3(0,0,0); return; }
atomicAdd(&pntout, 1);
scan.pntList[ y*res.x + x] = pos + tnearest.x * dir;
scan.pntClrs[ y*res.x + x] = clr;
}
|
0cb651041917a4864043d1ab1d35e8f0cc146e4d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** @file histo-global.cu Mandelbrot set rendering via recursive subdivision (dynamic parallelism) */
#include <assert.h>
#include <png.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/** CUDA check macro */
#define cucheck(call) \
{\
hipError_t res = (call);\
if(res != hipSuccess) {\
const char* err_str = hipGetErrorString(res);\
fprintf(stderr, "%s (%d): %s in %s", __FILE__, __LINE__, err_str, #call); \
exit(-1);\
}\
}
#define cucheck_dev(call) \
{\
hipError_t res = (call);\
if(res != hipSuccess) {\
const char* err_str = hipGetErrorString(res);\
printf("%s (%d): %s in %s", __FILE__, __LINE__, err_str, #call); \
assert(0); \
}\
}
/** time spent in device */
float gpu_time = 0;
/** a useful function to compute the number of threads */
__host__ __device__ int divup(int x, int y) { return x / y + (x % y ? 1 : 0); }
/** gets the color, given the dwell */
void dwell_color(int *r, int *g, int *b, int dwell);
/** save the dwell into a PNG file
@remarks: code to save PNG file taken from here
(error handling is removed):
http://www.labbookpages.co.uk/software/imgProc/libPNG.html
*/
void save_image(const char *filename, int *dwells, int w, int h) {
png_bytep row;
FILE *fp = fopen(filename, "wb");
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, 0, 0, 0);
png_infop info_ptr = png_create_info_struct(png_ptr);
// exception handling
setjmp(png_jmpbuf(png_ptr));
png_init_io(png_ptr, fp);
// write header (8 bit colour depth)
png_set_IHDR(png_ptr, info_ptr, w, h,
8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
// set title
png_text title_text;
title_text.compression = PNG_TEXT_COMPRESSION_NONE;
title_text.key = "Title";
title_text.text = "Mandelbrot set, per-pixel";
png_set_text(png_ptr, info_ptr, &title_text, 1);
png_write_info(png_ptr, info_ptr);
// write image data
row = (png_bytep) malloc(3 * w * sizeof(png_byte));
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
int r, g, b;
dwell_color(&r, &g, &b, dwells[y * w + x]);
row[3 * x + 0] = (png_byte)r;
row[3 * x + 1] = (png_byte)g;
row[3 * x + 2] = (png_byte)b;
}
png_write_row(png_ptr, row);
}
png_write_end(png_ptr, NULL);
fclose(fp);
png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
png_destroy_write_struct(&png_ptr, (png_infopp)NULL);
free(row);
} // save_image
/** a simple complex type */
struct complex {
__host__ __device__ complex(float re, float im = 0) {
this->re = re;
this->im = im;
}
/** real and imaginary part */
float re, im;
}; // struct complex
// operator overloads for complex numbers
inline __host__ __device__ complex operator+
(const complex &a, const complex &b) {
return complex(a.re + b.re, a.im + b.im);
}
inline __host__ __device__ complex operator-
(const complex &a) { return complex(-a.re, -a.im); }
inline __host__ __device__ complex operator-
(const complex &a, const complex &b) {
return complex(a.re - b.re, a.im - b.im);
}
inline __host__ __device__ complex operator*
(const complex &a, const complex &b) {
return complex(a.re * b.re - a.im * b.im, a.im * b.re + a.re * b.im);
}
inline __host__ __device__ float abs2(const complex &a) {
return a.re * a.re + a.im * a.im;
}
inline __host__ __device__ complex operator/
(const complex &a, const complex &b) {
float invabs2 = 1 / abs2(b);
return complex((a.re * b.re + a.im * b.im) * invabs2,
(a.im * b.re - b.im * a.re) * invabs2);
} // operator/
#define MAX_DWELL 512
/** block size along x and y */
#define BSX 64
#define BSY 4
/** maximum recursion depth */
#define MAX_DEPTH 4
/** region below which do per-pixel */
#define MIN_SIZE 32
/** subdivision factor along each axis */
#define SUBDIV 4
/** subdivision when launched from host */
#define INIT_SUBDIV 32
/** find the dwell for the pixel */
__device__ int pixel_dwell(int w, int h, complex cmin, complex cmax, int x, int y) {
complex dc = cmax - cmin;
float fx = (float)x / w, fy = (float)y / h;
complex c = cmin + complex(fx * dc.re, fy * dc.im);
int dwell = 0;
complex z = c;
while(dwell < MAX_DWELL && abs2(z) < 2 * 2) {
z = z * z + c;
dwell++;
}
return dwell;
} // pixel_dwell
/** binary operation for common dwell "reduction": MAX_DWELL + 1 = neutral
element, -1 = dwells are different */
#define NEUT_DWELL (MAX_DWELL + 1)
#define DIFF_DWELL (-1)
__device__ int same_dwell(int d1, int d2) {
if(d1 == d2)
return d1;
else if(d1 == NEUT_DWELL || d2 == NEUT_DWELL)
return min(d1, d2);
else
return DIFF_DWELL;
} // same_dwell
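// For example: same_dwell(7, 7) == 7, same_dwell(7, NEUT_DWELL) == 7,
// and same_dwell(7, 9) == DIFF_DWELL.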
/** evaluates the common border dwell, if it exists */
__device__ int border_dwell
(int w, int h, complex cmin, complex cmax, int x0, int y0, int d) {
// check whether all boundary pixels have the same dwell
int tid = threadIdx.y * blockDim.x + threadIdx.x;
int bs = blockDim.x * blockDim.y;
int comm_dwell = NEUT_DWELL;
// for all boundary pixels, distributed across threads
for(int r = tid; r < d; r += bs) {
// for each boundary: b = 0 is east, then counter-clockwise
for(int b = 0; b < 4; b++) {
int x = b % 2 != 0 ? x0 + r : (b == 0 ? x0 + d - 1 : x0);
int y = b % 2 == 0 ? y0 + r : (b == 1 ? y0 + d - 1 : y0);
int dwell = pixel_dwell(w, h, cmin, cmax, x, y);
comm_dwell = same_dwell(comm_dwell, dwell);
}
} // for all boundary pixels
// reduce across threads in the block
__shared__ int ldwells[BSX * BSY];
int nt = min(d, BSX * BSY);
if(tid < nt)
ldwells[tid] = comm_dwell;
__syncthreads();
for(; nt > 1; nt /= 2) {
if(tid < nt / 2)
ldwells[tid] = same_dwell(ldwells[tid], ldwells[tid + nt / 2]);
__syncthreads();
}
return ldwells[0];
} // border_dwell
/** the kernel to fill the image region with a specific dwell value */
__global__ void dwell_fill_k
(int *dwells, int w, int x0, int y0, int d, int dwell) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x < d && y < d) {
x += x0, y += y0;
dwells[y * w + x] = dwell;
}
} // dwell_fill_k
/** the kernel to fill in per-pixel values of the portion of the Mandelbrot set
*/
__global__ void mandelbrot_pixel_k
(int *dwells, int w, int h, complex cmin, complex cmax, int x0, int y0, int d) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if(x < d && y < d) {
x += x0, y += y0;
dwells[y * w + x] = pixel_dwell(w, h, cmin, cmax, x, y);
}
} // mandelbrot_pixel_k
/** checking for an error */
__device__ void check_error(int x0, int y0, int d) {
int err = hipGetLastError();
if(err != hipSuccess) {
printf("error launching kernel for region (%d..%d, %d..%d)\n",
x0, x0 + d, y0, y0 + d);
assert(0);
}
}
/** computes the dwells for Mandelbrot image using dynamic parallelism; one
block is launched per pixel
@param dwells the output array
@param w the width of the output image
@param h the height of the output image
@param cmin the complex value associated with the left-bottom corner of the
image
@param cmax the complex value associated with the right-top corner of the
image
@param x0 the starting x coordinate of the portion to compute
@param y0 the starting y coordinate of the portion to compute
@param d the size of the portion to compute (the portion is always a square)
@param depth kernel invocation depth
@remarks the algorithm reverts to per-pixel Mandelbrot evaluation once
either maximum depth or minimum size is reached
*/
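// Worked example of the recursion: with W = H = 16384, INIT_SUBDIV = 32 and SUBDIV = 4, the
// host launch works on 512x512 regions (depth 1); a region with a non-uniform border is
// subdivided once into 128x128 blocks (depth 2), which then fall through to the per-pixel
// kernel because 128 / SUBDIV is not greater than MIN_SIZE.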
__global__ void mandelbrot_block_k
(int *dwells, int w, int h, complex cmin, complex cmax, int x0, int y0,
int d, int depth) {
x0 += d * blockIdx.x, y0 += d * blockIdx.y;
int comm_dwell = border_dwell(w, h, cmin, cmax, x0, y0, d);
if(threadIdx.x == 0 && threadIdx.y == 0) {
if(comm_dwell != DIFF_DWELL) {
// uniform dwell, just fill
dim3 bs(BSX, BSY), grid(divup(d, BSX), divup(d, BSY));
hipLaunchKernelGGL(( dwell_fill_k), dim3(grid), dim3(bs), 0, 0, dwells, w, x0, y0, d, comm_dwell);
} else if(depth + 1 < MAX_DEPTH && d / SUBDIV > MIN_SIZE) {
// subdivide recursively
dim3 bs(blockDim.x, blockDim.y), grid(SUBDIV, SUBDIV);
hipLaunchKernelGGL(( mandelbrot_block_k), dim3(grid), dim3(bs), 0, 0,
dwells, w, h, cmin, cmax, x0, y0, d / SUBDIV, depth + 1);
} else {
// leaf, per-pixel kernel
dim3 bs(BSX, BSY), grid(divup(d, BSX), divup(d, BSY));
hipLaunchKernelGGL(( mandelbrot_pixel_k), dim3(grid), dim3(bs), 0, 0,
dwells, w, h, cmin, cmax, x0, y0, d);
}
cucheck_dev(hipGetLastError());
//check_error(x0, y0, d);
}
} // mandelbrot_block_k
/** gets the color, given the dwell (on host) */
#define CUT_DWELL (MAX_DWELL / 4)
void dwell_color(int *r, int *g, int *b, int dwell) {
// black for the Mandelbrot set
if(dwell >= MAX_DWELL) {
*r = *g = *b = 0;
} else {
// cut at zero
if(dwell < 0)
dwell = 0;
if(dwell <= CUT_DWELL) {
// from black to blue the first half
*r = *g = 0;
*b = 128 + dwell * 127 / (CUT_DWELL);
} else {
// from blue to white for the second half
*b = 255;
*r = *g = (dwell - CUT_DWELL) * 255 / (MAX_DWELL - CUT_DWELL);
}
}
} // dwell_color
/** data size */
#define H (16 * 1024)
#define W (16 * 1024)
#define IMAGE_PATH "./mandelbrot.png"
int main(int argc, char **argv) {
// allocate memory
int w = W, h = H;
size_t dwell_sz = w * h * sizeof(int);
int *h_dwells, *d_dwells;
cucheck(hipMalloc((void**)&d_dwells, dwell_sz));
h_dwells = (int*)malloc(dwell_sz);
dim3 bs(BSX, BSY), grid(INIT_SUBDIV, INIT_SUBDIV);
const int num_iter = 100;
// compute the dwells, copy them back
#define WITH_CUDA_GRAPH
#ifdef WITH_CUDA_GRAPH
printf("\n With cuda graph\n");
hipStream_t stream;
hipGraph_t graph;
hipGraphExec_t cuda_graph_exec;
cucheck(hipStreamCreate(&stream));
cucheck(hipStreamBeginCapture(stream, hipStreamCaptureModeGlobal));
hipLaunchKernelGGL(( mandelbrot_block_k), dim3(grid), dim3(bs),0, stream,
d_dwells, w, h, complex(-1.5, -1), complex(0.5, 1), 0, 0, W / INIT_SUBDIV, 1);
cucheck(hipStreamEndCapture(stream, &graph));
cucheck(hipGraphInstantiate(&cuda_graph_exec, graph, NULL, NULL, 0));
hipEvent_t start, stop;
cucheck(hipEventCreate(&start));
cucheck(hipEventCreate(&stop));
cucheck(hipEventRecord(start, stream));
for(int iter = 0;iter < num_iter; ++iter){
cucheck(hipGraphLaunch(cuda_graph_exec, stream));
cucheck(hipStreamSynchronize(stream));
}
cucheck(hipEventRecord(stop, stream));
cucheck(hipEventSynchronize(stop));
cucheck(hipDeviceSynchronize());
cucheck(hipGetLastError());
cucheck(hipGraphExecDestroy(cuda_graph_exec));
cucheck(hipGraphDestroy(graph));
cucheck(hipStreamDestroy(stream));
cucheck(hipEventElapsedTime(&gpu_time, start, stop));
gpu_time /=static_cast<float>(num_iter);
#else
hipEvent_t start, stop;
cucheck(hipEventCreate(&start));
cucheck(hipEventCreate(&stop));
cucheck(hipEventRecord(start, NULL));
for(int iter = 0;iter < num_iter; ++iter){
hipLaunchKernelGGL(( mandelbrot_block_k), dim3(grid), dim3(bs), 0, 0,
d_dwells, w, h, complex(-1.5, -1), complex(0.5, 1), 0, 0, W / INIT_SUBDIV, 1);
}
cucheck(hipEventRecord(stop, NULL));
cucheck(hipEventSynchronize(stop));
cucheck(hipDeviceSynchronize());
cucheck(hipGetLastError());
cucheck(hipEventElapsedTime(&gpu_time, start, stop));
gpu_time /=static_cast<float>(num_iter);
#endif
//cucheck(hipMemcpy(h_dwells, d_dwells, dwell_sz, hipMemcpyDeviceToHost));
// save the image to PNG file
//save_image(IMAGE_PATH, h_dwells, w, h);
// print performance
printf("Mandelbrot set computed in %.9f ms, at %.9f Mpix/s\n", gpu_time,
h * w * 1e-3 / gpu_time);
// free data
hipFree(d_dwells);
free(h_dwells);
return 0;
} // main
0cb651041917a4864043d1ab1d35e8f0cc146e4d.cu | /** @file histo-global.cu Mandelbrot set rendering via recursive subdivision (dynamic parallelism) */
#include <assert.h>
#include <png.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/** CUDA check macro */
#define cucheck(call) \
{\
cudaError_t res = (call);\
if(res != cudaSuccess) {\
const char* err_str = cudaGetErrorString(res);\
fprintf(stderr, "%s (%d): %s in %s", __FILE__, __LINE__, err_str, #call); \
exit(-1);\
}\
}
#define cucheck_dev(call) \
{\
cudaError_t res = (call);\
if(res != cudaSuccess) {\
const char* err_str = cudaGetErrorString(res);\
printf("%s (%d): %s in %s", __FILE__, __LINE__, err_str, #call); \
assert(0); \
}\
}
/** time spent on the device, in milliseconds */
float gpu_time = 0;
/** a useful function to compute the number of threads */
__host__ __device__ int divup(int x, int y) { return x / y + (x % y ? 1 : 0); }
/** gets the color, given the dwell */
void dwell_color(int *r, int *g, int *b, int dwell);
/** save the dwell into a PNG file
@remarks: code to save PNG file taken from here
(error handling is removed):
http://www.labbookpages.co.uk/software/imgProc/libPNG.html
*/
void save_image(const char *filename, int *dwells, int w, int h) {
png_bytep row;
FILE *fp = fopen(filename, "wb");
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, 0, 0, 0);
png_infop info_ptr = png_create_info_struct(png_ptr);
// exception handling
setjmp(png_jmpbuf(png_ptr));
png_init_io(png_ptr, fp);
// write header (8 bit colour depth)
png_set_IHDR(png_ptr, info_ptr, w, h,
8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
// set title
png_text title_text;
title_text.compression = PNG_TEXT_COMPRESSION_NONE;
title_text.key = "Title";
title_text.text = "Mandelbrot set, per-pixel";
png_set_text(png_ptr, info_ptr, &title_text, 1);
png_write_info(png_ptr, info_ptr);
// write image data
row = (png_bytep) malloc(3 * w * sizeof(png_byte));
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
int r, g, b;
dwell_color(&r, &g, &b, dwells[y * w + x]);
row[3 * x + 0] = (png_byte)r;
row[3 * x + 1] = (png_byte)g;
row[3 * x + 2] = (png_byte)b;
}
png_write_row(png_ptr, row);
}
png_write_end(png_ptr, NULL);
fclose(fp);
png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
png_destroy_write_struct(&png_ptr, (png_infopp)NULL);
free(row);
} // save_image
/** a simple complex type */
struct complex {
__host__ __device__ complex(float re, float im = 0) {
this->re = re;
this->im = im;
}
/** real and imaginary part */
float re, im;
}; // struct complex
// operator overloads for complex numbers
inline __host__ __device__ complex operator+
(const complex &a, const complex &b) {
return complex(a.re + b.re, a.im + b.im);
}
inline __host__ __device__ complex operator-
(const complex &a) { return complex(-a.re, -a.im); }
inline __host__ __device__ complex operator-
(const complex &a, const complex &b) {
return complex(a.re - b.re, a.im - b.im);
}
inline __host__ __device__ complex operator*
(const complex &a, const complex &b) {
return complex(a.re * b.re - a.im * b.im, a.im * b.re + a.re * b.im);
}
inline __host__ __device__ float abs2(const complex &a) {
return a.re * a.re + a.im * a.im;
}
inline __host__ __device__ complex operator/
(const complex &a, const complex &b) {
float invabs2 = 1 / abs2(b);
return complex((a.re * b.re + a.im * b.im) * invabs2,
(a.im * b.re - b.im * a.re) * invabs2);
} // operator/
#define MAX_DWELL 512
/** block size along the x and y axes */
#define BSX 64
#define BSY 4
/** maximum recursion depth */
#define MAX_DEPTH 4
/** region size below which regions are evaluated per pixel instead of subdivided */
#define MIN_SIZE 32
/** subdivision factor along each axis */
#define SUBDIV 4
/** subdivision when launched from host */
#define INIT_SUBDIV 32
/** find the dwell for the pixel */
__device__ int pixel_dwell(int w, int h, complex cmin, complex cmax, int x, int y) {
complex dc = cmax - cmin;
float fx = (float)x / w, fy = (float)y / h;
complex c = cmin + complex(fx * dc.re, fy * dc.im);
int dwell = 0;
complex z = c;
while(dwell < MAX_DWELL && abs2(z) < 2 * 2) {
z = z * z + c;
dwell++;
}
return dwell;
} // pixel_dwell
/** binary operation for common dwell "reduction": MAX_DWELL + 1 = neutral
element, -1 = dwells are different */
#define NEUT_DWELL (MAX_DWELL + 1)
#define DIFF_DWELL (-1)
__device__ int same_dwell(int d1, int d2) {
if(d1 == d2)
return d1;
else if(d1 == NEUT_DWELL || d2 == NEUT_DWELL)
return min(d1, d2);
else
return DIFF_DWELL;
} // same_dwell
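// for example, same_dwell(5, 5) == 5, same_dwell(5, NEUT_DWELL) == 5 and
// same_dwell(5, 7) == DIFF_DWELL: a common dwell survives the reduction only if
// every contributing boundary pixel agrees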
/** evaluates the common border dwell, if it exists */
__device__ int border_dwell
(int w, int h, complex cmin, complex cmax, int x0, int y0, int d) {
// check whether all boundary pixels have the same dwell
int tid = threadIdx.y * blockDim.x + threadIdx.x;
int bs = blockDim.x * blockDim.y;
int comm_dwell = NEUT_DWELL;
// for all boundary pixels, distributed across threads
for(int r = tid; r < d; r += bs) {
// for each boundary: b = 0 is east, then counter-clockwise
for(int b = 0; b < 4; b++) {
int x = b % 2 != 0 ? x0 + r : (b == 0 ? x0 + d - 1 : x0);
int y = b % 2 == 0 ? y0 + r : (b == 1 ? y0 + d - 1 : y0);
int dwell = pixel_dwell(w, h, cmin, cmax, x, y);
comm_dwell = same_dwell(comm_dwell, dwell);
}
} // for all boundary pixels
// reduce across threads in the block
__shared__ int ldwells[BSX * BSY];
int nt = min(d, BSX * BSY);
if(tid < nt)
ldwells[tid] = comm_dwell;
__syncthreads();
for(; nt > 1; nt /= 2) {
if(tid < nt / 2)
ldwells[tid] = same_dwell(ldwells[tid], ldwells[tid + nt / 2]);
__syncthreads();
}
return ldwells[0];
} // border_dwell
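// note: the tree reduction above pairs ldwells[tid] with ldwells[tid + nt / 2],
// so it relies on nt being a power of two; that holds for the configuration in
// main(), where the region sizes reaching this function (512 and 128 pixels)
// and the 256-thread block are all powers of two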
/** the kernel to fill the image region with a specific dwell value */
__global__ void dwell_fill_k
(int *dwells, int w, int x0, int y0, int d, int dwell) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x < d && y < d) {
x += x0, y += y0;
dwells[y * w + x] = dwell;
}
} // dwell_fill_k
/** the kernel to fill in per-pixel values of the portion of the Mandelbrot set
*/
__global__ void mandelbrot_pixel_k
(int *dwells, int w, int h, complex cmin, complex cmax, int x0, int y0, int d) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if(x < d && y < d) {
x += x0, y += y0;
dwells[y * w + x] = pixel_dwell(w, h, cmin, cmax, x, y);
}
} // mandelbrot_pixel_k
/** checking for an error */
__device__ void check_error(int x0, int y0, int d) {
  cudaError_t err = cudaGetLastError();
if(err != cudaSuccess) {
printf("error launching kernel for region (%d..%d, %d..%d)\n",
x0, x0 + d, y0, y0 + d);
assert(0);
}
}
/** computes the dwells for Mandelbrot image using dynamic parallelism; one
    block is launched per image region, which is then filled, subdivided
    further, or evaluated per pixel
@param dwells the output array
@param w the width of the output image
@param h the height of the output image
@param cmin the complex value associated with the left-bottom corner of the
image
@param cmax the complex value associated with the right-top corner of the
image
@param x0 the starting x coordinate of the portion to compute
@param y0 the starting y coordinate of the portion to compute
@param d the size of the portion to compute (the portion is always a square)
@param depth kernel invocation depth
@remarks the algorithm reverts to per-pixel Mandelbrot evaluation once
either maximum depth or minimum size is reached
*/
__global__ void mandelbrot_block_k
(int *dwells, int w, int h, complex cmin, complex cmax, int x0, int y0,
int d, int depth) {
x0 += d * blockIdx.x, y0 += d * blockIdx.y;
int comm_dwell = border_dwell(w, h, cmin, cmax, x0, y0, d);
if(threadIdx.x == 0 && threadIdx.y == 0) {
if(comm_dwell != DIFF_DWELL) {
// uniform dwell, just fill
dim3 bs(BSX, BSY), grid(divup(d, BSX), divup(d, BSY));
dwell_fill_k<<<grid, bs>>>(dwells, w, x0, y0, d, comm_dwell);
} else if(depth + 1 < MAX_DEPTH && d / SUBDIV > MIN_SIZE) {
// subdivide recursively
dim3 bs(blockDim.x, blockDim.y), grid(SUBDIV, SUBDIV);
mandelbrot_block_k<<<grid, bs>>>
(dwells, w, h, cmin, cmax, x0, y0, d / SUBDIV, depth + 1);
} else {
// leaf, per-pixel kernel
dim3 bs(BSX, BSY), grid(divup(d, BSX), divup(d, BSY));
mandelbrot_pixel_k<<<grid, bs>>>
(dwells, w, h, cmin, cmax, x0, y0, d);
}
cucheck_dev(cudaGetLastError());
//check_error(x0, y0, d);
}
} // mandelbrot_block_k
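// worked example for the launch in main(): with W = H = 16 * 1024,
// INIT_SUBDIV = 32 and SUBDIV = 4, the host grid covers 512x512-pixel regions
// at depth 1; a non-uniform region is subdivided once into 128x128 regions at
// depth 2, where d / SUBDIV > MIN_SIZE becomes 32 > 32 and fails, so those
// regions are either filled or handed to the per-pixel kernel; for this
// configuration MIN_SIZE, not MAX_DEPTH, is the limit that stops the recursion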
/** gets the color, given the dwell (on host) */
#define CUT_DWELL (MAX_DWELL / 4)
void dwell_color(int *r, int *g, int *b, int dwell) {
// black for the Mandelbrot set
if(dwell >= MAX_DWELL) {
*r = *g = *b = 0;
} else {
// cut at zero
if(dwell < 0)
dwell = 0;
if(dwell <= CUT_DWELL) {
// from black to blue the first half
*r = *g = 0;
*b = 128 + dwell * 127 / (CUT_DWELL);
} else {
// from blue to white for the second half
*b = 255;
*r = *g = (dwell - CUT_DWELL) * 255 / (MAX_DWELL - CUT_DWELL);
}
}
} // dwell_color
/** data size */
#define H (16 * 1024)
#define W (16 * 1024)
#define IMAGE_PATH "./mandelbrot.png"
int main(int argc, char **argv) {
// allocate memory
int w = W, h = H;
size_t dwell_sz = w * h * sizeof(int);
int *h_dwells, *d_dwells;
cucheck(cudaMalloc((void**)&d_dwells, dwell_sz));
h_dwells = (int*)malloc(dwell_sz);
dim3 bs(BSX, BSY), grid(INIT_SUBDIV, INIT_SUBDIV);
const int num_iter = 100;
// compute the dwells, copy them back
#define WITH_CUDA_GRAPH
#ifdef WITH_CUDA_GRAPH
printf("\n With cuda graph\n");
cudaStream_t stream;
cudaGraph_t graph;
cudaGraphExec_t cuda_graph_exec;
cucheck(cudaStreamCreate(&stream));
cucheck(cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal));
mandelbrot_block_k<<<grid, bs,0, stream>>>
(d_dwells, w, h, complex(-1.5, -1), complex(0.5, 1), 0, 0, W / INIT_SUBDIV, 1);
cucheck(cudaStreamEndCapture(stream, &graph));
cucheck(cudaGraphInstantiate(&cuda_graph_exec, graph, NULL, NULL, 0));
cudaEvent_t start, stop;
cucheck(cudaEventCreate(&start));
cucheck(cudaEventCreate(&stop));
cucheck(cudaEventRecord(start, stream));
for(int iter = 0;iter < num_iter; ++iter){
cucheck(cudaGraphLaunch(cuda_graph_exec, stream));
cucheck(cudaStreamSynchronize(stream));
}
cucheck(cudaEventRecord(stop, stream));
cucheck(cudaEventSynchronize(stop));
cucheck(cudaDeviceSynchronize());
cucheck(cudaGetLastError());
cucheck(cudaGraphExecDestroy(cuda_graph_exec));
cucheck(cudaGraphDestroy(graph));
cucheck(cudaStreamDestroy(stream));
cucheck(cudaEventElapsedTime(&gpu_time, start, stop));
gpu_time /=static_cast<float>(num_iter);
#else
cudaEvent_t start, stop;
cucheck(cudaEventCreate(&start));
cucheck(cudaEventCreate(&stop));
cucheck(cudaEventRecord(start, NULL));
for(int iter = 0;iter < num_iter; ++iter){
mandelbrot_block_k<<<grid, bs>>>
(d_dwells, w, h, complex(-1.5, -1), complex(0.5, 1), 0, 0, W / INIT_SUBDIV, 1);
}
cucheck(cudaEventRecord(stop, NULL));
cucheck(cudaEventSynchronize(stop));
cucheck(cudaDeviceSynchronize());
cucheck(cudaGetLastError());
cucheck(cudaEventElapsedTime(&gpu_time, start, stop));
gpu_time /=static_cast<float>(num_iter);
#endif
//cucheck(cudaMemcpy(h_dwells, d_dwells, dwell_sz, cudaMemcpyDeviceToHost));
// save the image to PNG file
//save_image(IMAGE_PATH, h_dwells, w, h);
// print performance
printf("Mandelbrot set computed in %.9f ms, at %.9f Mpix/s\n", gpu_time,
h * w * 1e-3 / gpu_time);
// free data
cudaFree(d_dwells);
free(h_dwells);
return 0;
} // main
|
518dd6a18bf5857419c7b839a77550a2b8677b6b.hip | // !!! This is a file automatically generated by hipify!!!
#include "CUAPI.h"
#include "CUDA_ConstMemory.h"
#if ( defined GPU && MODEL == HYDRO )
extern real *d_EoS_Table[EOS_NTABLE_MAX];
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_SetConstMemory_EoS
// Description : Set the EoS constant memory variables on GPU
//
// Note : 1. Adopt the suggested approach for CUDA version >= 5.0
// 2. Invoked by EoS_Init()
//
// Parameter : None
//
// Return : c_EoS_AuxArray_Flt[], c_EoS_AuxArray_Int[], c_EoS_Table[]
// EoS.AuxArrayDevPtr_Flt, EoS.AuxArrayDevPtr_Int, EoS.Table
//---------------------------------------------------------------------------------------------------
void CUAPI_SetConstMemory_EoS()
{
// copy data to constant memory
CUDA_CHECK_ERROR( hipMemcpyToSymbol( c_EoS_AuxArray_Flt, EoS_AuxArray_Flt, EOS_NAUX_MAX *sizeof(double) ) );
CUDA_CHECK_ERROR( hipMemcpyToSymbol( c_EoS_AuxArray_Int, EoS_AuxArray_Int, EOS_NAUX_MAX *sizeof(int ) ) );
CUDA_CHECK_ERROR( hipMemcpyToSymbol( c_EoS_Table, d_EoS_Table, EOS_NTABLE_MAX*sizeof(real* ) ) );
// obtain the constant-memory pointers
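   // hipGetSymbolAddress() returns the device address of each __constant__
   // symbol (presumably declared in CUDA_ConstMemory.h), so the EoS struct can
   // hand these addresses to kernels and other translation units as ordinary
   // device pointers instead of referencing the symbols directly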
CUDA_CHECK_ERROR( hipGetSymbolAddress( (void **)&EoS.AuxArrayDevPtr_Flt, c_EoS_AuxArray_Flt ) );
CUDA_CHECK_ERROR( hipGetSymbolAddress( (void **)&EoS.AuxArrayDevPtr_Int, c_EoS_AuxArray_Int ) );
CUDA_CHECK_ERROR( hipGetSymbolAddress( (void **)&EoS.Table, c_EoS_Table ) );
} // FUNCTION : CUAPI_SetConstMemory_EoS
#endif // #if ( defined GPU && MODEL == HYDRO )
| 518dd6a18bf5857419c7b839a77550a2b8677b6b.cu | #include "CUAPI.h"
#include "CUDA_ConstMemory.h"
#if ( defined GPU && MODEL == HYDRO )
extern real *d_EoS_Table[EOS_NTABLE_MAX];
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_SetConstMemory_EoS
// Description : Set the EoS constant memory variables on GPU
//
// Note : 1. Adopt the suggested approach for CUDA version >= 5.0
// 2. Invoked by EoS_Init()
//
// Parameter : None
//
// Return : c_EoS_AuxArray_Flt[], c_EoS_AuxArray_Int[], c_EoS_Table[]
// EoS.AuxArrayDevPtr_Flt, EoS.AuxArrayDevPtr_Int, EoS.Table
//---------------------------------------------------------------------------------------------------
void CUAPI_SetConstMemory_EoS()
{
// copy data to constant memory
CUDA_CHECK_ERROR( cudaMemcpyToSymbol( c_EoS_AuxArray_Flt, EoS_AuxArray_Flt, EOS_NAUX_MAX *sizeof(double) ) );
CUDA_CHECK_ERROR( cudaMemcpyToSymbol( c_EoS_AuxArray_Int, EoS_AuxArray_Int, EOS_NAUX_MAX *sizeof(int ) ) );
CUDA_CHECK_ERROR( cudaMemcpyToSymbol( c_EoS_Table, d_EoS_Table, EOS_NTABLE_MAX*sizeof(real* ) ) );
// obtain the constant-memory pointers
CUDA_CHECK_ERROR( cudaGetSymbolAddress( (void **)&EoS.AuxArrayDevPtr_Flt, c_EoS_AuxArray_Flt ) );
CUDA_CHECK_ERROR( cudaGetSymbolAddress( (void **)&EoS.AuxArrayDevPtr_Int, c_EoS_AuxArray_Int ) );
CUDA_CHECK_ERROR( cudaGetSymbolAddress( (void **)&EoS.Table, c_EoS_Table ) );
} // FUNCTION : CUAPI_SetConstMemory_EoS
#endif // #if ( defined GPU && MODEL == HYDRO )
|
607568c933d1c94c98e18a1414f92ff2e30999c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "voxel_hashing/map_proc.h"
#include "math/matrix_type.h"
#include "math/vector_type.h"
#include "utils/safe_call.h"
#include "voxel_hashing/prefix_sum.h"
#include "voxel_hashing/voxel_hashing.h"
#include "voxel_hashing/device_tables.h"
namespace fusion
{
namespace cuda
{
struct BuildVertexArray
{
MapStorage map_struct;
Vector3f *triangles;
HashEntry *block_array;
uint *block_count;
uint *triangle_count;
Vector3f *surface_normal;
FUSION_DEVICE inline void select_blocks() const
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ bool scan_required;
if (x == 0)
scan_required = false;
__syncthreads();
uint val = 0;
if (x < param.num_total_hash_entries_ && map_struct.hash_table_[x].ptr_ >= 0)
{
scan_required = true;
val = 1;
}
__syncthreads();
if (scan_required)
{
int offset = exclusive_scan<1024>(val, block_count);
if (offset != -1)
{
block_array[offset] = map_struct.hash_table_[x];
}
}
}
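        // select_blocks() compacts the occupied hash entries: each thread flags
        // whether its entry points to an allocated block, and exclusive_scan<1024>
        // (the template argument matches the 1024-thread launch in the host
        // wrappers) presumably turns those flags into contiguous write offsets
        // while accumulating the grand total in block_count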
FUSION_DEVICE inline float read_sdf(Vector3f pt, bool &valid) const
{
Voxel *voxel = NULL;
findVoxel(map_struct, ToVector3i(pt), voxel);
if (voxel && voxel->weight != 0)
{
valid = true;
return voxel->getSDF();
}
else
{
valid = false;
return 0;
}
}
FUSION_DEVICE inline bool read_sdf_list(float *sdf, Vector3i pos) const
{
bool valid = false;
sdf[0] = read_sdf(pos + Vector3f(0, 0, 0), valid);
if (!valid)
return false;
sdf[1] = read_sdf(pos + Vector3f(1, 0, 0), valid);
if (!valid)
return false;
sdf[2] = read_sdf(pos + Vector3f(1, 1, 0), valid);
if (!valid)
return false;
sdf[3] = read_sdf(pos + Vector3f(0, 1, 0), valid);
if (!valid)
return false;
sdf[4] = read_sdf(pos + Vector3f(0, 0, 1), valid);
if (!valid)
return false;
sdf[5] = read_sdf(pos + Vector3f(1, 0, 1), valid);
if (!valid)
return false;
sdf[6] = read_sdf(pos + Vector3f(1, 1, 1), valid);
if (!valid)
return false;
sdf[7] = read_sdf(pos + Vector3f(0, 1, 1), valid);
if (!valid)
return false;
return true;
}
FUSION_DEVICE inline float interpolate_sdf(float &v1, float &v2) const
{
if (fabs(0 - v1) < 1e-6)
return 0;
if (fabs(0 - v2) < 1e-6)
return 1;
if (fabs(v1 - v2) < 1e-6)
return 0;
return (0 - v1) / (v2 - v1);
}
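        // make_vertex() below is the standard marching-cubes case lookup: the
        // signs of the SDF at the 8 cube corners form cube_index,
        // edge_table[cube_index] marks which of the 12 cube edges cross the zero
        // level set, and each crossing is placed on its edge by linear
        // interpolation of the two corner SDF values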
FUSION_DEVICE inline int make_vertex(Vector3f *vertex_array, const Vector3i pos)
{
float sdf[8];
if (!read_sdf_list(sdf, pos))
return -1;
int cube_index = 0;
if (sdf[0] < 0)
cube_index |= 1;
if (sdf[1] < 0)
cube_index |= 2;
if (sdf[2] < 0)
cube_index |= 4;
if (sdf[3] < 0)
cube_index |= 8;
if (sdf[4] < 0)
cube_index |= 16;
if (sdf[5] < 0)
cube_index |= 32;
if (sdf[6] < 0)
cube_index |= 64;
if (sdf[7] < 0)
cube_index |= 128;
if (edge_table[cube_index] == 0)
return -1;
if (edge_table[cube_index] & 1)
{
float val = interpolate_sdf(sdf[0], sdf[1]);
vertex_array[0] = pos + Vector3f(val, 0, 0);
}
if (edge_table[cube_index] & 2)
{
float val = interpolate_sdf(sdf[1], sdf[2]);
vertex_array[1] = pos + Vector3f(1, val, 0);
}
if (edge_table[cube_index] & 4)
{
float val = interpolate_sdf(sdf[2], sdf[3]);
vertex_array[2] = pos + Vector3f(1 - val, 1, 0);
}
if (edge_table[cube_index] & 8)
{
float val = interpolate_sdf(sdf[3], sdf[0]);
vertex_array[3] = pos + Vector3f(0, 1 - val, 0);
}
if (edge_table[cube_index] & 16)
{
float val = interpolate_sdf(sdf[4], sdf[5]);
vertex_array[4] = pos + Vector3f(val, 0, 1);
}
if (edge_table[cube_index] & 32)
{
float val = interpolate_sdf(sdf[5], sdf[6]);
vertex_array[5] = pos + Vector3f(1, val, 1);
}
if (edge_table[cube_index] & 64)
{
float val = interpolate_sdf(sdf[6], sdf[7]);
vertex_array[6] = pos + Vector3f(1 - val, 1, 1);
}
if (edge_table[cube_index] & 128)
{
float val = interpolate_sdf(sdf[7], sdf[4]);
vertex_array[7] = pos + Vector3f(0, 1 - val, 1);
}
if (edge_table[cube_index] & 256)
{
float val = interpolate_sdf(sdf[0], sdf[4]);
vertex_array[8] = pos + Vector3f(0, 0, val);
}
if (edge_table[cube_index] & 512)
{
float val = interpolate_sdf(sdf[1], sdf[5]);
vertex_array[9] = pos + Vector3f(1, 0, val);
}
if (edge_table[cube_index] & 1024)
{
float val = interpolate_sdf(sdf[2], sdf[6]);
vertex_array[10] = pos + Vector3f(1, 1, val);
}
if (edge_table[cube_index] & 2048)
{
float val = interpolate_sdf(sdf[3], sdf[7]);
vertex_array[11] = pos + Vector3f(0, 1, val);
}
return cube_index;
}
template <bool compute_normal = false>
FUSION_DEVICE inline void operator()()
{
int x = blockIdx.y * gridDim.x + blockIdx.x;
if (*triangle_count >= param.num_max_mesh_triangles_ || x >= *block_count)
return;
Vector3f vertex_array[12];
Vector3i pos = block_array[x].pos_ * BLOCK_SIZE;
auto factor = param.voxel_size;
for (int voxel_id = 0; voxel_id < BLOCK_SIZE; ++voxel_id)
{
Vector3i local_pos = Vector3i(threadIdx.x, threadIdx.y, voxel_id);
int cube_index = make_vertex(vertex_array, pos + local_pos);
if (cube_index <= 0)
continue;
for (int i = 0; triangle_table[cube_index][i] != -1; i += 3)
{
uint triangleId = atomicAdd(triangle_count, 1);
if (triangleId < param.num_max_mesh_triangles_)
{
triangles[triangleId * 3] = vertex_array[triangle_table[cube_index][i]] * factor;
triangles[triangleId * 3 + 1] = vertex_array[triangle_table[cube_index][i + 1]] * factor;
triangles[triangleId * 3 + 2] = vertex_array[triangle_table[cube_index][i + 2]] * factor;
if (compute_normal)
{
surface_normal[triangleId * 3] = normalised((triangles[triangleId * 3 + 1] - triangles[triangleId * 3]).cross(triangles[triangleId * 3 + 2] - triangles[triangleId * 3]));
surface_normal[triangleId * 3 + 1] = surface_normal[triangleId * 3 + 2] = surface_normal[triangleId * 3];
}
}
}
}
}
};
__global__ void select_blocks_kernel(BuildVertexArray bva)
{
bva.select_blocks();
}
__global__ void generate_vertex_array_kernel(BuildVertexArray bva)
{
bva.operator()<false>();
}
void create_mesh_vertex_only(
MapStorage map_struct,
MapState state,
uint &block_count,
HashEntry *block_list,
uint &triangle_count,
void *vertex_data)
{
uint *cuda_block_count;
uint *cuda_triangle_count;
safe_call(hipMalloc(&cuda_block_count, sizeof(uint)));
safe_call(hipMalloc(&cuda_triangle_count, sizeof(uint)));
safe_call(hipMemset(cuda_block_count, 0, sizeof(uint)));
safe_call(hipMemset(cuda_triangle_count, 0, sizeof(uint)));
BuildVertexArray bva;
bva.map_struct = map_struct;
bva.block_array = block_list;
bva.block_count = cuda_block_count;
bva.triangle_count = cuda_triangle_count;
bva.triangles = static_cast<Vector3f *>(vertex_data);
dim3 thread(1024);
dim3 block = dim3(div_up(state.num_total_hash_entries_, thread.x));
hipLaunchKernelGGL(( select_blocks_kernel), dim3(block), dim3(thread), 0, 0, bva);
safe_call(hipMemcpy(&block_count, cuda_block_count, sizeof(uint), hipMemcpyDeviceToHost));
if (block_count == 0)
return;
thread = dim3(8, 8);
block = dim3(div_up(block_count, 16), 16);
hipLaunchKernelGGL(( generate_vertex_array_kernel), dim3(block), dim3(thread), 0, 0, bva);
safe_call(hipMemcpy(&triangle_count, cuda_triangle_count, sizeof(uint), hipMemcpyDeviceToHost));
triangle_count = ::min(triangle_count, (uint)state.num_max_mesh_triangles_);
safe_call(hipFree(cuda_block_count));
safe_call(hipFree(cuda_triangle_count));
}
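        // host-side pattern shared by the mesh extraction entry points: launch
        // select_blocks over the whole hash table to compact and count the
        // occupied entries, copy the count back, then launch one 8x8 thread
        // block per occupied voxel block (the grid is split as
        // div_up(block_count, 16) x 16, matching the
        // blockIdx.y * gridDim.x + blockIdx.x indexing on the device)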
__global__ void generate_vertex_and_normal_array_kernel(BuildVertexArray bva)
{
bva.operator()<true>();
}
void create_mesh_with_normal(
MapStorage map_struct,
MapState state,
uint &block_count,
HashEntry *block_list,
uint &triangle_count,
void *vertex_data,
void *vertex_normal)
{
uint *cuda_block_count;
uint *cuda_triangle_count;
safe_call(hipMalloc(&cuda_block_count, sizeof(uint)));
safe_call(hipMalloc(&cuda_triangle_count, sizeof(uint)));
safe_call(hipMemset(cuda_block_count, 0, sizeof(uint)));
safe_call(hipMemset(cuda_triangle_count, 0, sizeof(uint)));
BuildVertexArray bva;
bva.map_struct = map_struct;
bva.block_array = block_list;
bva.block_count = cuda_block_count;
bva.triangle_count = cuda_triangle_count;
bva.triangles = static_cast<Vector3f *>(vertex_data);
bva.surface_normal = static_cast<Vector3f *>(vertex_normal);
dim3 thread(1024);
dim3 block = dim3(div_up(state.num_total_hash_entries_, thread.x));
hipLaunchKernelGGL(( select_blocks_kernel), dim3(block), dim3(thread), 0, 0, bva);
safe_call(hipMemcpy(&block_count, cuda_block_count, sizeof(uint), hipMemcpyDeviceToHost));
if (block_count == 0)
return;
thread = dim3(8, 8);
block = dim3(div_up(block_count, 16), 16);
hipLaunchKernelGGL(( generate_vertex_and_normal_array_kernel), dim3(block), dim3(thread), 0, 0, bva);
safe_call(hipMemcpy(&triangle_count, cuda_triangle_count, sizeof(uint), hipMemcpyDeviceToHost));
triangle_count = ::min(triangle_count, (uint)state.num_max_mesh_triangles_);
safe_call(hipFree(cuda_block_count));
safe_call(hipFree(cuda_triangle_count));
}
struct BuildVertexAndColourArray
{
MapStorage map_struct;
Vector3f *triangles;
HashEntry *block_array;
uint *block_count;
uint *triangle_count;
Vector3c *vertex_colour;
FUSION_DEVICE inline void select_blocks() const
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ bool scan_required;
if (x == 0)
scan_required = false;
__syncthreads();
uint val = 0;
if (x < param.num_total_hash_entries_ && map_struct.hash_table_[x].ptr_ >= 0)
{
scan_required = true;
val = 1;
}
__syncthreads();
if (scan_required)
{
int offset = exclusive_scan<1024>(val, block_count);
if (offset != -1)
{
block_array[offset] = map_struct.hash_table_[x];
}
}
}
FUSION_DEVICE inline void read_sdf_and_colour(Vector3f pt, bool &valid, float &sdf, Vector3c &colour) const
{
Voxel *vx = NULL;
findVoxel(map_struct, ToVector3i(pt), vx);
if (vx && vx->getWeight() > 1e-3)
{
valid = true;
sdf = vx->getSDF();
colour = vx->rgb;
}
else
{
valid = false;
}
}
FUSION_DEVICE inline bool read_sdf_and_colour_list(float *sdf, Vector3c *colour, Vector3i pos) const
{
bool valid = false;
read_sdf_and_colour(pos + Vector3f(0, 0, 0), valid, sdf[0], colour[0]);
if (!valid)
return false;
read_sdf_and_colour(pos + Vector3f(1, 0, 0), valid, sdf[1], colour[1]);
if (!valid)
return false;
read_sdf_and_colour(pos + Vector3f(1, 1, 0), valid, sdf[2], colour[2]);
if (!valid)
return false;
read_sdf_and_colour(pos + Vector3f(0, 1, 0), valid, sdf[3], colour[3]);
if (!valid)
return false;
read_sdf_and_colour(pos + Vector3f(0, 0, 1), valid, sdf[4], colour[4]);
if (!valid)
return false;
read_sdf_and_colour(pos + Vector3f(1, 0, 1), valid, sdf[5], colour[5]);
if (!valid)
return false;
read_sdf_and_colour(pos + Vector3f(1, 1, 1), valid, sdf[6], colour[6]);
if (!valid)
return false;
read_sdf_and_colour(pos + Vector3f(0, 1, 1), valid, sdf[7], colour[7]);
if (!valid)
return false;
return true;
}
FUSION_DEVICE inline float interpolate_sdf(float &v1, float &v2) const
{
if (fabs(0 - v1) < 1e-6)
return 0;
if (fabs(0 - v2) < 1e-6)
return 1;
if (fabs(v1 - v2) < 1e-6)
return 0;
return (0 - v1) / (v2 - v1);
}
FUSION_DEVICE inline int make_vertex_and_colour(Vector3f *vertex_array, Vector3c *colour_array, const Vector3i pos)
{
float sdf[8];
if (!read_sdf_and_colour_list(sdf, colour_array, pos))
return -1;
int cube_index = 0;
if (sdf[0] < 0)
cube_index |= 1;
if (sdf[1] < 0)
cube_index |= 2;
if (sdf[2] < 0)
cube_index |= 4;
if (sdf[3] < 0)
cube_index |= 8;
if (sdf[4] < 0)
cube_index |= 16;
if (sdf[5] < 0)
cube_index |= 32;
if (sdf[6] < 0)
cube_index |= 64;
if (sdf[7] < 0)
cube_index |= 128;
if (edge_table[cube_index] == 0)
return -1;
if (edge_table[cube_index] & 1)
{
float val = interpolate_sdf(sdf[0], sdf[1]);
vertex_array[0] = pos + Vector3f(val, 0, 0);
}
if (edge_table[cube_index] & 2)
{
float val = interpolate_sdf(sdf[1], sdf[2]);
vertex_array[1] = pos + Vector3f(1, val, 0);
}
if (edge_table[cube_index] & 4)
{
float val = interpolate_sdf(sdf[2], sdf[3]);
vertex_array[2] = pos + Vector3f(1 - val, 1, 0);
}
if (edge_table[cube_index] & 8)
{
float val = interpolate_sdf(sdf[3], sdf[0]);
vertex_array[3] = pos + Vector3f(0, 1 - val, 0);
}
if (edge_table[cube_index] & 16)
{
float val = interpolate_sdf(sdf[4], sdf[5]);
vertex_array[4] = pos + Vector3f(val, 0, 1);
}
if (edge_table[cube_index] & 32)
{
float val = interpolate_sdf(sdf[5], sdf[6]);
vertex_array[5] = pos + Vector3f(1, val, 1);
}
if (edge_table[cube_index] & 64)
{
float val = interpolate_sdf(sdf[6], sdf[7]);
vertex_array[6] = pos + Vector3f(1 - val, 1, 1);
}
if (edge_table[cube_index] & 128)
{
float val = interpolate_sdf(sdf[7], sdf[4]);
vertex_array[7] = pos + Vector3f(0, 1 - val, 1);
}
if (edge_table[cube_index] & 256)
{
float val = interpolate_sdf(sdf[0], sdf[4]);
vertex_array[8] = pos + Vector3f(0, 0, val);
colour_array[8] = colour_array[0];
}
if (edge_table[cube_index] & 512)
{
float val = interpolate_sdf(sdf[1], sdf[5]);
vertex_array[9] = pos + Vector3f(1, 0, val);
colour_array[9] = colour_array[1];
}
if (edge_table[cube_index] & 1024)
{
float val = interpolate_sdf(sdf[2], sdf[6]);
vertex_array[10] = pos + Vector3f(1, 1, val);
colour_array[10] = colour_array[2];
}
if (edge_table[cube_index] & 2048)
{
float val = interpolate_sdf(sdf[3], sdf[7]);
vertex_array[11] = pos + Vector3f(0, 1, val);
colour_array[11] = colour_array[3];
}
return cube_index;
}
FUSION_DEVICE inline void operator()()
{
int x = blockIdx.y * gridDim.x + blockIdx.x;
if (*triangle_count >= param.num_max_mesh_triangles_ || x >= *block_count)
return;
Vector3f vertex_array[12];
Vector3c colour_array[12];
Vector3i pos = block_array[x].pos_ * BLOCK_SIZE;
auto factor = param.voxel_size;
for (int voxel_id = 0; voxel_id < BLOCK_SIZE; ++voxel_id)
{
Vector3i local_pos = Vector3i(threadIdx.x, threadIdx.y, voxel_id);
int cube_index = make_vertex_and_colour(vertex_array, colour_array, pos + local_pos);
if (cube_index <= 0)
continue;
for (int i = 0; triangle_table[cube_index][i] != -1; i += 3)
{
uint triangleId = atomicAdd(triangle_count, 1);
if (triangleId < param.num_max_mesh_triangles_)
{
triangles[triangleId * 3] = vertex_array[triangle_table[cube_index][i]] * factor;
triangles[triangleId * 3 + 1] = vertex_array[triangle_table[cube_index][i + 1]] * factor;
triangles[triangleId * 3 + 2] = vertex_array[triangle_table[cube_index][i + 2]] * factor;
vertex_colour[triangleId * 3] = colour_array[triangle_table[cube_index][i]];
vertex_colour[triangleId * 3 + 1] = colour_array[triangle_table[cube_index][i + 1]];
vertex_colour[triangleId * 3 + 2] = colour_array[triangle_table[cube_index][i + 2]];
}
}
}
}
};
__global__ void select_blocks_coloured_kernel(BuildVertexAndColourArray delegate)
{
delegate.select_blocks();
}
__global__ void generate_vertex_and_colour_array_kernel(BuildVertexAndColourArray delegate)
{
delegate();
}
void create_mesh_with_colour(
MapStorage map_struct,
MapState state,
uint &block_count,
HashEntry *block_list,
uint &triangle_count,
void *vertex_data,
void *vertex_colour)
{
uint *cuda_block_count;
uint *cuda_triangle_count;
safe_call(hipMalloc(&cuda_block_count, sizeof(uint)));
safe_call(hipMalloc(&cuda_triangle_count, sizeof(uint)));
safe_call(hipMemset(cuda_block_count, 0, sizeof(uint)));
safe_call(hipMemset(cuda_triangle_count, 0, sizeof(uint)));
BuildVertexAndColourArray delegate;
delegate.map_struct = map_struct;
delegate.block_array = block_list;
delegate.block_count = cuda_block_count;
delegate.triangle_count = cuda_triangle_count;
delegate.triangles = static_cast<Vector3f *>(vertex_data);
delegate.vertex_colour = static_cast<Vector3c *>(vertex_colour);
dim3 thread(1024);
dim3 block = dim3(div_up(state.num_total_hash_entries_, thread.x));
hipLaunchKernelGGL(( select_blocks_coloured_kernel), dim3(block), dim3(thread), 0, 0, delegate);
safe_call(hipMemcpy(&block_count, cuda_block_count, sizeof(uint), hipMemcpyDeviceToHost));
if (block_count == 0)
return;
thread = dim3(8, 8);
block = dim3(div_up(block_count, 16), 16);
hipLaunchKernelGGL(( generate_vertex_and_colour_array_kernel), dim3(block), dim3(thread), 0, 0, delegate);
safe_call(hipMemcpy(&triangle_count, cuda_triangle_count, sizeof(uint), hipMemcpyDeviceToHost));
triangle_count = ::min(triangle_count, (uint)state.num_max_mesh_triangles_);
safe_call(hipFree(cuda_block_count));
safe_call(hipFree(cuda_triangle_count));
}
} // namespace cuda
} // namespace fusion | 607568c933d1c94c98e18a1414f92ff2e30999c8.cu | #include "voxel_hashing/map_proc.h"
#include "math/matrix_type.h"
#include "math/vector_type.h"
#include "utils/safe_call.h"
#include "voxel_hashing/prefix_sum.h"
#include "voxel_hashing/voxel_hashing.h"
#include "voxel_hashing/device_tables.h"
namespace fusion
{
namespace cuda
{
struct BuildVertexArray
{
MapStorage map_struct;
Vector3f *triangles;
HashEntry *block_array;
uint *block_count;
uint *triangle_count;
Vector3f *surface_normal;
FUSION_DEVICE inline void select_blocks() const
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ bool scan_required;
if (x == 0)
scan_required = false;
__syncthreads();
uint val = 0;
if (x < param.num_total_hash_entries_ && map_struct.hash_table_[x].ptr_ >= 0)
{
scan_required = true;
val = 1;
}
__syncthreads();
if (scan_required)
{
int offset = exclusive_scan<1024>(val, block_count);
if (offset != -1)
{
block_array[offset] = map_struct.hash_table_[x];
}
}
}
FUSION_DEVICE inline float read_sdf(Vector3f pt, bool &valid) const
{
Voxel *voxel = NULL;
findVoxel(map_struct, ToVector3i(pt), voxel);
if (voxel && voxel->weight != 0)
{
valid = true;
return voxel->getSDF();
}
else
{
valid = false;
return 0;
}
}
FUSION_DEVICE inline bool read_sdf_list(float *sdf, Vector3i pos) const
{
bool valid = false;
sdf[0] = read_sdf(pos + Vector3f(0, 0, 0), valid);
if (!valid)
return false;
sdf[1] = read_sdf(pos + Vector3f(1, 0, 0), valid);
if (!valid)
return false;
sdf[2] = read_sdf(pos + Vector3f(1, 1, 0), valid);
if (!valid)
return false;
sdf[3] = read_sdf(pos + Vector3f(0, 1, 0), valid);
if (!valid)
return false;
sdf[4] = read_sdf(pos + Vector3f(0, 0, 1), valid);
if (!valid)
return false;
sdf[5] = read_sdf(pos + Vector3f(1, 0, 1), valid);
if (!valid)
return false;
sdf[6] = read_sdf(pos + Vector3f(1, 1, 1), valid);
if (!valid)
return false;
sdf[7] = read_sdf(pos + Vector3f(0, 1, 1), valid);
if (!valid)
return false;
return true;
}
FUSION_DEVICE inline float interpolate_sdf(float &v1, float &v2) const
{
if (fabs(0 - v1) < 1e-6)
return 0;
if (fabs(0 - v2) < 1e-6)
return 1;
if (fabs(v1 - v2) < 1e-6)
return 0;
return (0 - v1) / (v2 - v1);
}
FUSION_DEVICE inline int make_vertex(Vector3f *vertex_array, const Vector3i pos)
{
float sdf[8];
if (!read_sdf_list(sdf, pos))
return -1;
int cube_index = 0;
if (sdf[0] < 0)
cube_index |= 1;
if (sdf[1] < 0)
cube_index |= 2;
if (sdf[2] < 0)
cube_index |= 4;
if (sdf[3] < 0)
cube_index |= 8;
if (sdf[4] < 0)
cube_index |= 16;
if (sdf[5] < 0)
cube_index |= 32;
if (sdf[6] < 0)
cube_index |= 64;
if (sdf[7] < 0)
cube_index |= 128;
if (edge_table[cube_index] == 0)
return -1;
if (edge_table[cube_index] & 1)
{
float val = interpolate_sdf(sdf[0], sdf[1]);
vertex_array[0] = pos + Vector3f(val, 0, 0);
}
if (edge_table[cube_index] & 2)
{
float val = interpolate_sdf(sdf[1], sdf[2]);
vertex_array[1] = pos + Vector3f(1, val, 0);
}
if (edge_table[cube_index] & 4)
{
float val = interpolate_sdf(sdf[2], sdf[3]);
vertex_array[2] = pos + Vector3f(1 - val, 1, 0);
}
if (edge_table[cube_index] & 8)
{
float val = interpolate_sdf(sdf[3], sdf[0]);
vertex_array[3] = pos + Vector3f(0, 1 - val, 0);
}
if (edge_table[cube_index] & 16)
{
float val = interpolate_sdf(sdf[4], sdf[5]);
vertex_array[4] = pos + Vector3f(val, 0, 1);
}
if (edge_table[cube_index] & 32)
{
float val = interpolate_sdf(sdf[5], sdf[6]);
vertex_array[5] = pos + Vector3f(1, val, 1);
}
if (edge_table[cube_index] & 64)
{
float val = interpolate_sdf(sdf[6], sdf[7]);
vertex_array[6] = pos + Vector3f(1 - val, 1, 1);
}
if (edge_table[cube_index] & 128)
{
float val = interpolate_sdf(sdf[7], sdf[4]);
vertex_array[7] = pos + Vector3f(0, 1 - val, 1);
}
if (edge_table[cube_index] & 256)
{
float val = interpolate_sdf(sdf[0], sdf[4]);
vertex_array[8] = pos + Vector3f(0, 0, val);
}
if (edge_table[cube_index] & 512)
{
float val = interpolate_sdf(sdf[1], sdf[5]);
vertex_array[9] = pos + Vector3f(1, 0, val);
}
if (edge_table[cube_index] & 1024)
{
float val = interpolate_sdf(sdf[2], sdf[6]);
vertex_array[10] = pos + Vector3f(1, 1, val);
}
if (edge_table[cube_index] & 2048)
{
float val = interpolate_sdf(sdf[3], sdf[7]);
vertex_array[11] = pos + Vector3f(0, 1, val);
}
return cube_index;
}
template <bool compute_normal = false>
FUSION_DEVICE inline void operator()()
{
int x = blockIdx.y * gridDim.x + blockIdx.x;
if (*triangle_count >= param.num_max_mesh_triangles_ || x >= *block_count)
return;
Vector3f vertex_array[12];
Vector3i pos = block_array[x].pos_ * BLOCK_SIZE;
auto factor = param.voxel_size;
for (int voxel_id = 0; voxel_id < BLOCK_SIZE; ++voxel_id)
{
Vector3i local_pos = Vector3i(threadIdx.x, threadIdx.y, voxel_id);
int cube_index = make_vertex(vertex_array, pos + local_pos);
if (cube_index <= 0)
continue;
for (int i = 0; triangle_table[cube_index][i] != -1; i += 3)
{
uint triangleId = atomicAdd(triangle_count, 1);
if (triangleId < param.num_max_mesh_triangles_)
{
triangles[triangleId * 3] = vertex_array[triangle_table[cube_index][i]] * factor;
triangles[triangleId * 3 + 1] = vertex_array[triangle_table[cube_index][i + 1]] * factor;
triangles[triangleId * 3 + 2] = vertex_array[triangle_table[cube_index][i + 2]] * factor;
if (compute_normal)
{
surface_normal[triangleId * 3] = normalised((triangles[triangleId * 3 + 1] - triangles[triangleId * 3]).cross(triangles[triangleId * 3 + 2] - triangles[triangleId * 3]));
surface_normal[triangleId * 3 + 1] = surface_normal[triangleId * 3 + 2] = surface_normal[triangleId * 3];
}
}
}
}
}
};
__global__ void select_blocks_kernel(BuildVertexArray bva)
{
bva.select_blocks();
}
__global__ void generate_vertex_array_kernel(BuildVertexArray bva)
{
bva.operator()<false>();
}
void create_mesh_vertex_only(
MapStorage map_struct,
MapState state,
uint &block_count,
HashEntry *block_list,
uint &triangle_count,
void *vertex_data)
{
uint *cuda_block_count;
uint *cuda_triangle_count;
safe_call(cudaMalloc(&cuda_block_count, sizeof(uint)));
safe_call(cudaMalloc(&cuda_triangle_count, sizeof(uint)));
safe_call(cudaMemset(cuda_block_count, 0, sizeof(uint)));
safe_call(cudaMemset(cuda_triangle_count, 0, sizeof(uint)));
BuildVertexArray bva;
bva.map_struct = map_struct;
bva.block_array = block_list;
bva.block_count = cuda_block_count;
bva.triangle_count = cuda_triangle_count;
bva.triangles = static_cast<Vector3f *>(vertex_data);
dim3 thread(1024);
dim3 block = dim3(div_up(state.num_total_hash_entries_, thread.x));
select_blocks_kernel<<<block, thread>>>(bva);
safe_call(cudaMemcpy(&block_count, cuda_block_count, sizeof(uint), cudaMemcpyDeviceToHost));
if (block_count == 0)
return;
thread = dim3(8, 8);
block = dim3(div_up(block_count, 16), 16);
generate_vertex_array_kernel<<<block, thread>>>(bva);
safe_call(cudaMemcpy(&triangle_count, cuda_triangle_count, sizeof(uint), cudaMemcpyDeviceToHost));
triangle_count = std::min(triangle_count, (uint)state.num_max_mesh_triangles_);
safe_call(cudaFree(cuda_block_count));
safe_call(cudaFree(cuda_triangle_count));
}
__global__ void generate_vertex_and_normal_array_kernel(BuildVertexArray bva)
{
bva.operator()<true>();
}
void create_mesh_with_normal(
MapStorage map_struct,
MapState state,
uint &block_count,
HashEntry *block_list,
uint &triangle_count,
void *vertex_data,
void *vertex_normal)
{
uint *cuda_block_count;
uint *cuda_triangle_count;
safe_call(cudaMalloc(&cuda_block_count, sizeof(uint)));
safe_call(cudaMalloc(&cuda_triangle_count, sizeof(uint)));
safe_call(cudaMemset(cuda_block_count, 0, sizeof(uint)));
safe_call(cudaMemset(cuda_triangle_count, 0, sizeof(uint)));
BuildVertexArray bva;
bva.map_struct = map_struct;
bva.block_array = block_list;
bva.block_count = cuda_block_count;
bva.triangle_count = cuda_triangle_count;
bva.triangles = static_cast<Vector3f *>(vertex_data);
bva.surface_normal = static_cast<Vector3f *>(vertex_normal);
dim3 thread(1024);
dim3 block = dim3(div_up(state.num_total_hash_entries_, thread.x));
select_blocks_kernel<<<block, thread>>>(bva);
safe_call(cudaMemcpy(&block_count, cuda_block_count, sizeof(uint), cudaMemcpyDeviceToHost));
if (block_count == 0)
return;
thread = dim3(8, 8);
block = dim3(div_up(block_count, 16), 16);
generate_vertex_and_normal_array_kernel<<<block, thread>>>(bva);
safe_call(cudaMemcpy(&triangle_count, cuda_triangle_count, sizeof(uint), cudaMemcpyDeviceToHost));
triangle_count = std::min(triangle_count, (uint)state.num_max_mesh_triangles_);
safe_call(cudaFree(cuda_block_count));
safe_call(cudaFree(cuda_triangle_count));
}
struct BuildVertexAndColourArray
{
MapStorage map_struct;
Vector3f *triangles;
HashEntry *block_array;
uint *block_count;
uint *triangle_count;
Vector3c *vertex_colour;
FUSION_DEVICE inline void select_blocks() const
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ bool scan_required;
if (x == 0)
scan_required = false;
__syncthreads();
uint val = 0;
if (x < param.num_total_hash_entries_ && map_struct.hash_table_[x].ptr_ >= 0)
{
scan_required = true;
val = 1;
}
__syncthreads();
if (scan_required)
{
int offset = exclusive_scan<1024>(val, block_count);
if (offset != -1)
{
block_array[offset] = map_struct.hash_table_[x];
}
}
}
FUSION_DEVICE inline void read_sdf_and_colour(Vector3f pt, bool &valid, float &sdf, Vector3c &colour) const
{
Voxel *vx = NULL;
findVoxel(map_struct, ToVector3i(pt), vx);
if (vx && vx->getWeight() > 1e-3)
{
valid = true;
sdf = vx->getSDF();
colour = vx->rgb;
}
else
{
valid = false;
}
}
FUSION_DEVICE inline bool read_sdf_and_colour_list(float *sdf, Vector3c *colour, Vector3i pos) const
{
bool valid = false;
read_sdf_and_colour(pos + Vector3f(0, 0, 0), valid, sdf[0], colour[0]);
if (!valid)
return false;
read_sdf_and_colour(pos + Vector3f(1, 0, 0), valid, sdf[1], colour[1]);
if (!valid)
return false;
read_sdf_and_colour(pos + Vector3f(1, 1, 0), valid, sdf[2], colour[2]);
if (!valid)
return false;
read_sdf_and_colour(pos + Vector3f(0, 1, 0), valid, sdf[3], colour[3]);
if (!valid)
return false;
read_sdf_and_colour(pos + Vector3f(0, 0, 1), valid, sdf[4], colour[4]);
if (!valid)
return false;
read_sdf_and_colour(pos + Vector3f(1, 0, 1), valid, sdf[5], colour[5]);
if (!valid)
return false;
read_sdf_and_colour(pos + Vector3f(1, 1, 1), valid, sdf[6], colour[6]);
if (!valid)
return false;
read_sdf_and_colour(pos + Vector3f(0, 1, 1), valid, sdf[7], colour[7]);
if (!valid)
return false;
return true;
}
FUSION_DEVICE inline float interpolate_sdf(float &v1, float &v2) const
{
if (fabs(0 - v1) < 1e-6)
return 0;
if (fabs(0 - v2) < 1e-6)
return 1;
if (fabs(v1 - v2) < 1e-6)
return 0;
return (0 - v1) / (v2 - v1);
}
FUSION_DEVICE inline int make_vertex_and_colour(Vector3f *vertex_array, Vector3c *colour_array, const Vector3i pos)
{
float sdf[8];
if (!read_sdf_and_colour_list(sdf, colour_array, pos))
return -1;
int cube_index = 0;
if (sdf[0] < 0)
cube_index |= 1;
if (sdf[1] < 0)
cube_index |= 2;
if (sdf[2] < 0)
cube_index |= 4;
if (sdf[3] < 0)
cube_index |= 8;
if (sdf[4] < 0)
cube_index |= 16;
if (sdf[5] < 0)
cube_index |= 32;
if (sdf[6] < 0)
cube_index |= 64;
if (sdf[7] < 0)
cube_index |= 128;
if (edge_table[cube_index] == 0)
return -1;
if (edge_table[cube_index] & 1)
{
float val = interpolate_sdf(sdf[0], sdf[1]);
vertex_array[0] = pos + Vector3f(val, 0, 0);
}
if (edge_table[cube_index] & 2)
{
float val = interpolate_sdf(sdf[1], sdf[2]);
vertex_array[1] = pos + Vector3f(1, val, 0);
}
if (edge_table[cube_index] & 4)
{
float val = interpolate_sdf(sdf[2], sdf[3]);
vertex_array[2] = pos + Vector3f(1 - val, 1, 0);
}
if (edge_table[cube_index] & 8)
{
float val = interpolate_sdf(sdf[3], sdf[0]);
vertex_array[3] = pos + Vector3f(0, 1 - val, 0);
}
if (edge_table[cube_index] & 16)
{
float val = interpolate_sdf(sdf[4], sdf[5]);
vertex_array[4] = pos + Vector3f(val, 0, 1);
}
if (edge_table[cube_index] & 32)
{
float val = interpolate_sdf(sdf[5], sdf[6]);
vertex_array[5] = pos + Vector3f(1, val, 1);
}
if (edge_table[cube_index] & 64)
{
float val = interpolate_sdf(sdf[6], sdf[7]);
vertex_array[6] = pos + Vector3f(1 - val, 1, 1);
}
if (edge_table[cube_index] & 128)
{
float val = interpolate_sdf(sdf[7], sdf[4]);
vertex_array[7] = pos + Vector3f(0, 1 - val, 1);
}
if (edge_table[cube_index] & 256)
{
float val = interpolate_sdf(sdf[0], sdf[4]);
vertex_array[8] = pos + Vector3f(0, 0, val);
colour_array[8] = colour_array[0];
}
if (edge_table[cube_index] & 512)
{
float val = interpolate_sdf(sdf[1], sdf[5]);
vertex_array[9] = pos + Vector3f(1, 0, val);
colour_array[9] = colour_array[1];
}
if (edge_table[cube_index] & 1024)
{
float val = interpolate_sdf(sdf[2], sdf[6]);
vertex_array[10] = pos + Vector3f(1, 1, val);
colour_array[10] = colour_array[2];
}
if (edge_table[cube_index] & 2048)
{
float val = interpolate_sdf(sdf[3], sdf[7]);
vertex_array[11] = pos + Vector3f(0, 1, val);
colour_array[11] = colour_array[3];
}
return cube_index;
}
FUSION_DEVICE inline void operator()()
{
int x = blockIdx.y * gridDim.x + blockIdx.x;
if (*triangle_count >= param.num_max_mesh_triangles_ || x >= *block_count)
return;
Vector3f vertex_array[12];
Vector3c colour_array[12];
Vector3i pos = block_array[x].pos_ * BLOCK_SIZE;
auto factor = param.voxel_size;
for (int voxel_id = 0; voxel_id < BLOCK_SIZE; ++voxel_id)
{
Vector3i local_pos = Vector3i(threadIdx.x, threadIdx.y, voxel_id);
int cube_index = make_vertex_and_colour(vertex_array, colour_array, pos + local_pos);
if (cube_index <= 0)
continue;
for (int i = 0; triangle_table[cube_index][i] != -1; i += 3)
{
uint triangleId = atomicAdd(triangle_count, 1);
if (triangleId < param.num_max_mesh_triangles_)
{
triangles[triangleId * 3] = vertex_array[triangle_table[cube_index][i]] * factor;
triangles[triangleId * 3 + 1] = vertex_array[triangle_table[cube_index][i + 1]] * factor;
triangles[triangleId * 3 + 2] = vertex_array[triangle_table[cube_index][i + 2]] * factor;
vertex_colour[triangleId * 3] = colour_array[triangle_table[cube_index][i]];
vertex_colour[triangleId * 3 + 1] = colour_array[triangle_table[cube_index][i + 1]];
vertex_colour[triangleId * 3 + 2] = colour_array[triangle_table[cube_index][i + 2]];
}
}
}
}
};
__global__ void select_blocks_coloured_kernel(BuildVertexAndColourArray delegate)
{
delegate.select_blocks();
}
__global__ void generate_vertex_and_colour_array_kernel(BuildVertexAndColourArray delegate)
{
delegate();
}
void create_mesh_with_colour(
MapStorage map_struct,
MapState state,
uint &block_count,
HashEntry *block_list,
uint &triangle_count,
void *vertex_data,
void *vertex_colour)
{
uint *cuda_block_count;
uint *cuda_triangle_count;
safe_call(cudaMalloc(&cuda_block_count, sizeof(uint)));
safe_call(cudaMalloc(&cuda_triangle_count, sizeof(uint)));
safe_call(cudaMemset(cuda_block_count, 0, sizeof(uint)));
safe_call(cudaMemset(cuda_triangle_count, 0, sizeof(uint)));
BuildVertexAndColourArray delegate;
delegate.map_struct = map_struct;
delegate.block_array = block_list;
delegate.block_count = cuda_block_count;
delegate.triangle_count = cuda_triangle_count;
delegate.triangles = static_cast<Vector3f *>(vertex_data);
delegate.vertex_colour = static_cast<Vector3c *>(vertex_colour);
dim3 thread(1024);
dim3 block = dim3(div_up(state.num_total_hash_entries_, thread.x));
select_blocks_coloured_kernel<<<block, thread>>>(delegate);
safe_call(cudaMemcpy(&block_count, cuda_block_count, sizeof(uint), cudaMemcpyDeviceToHost));
if (block_count == 0)
return;
thread = dim3(8, 8);
block = dim3(div_up(block_count, 16), 16);
generate_vertex_and_colour_array_kernel<<<block, thread>>>(delegate);
safe_call(cudaMemcpy(&triangle_count, cuda_triangle_count, sizeof(uint), cudaMemcpyDeviceToHost));
triangle_count = std::min(triangle_count, (uint)state.num_max_mesh_triangles_);
safe_call(cudaFree(cuda_block_count));
safe_call(cudaFree(cuda_triangle_count));
}
} // namespace cuda
} // namespace fusion |
ef677a76ed0bdf2d757fef9d2dee2e51add4fb1d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "blas.h"
#include "math_function.h"
#include "math_function_impl.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
namespace math {
using float16 = paddle::platform::float16;
template struct SetConstant<platform::CUDADeviceContext, platform::float16>;
template struct SetConstant<platform::CUDADeviceContext, float>;
template struct SetConstant<platform::CUDADeviceContext, double>;
template struct SetConstant<platform::CUDADeviceContext, int>;
template struct SetConstant<platform::CUDADeviceContext, int64_t>;
template struct SetConstant<platform::CUDADeviceContext, bool>;
#define DEFINE_GPU_TRANS(RANK) \
template struct Transpose<platform::CUDADeviceContext, float, RANK>; \
template struct Transpose<platform::CUDADeviceContext, double, RANK>; \
template struct Transpose<platform::CUDADeviceContext, float16, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int8_t, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int32_t, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int64_t, RANK>;
DEFINE_GPU_TRANS(1);
DEFINE_GPU_TRANS(2);
DEFINE_GPU_TRANS(3);
DEFINE_GPU_TRANS(4);
DEFINE_GPU_TRANS(5);
DEFINE_GPU_TRANS(6);
struct TensorSetConstantGPU {
TensorSetConstantGPU(const platform::DeviceContext& context,
framework::Tensor* tensor,
float value)
: context_(context), tensor_(tensor), value_(value) {}
template <typename T>
void apply() const {
SetConstant<platform::CUDADeviceContext, T> functor;
functor(reinterpret_cast<const platform::CUDADeviceContext&>(context_),
tensor_,
static_cast<T>(value_));
}
const platform::DeviceContext& context_;
framework::Tensor* tensor_;
float value_;
};
template <>
void set_constant_with_place<platform::CUDAPlace>(
const platform::DeviceContext& context,
framework::Tensor* tensor,
float value) {
framework::VisitDataType(tensor->type(),
TensorSetConstantGPU(context, tensor, value));
}
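// framework::VisitDataType dispatches on the tensor's runtime dtype and invokes
// TensorSetConstantGPU::apply<T>() with the matching static type, so a single
// float value can initialize tensors of any supported element type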
template <typename T>
__global__ void RowwiseAddKernel(
const T* a, const T* b, T* c, int width, int num) {
T tmp = 1.0 / width;
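  // tmp caches the reciprocal of the row width; below, h = i * tmp recovers the
  // row index and w = i - h * width the column, so b (of length width) is
  // broadcast across every row of a in a grid-stride loop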
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num;
i += blockDim.x * gridDim.x) {
int h = i * tmp;
int w = i - h * width;
c[i] = a[i] + b[w];
}
}
template <typename T>
struct RowwiseAdd<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& vector,
framework::Tensor* output) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector.numel(), size);
PADDLE_ENFORCE_EQ(output->dims(), in_dims);
int blocks = 512;
int grids = (input.numel() + blocks - 1) / blocks;
hipLaunchKernelGGL(( RowwiseAddKernel<T>), dim3(grids), dim3(blocks), 0, context.stream(),
input.data<T>(),
vector.data<T>(),
output->data<T>(),
static_cast<int>(in_dims[1]),
static_cast<int>(input.numel()));
}
};
template struct RowwiseAdd<platform::CUDADeviceContext, float>;
template struct RowwiseAdd<platform::CUDADeviceContext, double>;
template struct ColwiseSum<platform::CUDADeviceContext, float>;
template struct ColwiseSum<platform::CUDADeviceContext, int>;
template struct ColwiseSum<platform::CUDADeviceContext, int64_t>;
// template struct ColwiseSum<platform::CUDADeviceContext, double>;
// The ColwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void ColwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context,
const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), size);
framework::Tensor one;
one.mutable_data<double>({in_dims[0]}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
GetBlas<platform::CUDADeviceContext, double>(context).GEMV(
true,
static_cast<int>(in_dims[0]),
static_cast<int>(in_dims[1]),
1.0,
input.data<double>(),
one.data<double>(),
0.0,
vector->data<double>());
}
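// the specialization above computes the column sum as a GEMV,
// vector = input^T * ones(in_dims[0]), which sidesteps the generic ColwiseSum
// path that the comment above reports failing for double in debug mode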
template struct RowwiseSum<platform::CUDADeviceContext, float>;
// template struct RowwiseSum<platform::CUDADeviceContext, double>;
// TODO(zcd): Following ColwiseSum format, need to confirm.
// The RowwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void RowwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context,
const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), in_dims[0]);
framework::Tensor one;
one.mutable_data<double>({size}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
GetBlas<platform::CUDADeviceContext, double>(context).GEMV(
true,
static_cast<int>(in_dims[1]),
static_cast<int>(in_dims[0]),
1.0,
one.data<double>(),
input.data<double>(),
0.0,
vector->data<double>());
}
template struct RowwiseMean<platform::CUDADeviceContext, float>;
template struct RowwiseMean<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
| ef677a76ed0bdf2d757fef9d2dee2e51add4fb1d.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "blas.h"
#include "math_function.h"
#include "math_function_impl.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
namespace math {
using float16 = paddle::platform::float16;
template struct SetConstant<platform::CUDADeviceContext, platform::float16>;
template struct SetConstant<platform::CUDADeviceContext, float>;
template struct SetConstant<platform::CUDADeviceContext, double>;
template struct SetConstant<platform::CUDADeviceContext, int>;
template struct SetConstant<platform::CUDADeviceContext, int64_t>;
template struct SetConstant<platform::CUDADeviceContext, bool>;
#define DEFINE_GPU_TRANS(RANK) \
template struct Transpose<platform::CUDADeviceContext, float, RANK>; \
template struct Transpose<platform::CUDADeviceContext, double, RANK>; \
template struct Transpose<platform::CUDADeviceContext, float16, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int8_t, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int32_t, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int64_t, RANK>;
DEFINE_GPU_TRANS(1);
DEFINE_GPU_TRANS(2);
DEFINE_GPU_TRANS(3);
DEFINE_GPU_TRANS(4);
DEFINE_GPU_TRANS(5);
DEFINE_GPU_TRANS(6);
struct TensorSetConstantGPU {
TensorSetConstantGPU(const platform::DeviceContext& context,
framework::Tensor* tensor,
float value)
: context_(context), tensor_(tensor), value_(value) {}
template <typename T>
void apply() const {
SetConstant<platform::CUDADeviceContext, T> functor;
functor(reinterpret_cast<const platform::CUDADeviceContext&>(context_),
tensor_,
static_cast<T>(value_));
}
const platform::DeviceContext& context_;
framework::Tensor* tensor_;
float value_;
};
template <>
void set_constant_with_place<platform::CUDAPlace>(
const platform::DeviceContext& context,
framework::Tensor* tensor,
float value) {
framework::VisitDataType(tensor->type(),
TensorSetConstantGPU(context, tensor, value));
}
template <typename T>
__global__ void RowwiseAddKernel(
const T* a, const T* b, T* c, int width, int num) {
T tmp = 1.0 / width;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num;
i += blockDim.x * gridDim.x) {
int h = i * tmp;
int w = i - h * width;
c[i] = a[i] + b[w];
}
}
template <typename T>
struct RowwiseAdd<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& vector,
framework::Tensor* output) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector.numel(), size);
PADDLE_ENFORCE_EQ(output->dims(), in_dims);
int blocks = 512;
int grids = (input.numel() + blocks - 1) / blocks;
RowwiseAddKernel<T><<<grids, blocks, 0, context.stream()>>>(
input.data<T>(),
vector.data<T>(),
output->data<T>(),
static_cast<int>(in_dims[1]),
static_cast<int>(input.numel()));
}
};
template struct RowwiseAdd<platform::CUDADeviceContext, float>;
template struct RowwiseAdd<platform::CUDADeviceContext, double>;
template struct ColwiseSum<platform::CUDADeviceContext, float>;
template struct ColwiseSum<platform::CUDADeviceContext, int>;
template struct ColwiseSum<platform::CUDADeviceContext, int64_t>;
// template struct ColwiseSum<platform::CUDADeviceContext, double>;
// The ColwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void ColwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context,
const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), size);
framework::Tensor one;
one.mutable_data<double>({in_dims[0]}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
GetBlas<platform::CUDADeviceContext, double>(context).GEMV(
true,
static_cast<int>(in_dims[0]),
static_cast<int>(in_dims[1]),
1.0,
input.data<double>(),
one.data<double>(),
0.0,
vector->data<double>());
}
template struct RowwiseSum<platform::CUDADeviceContext, float>;
// template struct RowwiseSum<platform::CUDADeviceContext, double>;
// TODO(zcd): Following ColwiseSum format, need to confirm.
// The RowwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void RowwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context,
const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), in_dims[0]);
framework::Tensor one;
one.mutable_data<double>({size}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
GetBlas<platform::CUDADeviceContext, double>(context).GEMV(
true,
static_cast<int>(in_dims[1]),
static_cast<int>(in_dims[0]),
1.0,
one.data<double>(),
input.data<double>(),
0.0,
vector->data<double>());
}
template struct RowwiseMean<platform::CUDADeviceContext, float>;
template struct RowwiseMean<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
05ef9dccc6d11b64a5039a68c4e5d1ff4d6488e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/operator.h"
#include "spatial_sigmoid_op.h"
namespace caffe2 {
namespace {
__global__ void SpatialSigmoidKernel(const int N,
const float* logits, const float* targets, float* loss) {
CUDA_1D_KERNEL_LOOP(index, N) {
loss[index] = -1. * logits[index] * (targets[index] - (logits[index] >= 0)) +
logf(1 + expf(logits[index] - 2 * logits[index] * (logits[index] >= 0)));
}
}
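// The loss expression above is an algebraically rearranged, numerically stable
// form of sigmoid cross-entropy with logits,
//   loss = max(x, 0) - x * t + log(1 + exp(-|x|)),
// so the argument of expf() is never a large positive number. A minimal
// reference sketch of the same formula (SigmoidXentReference is a hypothetical
// helper, for illustration only):
inline float SigmoidXentReference(float x, float t) {
  return fmaxf(x, 0.f) - x * t + log1pf(expf(-fabsf(x)));
}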
} // namespace
template <>
bool SpatialSigmoidOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
int N = X.size();
auto* loss = Output(0);
loss->ResizeLike(X);
hipLaunchKernelGGL(( SpatialSigmoidKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, X.data<float>(), Y.data<float>(), loss->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(SpatialSigmoid,
SpatialSigmoidOp<float, CUDAContext>);
} | 05ef9dccc6d11b64a5039a68c4e5d1ff4d6488e4.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/core/operator.h"
#include "spatial_sigmoid_op.h"
namespace caffe2 {
namespace {
__global__ void SpatialSigmoidKernel(const int N,
const float* logits, const float* targets, float* loss) {
CUDA_1D_KERNEL_LOOP(index, N) {
loss[index] = -1. * logits[index] * (targets[index] - (logits[index] >= 0)) +
logf(1 + expf(logits[index] - 2 * logits[index] * (logits[index] >= 0)));
}
}
} // namespace
template <>
bool SpatialSigmoidOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
int N = X.size();
auto* loss = Output(0);
loss->ResizeLike(X);
SpatialSigmoidKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, X.data<float>(), Y.data<float>(), loss->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(SpatialSigmoid,
SpatialSigmoidOp<float, CUDAContext>);
} |
60fa39de32d03369655fde3c9a47d8a656178bfd.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/operators/generic/constant.h"
#include <hip/hip_runtime.h>
#include <vector>
#include "dali/core/convert.h"
#include "dali/core/static_switch.h"
#include "dali/pipeline/data/views.h"
#include "dali/kernels/common/scatter_gather.h"
namespace dali {
namespace {
template <size_t size, size_t alignment>
struct alignas(alignment) Placeholder {
char payload[size]; // NOLINT(runtime/arrays)
};
template <typename T>
inline auto opaque(const T &value) {
Placeholder<sizeof(T), alignof(T)> placeholder;
memcpy(placeholder.payload, &value, sizeof(T));
return placeholder;
}
template <size_t size, size_t alignment>
__global__ void Fill(void *data, size_t count, Placeholder<size, alignment> value) {
auto i = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
if (i < count)
static_cast<Placeholder<size, alignment>*>(data)[i] = value;
}
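// Fill is deliberately type-erased: it is instantiated once per (size, alignment)
// pair rather than once per element type, and opaque() bit-copies the already
// converted value into that Placeholder. A hypothetical float launch, with the
// buffer and launch-parameter names assumed for illustration only:
//
//   float *out = ...;  // device buffer holding `count` floats
//   hipLaunchKernelGGL((Fill), dim3(blocks), dim3(threads), 0, stream,
//                      out, count, opaque(1.5f));
//
// float, int32 and every other 4-byte, 4-aligned type share that instantiation.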
// TODO(klecki): [Conditional] - replace by sharing repeated sample
template <typename Dst, typename Src>
void FillTensorList(
TensorList<GPUBackend> &dst, const TensorListShape<> &shape, const std::vector<Src> &src,
hipStream_t stream) {
dst.set_type<Dst>();
dst.Resize(shape);
if (shape.num_samples() == 0)
return;
int64_t sample_size = shape[0].num_elements();
if (src.size() == 1) {
int64_t threads = 1024;
int64_t blocks = div_ceil(sample_size, threads);
Dst *data = dst.mutable_tensor<Dst>(0);
hipLaunchKernelGGL(( Fill), dim3(dim3(blocks)), dim3(dim3(threads)), 0, stream, data, sample_size,
opaque(ConvertSat<Dst>(src[0])));
} else {
SmallVector<Dst, 64> tmp;
assert(static_cast<int>(src.size()) == sample_size);
tmp.resize(src.size());
for (size_t i = 0; i < tmp.size(); i++)
tmp[i] = ConvertSat<Dst>(src[i]);
int n = tmp.size() * sizeof(Dst);
CUDA_CALL(
hipMemcpyAsync(dst.mutable_tensor<Dst>(0), tmp.data(), n, hipMemcpyHostToDevice, stream));
}
kernels::ScatterGatherGPU scatter_gather;
for (int i = 1; i < shape.num_samples(); i++) {
scatter_gather.AddCopy(dst.mutable_tensor<Dst>(i), dst.mutable_tensor<Dst>(0),
sample_size * sizeof(Dst));
}
scatter_gather.Run(stream);
}
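// The per-batch cost above stays constant in the number of samples: sample 0 is
// materialized once (a single Fill launch for a scalar source, or one
// host-to-device copy for a per-element vector), and every remaining sample is
// a device-to-device copy batched through ScatterGatherGPU. A hypothetical
// call, with `shape`, `batch_value` and `stream` assumed for illustration:
//
//   TensorList<GPUBackend> out;
//   std::vector<float> batch_value = {0.f};  // one element => broadcast fill
//   FillTensorList<float>(out, shape, batch_value, stream);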
} // namespace
template <>
void Constant<GPUBackend>::RunImpl(Workspace &ws) {
output_.set_order(ws.output_order());
if (output_.num_samples() == 0) {
TYPE_SWITCH(output_type_, type2id, type, CONSTANT_OP_SUPPORTED_TYPES,
(
if (!fdata_.empty()) {
FillTensorList<type>(output_, max_output_shape_, fdata_, ws.stream());
} else {
FillTensorList<type>(output_, max_output_shape_, idata_, ws.stream());
}
), (DALI_FAIL(make_string("Unsupported type: ", output_type_)))); // NOLINT
}
auto &out = ws.Output<GPUBackend>(0);
out.Reset();
out.ShareData(output_);
out.Resize(output_shape_);
int N = output_shape_.num_samples();
for (int i = 0; i < N; i++) {
assert(out.raw_tensor(i) == output_.raw_tensor(i));
}
out.SetLayout(layout_);
}
DALI_REGISTER_OPERATOR(Constant, Constant<GPUBackend>, GPU);
} // namespace dali
| 60fa39de32d03369655fde3c9a47d8a656178bfd.cu | // Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/operators/generic/constant.h"
#include <cuda_runtime.h>
#include <vector>
#include "dali/core/convert.h"
#include "dali/core/static_switch.h"
#include "dali/pipeline/data/views.h"
#include "dali/kernels/common/scatter_gather.h"
namespace dali {
namespace {
template <size_t size, size_t alignment>
struct alignas(alignment) Placeholder {
char payload[size]; // NOLINT(runtime/arrays)
};
template <typename T>
inline auto opaque(const T &value) {
Placeholder<sizeof(T), alignof(T)> placeholder;
memcpy(placeholder.payload, &value, sizeof(T));
return placeholder;
}
template <size_t size, size_t alignment>
__global__ void Fill(void *data, size_t count, Placeholder<size, alignment> value) {
auto i = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
if (i < count)
static_cast<Placeholder<size, alignment>*>(data)[i] = value;
}
// TODO(klecki): [Conditional] - replace by sharing repeated sample
template <typename Dst, typename Src>
void FillTensorList(
TensorList<GPUBackend> &dst, const TensorListShape<> &shape, const std::vector<Src> &src,
cudaStream_t stream) {
dst.set_type<Dst>();
dst.Resize(shape);
if (shape.num_samples() == 0)
return;
int64_t sample_size = shape[0].num_elements();
if (src.size() == 1) {
int64_t threads = 1024;
int64_t blocks = div_ceil(sample_size, threads);
Dst *data = dst.mutable_tensor<Dst>(0);
Fill<<<dim3(blocks), dim3(threads), 0, stream>>>(data, sample_size,
opaque(ConvertSat<Dst>(src[0])));
} else {
SmallVector<Dst, 64> tmp;
assert(static_cast<int>(src.size()) == sample_size);
tmp.resize(src.size());
for (size_t i = 0; i < tmp.size(); i++)
tmp[i] = ConvertSat<Dst>(src[i]);
int n = tmp.size() * sizeof(Dst);
CUDA_CALL(
cudaMemcpyAsync(dst.mutable_tensor<Dst>(0), tmp.data(), n, cudaMemcpyHostToDevice, stream));
}
kernels::ScatterGatherGPU scatter_gather;
for (int i = 1; i < shape.num_samples(); i++) {
scatter_gather.AddCopy(dst.mutable_tensor<Dst>(i), dst.mutable_tensor<Dst>(0),
sample_size * sizeof(Dst));
}
scatter_gather.Run(stream);
}
} // namespace
template <>
void Constant<GPUBackend>::RunImpl(Workspace &ws) {
output_.set_order(ws.output_order());
if (output_.num_samples() == 0) {
TYPE_SWITCH(output_type_, type2id, type, CONSTANT_OP_SUPPORTED_TYPES,
(
if (!fdata_.empty()) {
FillTensorList<type>(output_, max_output_shape_, fdata_, ws.stream());
} else {
FillTensorList<type>(output_, max_output_shape_, idata_, ws.stream());
}
), (DALI_FAIL(make_string("Unsupported type: ", output_type_)))); // NOLINT
}
auto &out = ws.Output<GPUBackend>(0);
out.Reset();
out.ShareData(output_);
out.Resize(output_shape_);
int N = output_shape_.num_samples();
for (int i = 0; i < N; i++) {
assert(out.raw_tensor(i) == output_.raw_tensor(i));
}
out.SetLayout(layout_);
}
DALI_REGISTER_OPERATOR(Constant, Constant<GPUBackend>, GPU);
} // namespace dali
|
76223d7d16a61f2041ea91f178568cf1f6266e58.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <c10/util/Exception.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHTensorMathReduce.cuh>
namespace at {
namespace native {
namespace {
// Block size for weight_norm_*_first_dim_kernel.
// Currently, kernels are non-persistent.
// Dialing up the block size to, say 1024, can improve performance by
// increasing the amount of cache available per block, which can improve cache hit rate.
// However, this is less efficient for short rows. 256 is pretty versatile.
// May be worth implementing heuristics later.
#define BLOCK 256
// Block size for weight_norm_*_last_dim_kernel.
// This is trickier than the first_dim case because we must make blocks
// at least 16 fast elements wide to ensure fully-coalesced half-precision accesses.
// Since output-element parallelism is along the fast dimension, this reduces the number of
// blocks we can launch by 16X.
#define TILE_W 16
// Somewhat versatile strategy: max out intra-block parallelism by extending
// blocks across the slow dimension up to the hardware-max block size of 1024.
#define TILE_H 64
template<typename T, typename ReduceOp>
__device__ __forceinline__ void reduce_block_into_lanes
(T *x,
T val,
int lanes, // lanes is intended to be <= 32.
ReduceOp reduceOp)
{
int tid = threadIdx.x + threadIdx.y*blockDim.x;
int blockSize = blockDim.x*blockDim.y; // blockSize is intended to be a multiple of 32.
if(blockSize >= 64)
{
x[tid] = val;
__syncthreads();
}
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for(int i = (blockSize >> 1); i >= 64; i >>= 1)
{
if(tid < i)
x[tid] = reduceOp(x[tid], x[tid+i]);
__syncthreads();
}
if(tid < 32)
{
T final;
if(blockSize >= 64)
final = reduceOp(x[tid], x[tid+32]);
else
final = val;
// __SYNCWARP();
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for(int i = 16; i >= lanes; i >>= 1)
final = reduceOp(final, WARP_SHFL_DOWN(final, i));
if(tid < lanes)
x[tid] = final; // EpilogueOp
}
// Make sure the smem result is visible to all warps.
__syncthreads();
}
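// Shape of the reduction above: partial values are first folded in shared
// memory down to 64, the first warp then combines pairs 32 apart and finishes
// in registers with shuffles until `lanes` results remain in x[0..lanes-1].
// For a plain sum on CUDA 9+ the shuffle stage could be sketched as
//
//   for (int offset = 16; offset >= lanes; offset >>= 1)
//     val += __shfl_down_sync(0xffffffffu, val, offset);
//
// WARP_SHFL_DOWN used above is the THC wrapper around this intrinsic, hiding
// the CUDA/HIP difference.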
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_first_dim_kernel
(scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int rowSize)
{
// We are norming each slowest-dim row of the tensor separately.
// For now, assign one block to each row.
const int tid = threadIdx.x;
const int row = blockIdx.x;
const int stride = blockDim.x;
// Logical index offset for this flattened row
const int rowStart = row*rowSize;
// Hack to get around nvcc complaining when an smem array is declared with the same name
// but different types in different kernels (in this case different instantiations)
// extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
}
reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
accscalar_t result = s[0];
result = sqrtf(result);
if(tid == 0)
norms[row] = result;
// Broadcast load, could use shared memory instead.
accscalar_t g_this_row = scalar_cast<accscalar_t>(g[row]);
accscalar_t rnorm = 1.f/result; // for consistency with backward kernel
// Write data to output
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
w[i+rowStart] = scalar_cast<scalar_t>(g_this_row*val_f*rnorm);
}
}
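// In formulas, this first-dim kernel computes, for every slowest-dim row r:
//   norms[r] = ||v_r||_2 = sqrt(sum_i v_r[i]^2)
//   w_r[i]   = g[r] * v_r[i] / norms[r]
// i.e. the weight-normalization reparameterization w = g * v / ||v||, with one
// block per row and the squared norm reduced via reduce_block_into_lanes.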
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_last_dim_kernel
(
scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int fast_dim_size,
const int slower_dims_size
)
{
const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;
extern __shared__ char buf[];
accscalar_t* alloc = (accscalar_t*)buf;
accscalar_t* s = &alloc[0];
accscalar_t* rnorms_this_block = &alloc[blockDim.x*blockDim.y];
accscalar_t thread_sum = 0.f;
int slower_dims_location = threadIdx.y;
int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());
// Better to pass an EpilogueOp to reduce_block_into_lanes?
if(threadIdx.y == 0)
{
accscalar_t result = s[threadIdx.x];
accscalar_t norm_this_col = sqrtf(result);
norms[fast_dim_location] = norm_this_col;
rnorms_this_block[threadIdx.x] = 1.f/norm_this_col;
}
__syncthreads();
accscalar_t g_this_col = scalar_cast<accscalar_t>(g[fast_dim_location]);
accscalar_t rnorm = rnorms_this_block[threadIdx.x];
slower_dims_location = threadIdx.y;
currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
w[currentIdx] = scalar_cast<scalar_t>(g_this_col*val_f*rnorm);
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_bwd_first_dim_kernel
(scalar_t* __restrict__ grad_v,
scalar_t* __restrict__ grad_g,
const scalar_t* __restrict__ grad_w,
const scalar_t* __restrict__ saved_v,
const scalar_t* __restrict__ saved_g,
const accscalar_t* __restrict__ saved_norms,
const int rowSize)
{
// For now, assign one block to each row.
const int tid = threadIdx.x;
const int row = blockIdx.x;
const int stride = blockDim.x;
// Logical index offset for this flattened row
const int rowStart = row*rowSize;
// Hack to get around nvcc complaining when an smem array is declared with the same name
// but different types in different kernels (in this case different instantiations)
// extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[i+rowStart]);
accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[i+rowStart]);
thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here
}
reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
accscalar_t result = s[0];
// Could choose to save reciprocal of norm instead I suppose, but norms is probably
// more handy to keep around.
// Broadcast load; could use shared memory instead.
accscalar_t rnorm = 1.f/saved_norms[row];
accscalar_t rnorm3 = rnorm*rnorm*rnorm;
// Write g gradients.
if(tid == 0)
grad_g[row] = scalar_cast<scalar_t>(result*rnorm);
// Broadcast load, could use shared memory instead.
accscalar_t g_this_row = scalar_cast<accscalar_t>(saved_g[row]);
// Write v gradients. We are reusing values that were loaded earlier, so there
// is an optimization opportunity here (store values persistently).
for(int j = tid; j < rowSize; j += stride )
{
accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[j+rowStart]);
accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[j+rowStart]);
accscalar_t grad_vj = g_this_row*(rnorm*grad_wj - rnorm3*saved_vj*result);
grad_v[j+rowStart] = scalar_cast<scalar_t>(grad_vj);
}
}
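// In formulas, with r = ||v_row|| and s = sum_i grad_w[i] * v[i] (held in
// `result` above):
//   grad_g    = s / r
//   grad_v[i] = g * (grad_w[i] / r - v[i] * s / r^3)
// which is the gradient of w = g * v / r; rnorm and rnorm3 are 1/r and 1/r^3.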
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_bwd_last_dim_kernel
(scalar_t* __restrict__ grad_v,
scalar_t* __restrict__ grad_g,
const scalar_t* __restrict__ grad_w,
const scalar_t* __restrict__ saved_v,
const scalar_t* __restrict__ saved_g,
const accscalar_t* __restrict__ saved_norms,
const int fast_dim_size,
const int slower_dims_size)
{
const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
int slower_dims_location = threadIdx.y;
int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[currentIdx]);
accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[currentIdx]);
thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());
accscalar_t result = s[threadIdx.x];
// Broadcast load; could use shared memory instead.
accscalar_t rnorm = 1.f/saved_norms[fast_dim_location];
accscalar_t rnorm3 = rnorm*rnorm*rnorm;
// Write g gradients.
if(threadIdx.y == 0)
grad_g[fast_dim_location] = scalar_cast<scalar_t>(result*rnorm);
// Entire block pulls these values, could use shared memory instead.
accscalar_t g_this_col = scalar_cast<accscalar_t>(saved_g[fast_dim_location]);
// Write v gradients.
slower_dims_location = threadIdx.y;
currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[currentIdx]);
accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[currentIdx]);
accscalar_t grad_vj = g_this_col*(rnorm*grad_wj - rnorm3*saved_vj*result);
grad_v[currentIdx] = scalar_cast<scalar_t>(grad_vj);
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
}
} // anonymous namespace
std::tuple<Tensor,Tensor> weight_norm_cuda
(const Tensor & v,
const Tensor & g,
int64_t dim)
{
auto w = at::empty_like(v);
// weight_norm_fused does have a derivative defined in derivatives.yaml, therefore, VariableType.cpp
// sends the unpacked g.data() as the argument. In other words, we expect "g" is a bare Tensor here.
// norms is only needed to stash for backward.
// g.scalar_type() may be at::ScalarType::Double, Float, or Half.
// If Half, stash norms as float.
at::ScalarType AccType = g.scalar_type() == at::ScalarType::Half ?
at::ScalarType::Float : g.scalar_type();
// Will this create norms on the same device as g, regardless of what the thread's default
// current device is? I believe so, because Type::* functions are DeviceGuard()ed.
auto norms = at::empty_strided(g.sizes(), g.strides(), g.options().dtype(AccType));
const int ndims = v.dim();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= v.size(i);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.scalar_type(),
"weight_norm_fwd_first_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( weight_norm_fwd_first_dim_kernel<scalar_t, accscalar_t>)
, dim3(v.size(0)),
dim3(BLOCK),
BLOCK*sizeof(accscalar_t),
stream,
w.data<scalar_t>(),
norms.data<accscalar_t>(),
v.data<scalar_t>(),
g.data<scalar_t>(),
rowSize);
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= v.size(i);
int fast_dim_size = v.size(ndims-1);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.scalar_type(),
"weight_norm_fwd_last_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( weight_norm_fwd_last_dim_kernel<scalar_t, accscalar_t>)
, dim3((fast_dim_size+TILE_W-1)/TILE_W),
dim3(dim3(TILE_W,TILE_H)),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream,
w.data<scalar_t>(),
norms.data<accscalar_t>(),
v.data<scalar_t>(),
g.data<scalar_t>(),
fast_dim_size,
slower_dims_size);
});
}
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, this is the best we can do.
THCudaCheck(hipGetLastError());
return std::tuple<Tensor, Tensor>{w, norms};
}
std::tuple<Tensor, Tensor> weight_norm_cuda_backward
(const Tensor & grad_w,
const Tensor & saved_v,
const Tensor & saved_g,
const Tensor & saved_norms,
int64_t dim)
{
// These checks should always succeed, because weight_norm_fused_backward should only
// ever be recorded in the autograd graph via weight_norm, which passes contiguous v and g.
TORCH_CHECK(saved_v.is_contiguous(), "saved_v must be contiguous");
TORCH_CHECK(saved_g.is_contiguous(), "saved_g must be contiguous");
TORCH_CHECK(saved_norms.is_contiguous(), "saved_norms must be contiguous");
TORCH_CHECK(dim == 0 || dim == saved_v.dim() - 1, "fused kernels can only be applied for first or last dim")
auto grad_v = at::empty_like(saved_v);
auto grad_g = at::empty_like(saved_g);
const int ndims = saved_v.dim();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= saved_v.size(i);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(saved_v.scalar_type(),
"weight_norm_bwd_first_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( weight_norm_bwd_first_dim_kernel<scalar_t, accscalar_t>)
, dim3(grad_w.size(0)),
dim3(BLOCK),
BLOCK*sizeof(accscalar_t),
stream,
grad_v.data<scalar_t>(),
grad_g.data<scalar_t>(),
grad_w.data<scalar_t>(),
saved_v.data<scalar_t>(),
saved_g.data<scalar_t>(),
saved_norms.data<accscalar_t>(),
rowSize);
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size because they involve dynamically indexing an array.
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= saved_v.size(i);
int fast_dim_size = saved_v.size(ndims-1);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(saved_v.scalar_type(),
"weight_norm_bwd_last_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( weight_norm_bwd_last_dim_kernel<scalar_t, accscalar_t>)
, dim3((fast_dim_size+TILE_W-1)/TILE_W),
dim3(dim3(TILE_W,TILE_H)),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream,
grad_v.data<scalar_t>(),
grad_g.data<scalar_t>(),
grad_w.data<scalar_t>(),
saved_v.data<scalar_t>(),
saved_g.data<scalar_t>(),
saved_norms.data<accscalar_t>(),
fast_dim_size,
slower_dims_size);
});
}
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, this is the best we can do.
THCudaCheck(hipGetLastError());
return std::tuple<Tensor, Tensor>{grad_v, grad_g};
}
#undef BLOCK
#undef TILE_W
#undef TILE_H
} // namespace native
} // namespace at
| 76223d7d16a61f2041ea91f178568cf1f6266e58.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <c10/util/Exception.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCTensorMathReduce.cuh>
namespace at {
namespace native {
namespace {
// Block size for weight_norm_*_first_dim_kernel.
// Currently, kernels are non-persistent.
// Dialing up the block size to, say 1024, can improve performance by
// increasing the amount of cache available per block, which can improve cache hit rate.
// However, this is less efficient for short rows. 256 is pretty versatile.
// May be worth implementing heuristics later.
#define BLOCK 256
// Block size for weight_norm_*_last_dim_kernel.
// This is trickier than the first_dim case because we must make blocks
// at least 16 fast elements wide to ensure fully-coalesced half-precision accesses.
// Since output-element parallelism is along the fast dimension, this reduces the number of
// blocks we can launch by 16X.
#define TILE_W 16
// Somewhat versatile strategy: max out intra-block parallelism by extending
// blocks across the slow dimension up to the hardware-max block size of 1024.
#define TILE_H 64
template<typename T, typename ReduceOp>
__device__ __forceinline__ void reduce_block_into_lanes
(T *x,
T val,
int lanes, // lanes is intended to be <= 32.
ReduceOp reduceOp)
{
int tid = threadIdx.x + threadIdx.y*blockDim.x;
int blockSize = blockDim.x*blockDim.y; // blockSize is intended to be a multiple of 32.
if(blockSize >= 64)
{
x[tid] = val;
__syncthreads();
}
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for(int i = (blockSize >> 1); i >= 64; i >>= 1)
{
if(tid < i)
x[tid] = reduceOp(x[tid], x[tid+i]);
__syncthreads();
}
if(tid < 32)
{
T final;
if(blockSize >= 64)
final = reduceOp(x[tid], x[tid+32]);
else
final = val;
// __SYNCWARP();
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for(int i = 16; i >= lanes; i >>= 1)
final = reduceOp(final, WARP_SHFL_DOWN(final, i));
if(tid < lanes)
x[tid] = final; // EpilogueOp
}
// Make sure the smem result is visible to all warps.
__syncthreads();
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_first_dim_kernel
(scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int rowSize)
{
// We are norming each slowest-dim row of the tensor separately.
// For now, assign one block to each row.
const int tid = threadIdx.x;
const int row = blockIdx.x;
const int stride = blockDim.x;
// Logical index offset for this flattened row
const int rowStart = row*rowSize;
// Hack to get around nvcc complaining when an smem array is declared with the same name
// but different types in different kernels (in this case different instantiations)
// extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
}
reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
accscalar_t result = s[0];
result = sqrtf(result);
if(tid == 0)
norms[row] = result;
// Broadcast load, could use shared memory instead.
accscalar_t g_this_row = scalar_cast<accscalar_t>(g[row]);
accscalar_t rnorm = 1.f/result; // for consistency with backward kernel
// Write data to output
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
w[i+rowStart] = scalar_cast<scalar_t>(g_this_row*val_f*rnorm);
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_last_dim_kernel
(
scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int fast_dim_size,
const int slower_dims_size
)
{
const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;
extern __shared__ char buf[];
accscalar_t* alloc = (accscalar_t*)buf;
accscalar_t* s = &alloc[0];
accscalar_t* rnorms_this_block = &alloc[blockDim.x*blockDim.y];
accscalar_t thread_sum = 0.f;
int slower_dims_location = threadIdx.y;
int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());
// Better to pass an EpilogueOp to reduce_block_into_lanes?
if(threadIdx.y == 0)
{
accscalar_t result = s[threadIdx.x];
accscalar_t norm_this_col = sqrtf(result);
norms[fast_dim_location] = norm_this_col;
rnorms_this_block[threadIdx.x] = 1.f/norm_this_col;
}
__syncthreads();
accscalar_t g_this_col = scalar_cast<accscalar_t>(g[fast_dim_location]);
accscalar_t rnorm = rnorms_this_block[threadIdx.x];
slower_dims_location = threadIdx.y;
currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
w[currentIdx] = scalar_cast<scalar_t>(g_this_col*val_f*rnorm);
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_bwd_first_dim_kernel
(scalar_t* __restrict__ grad_v,
scalar_t* __restrict__ grad_g,
const scalar_t* __restrict__ grad_w,
const scalar_t* __restrict__ saved_v,
const scalar_t* __restrict__ saved_g,
const accscalar_t* __restrict__ saved_norms,
const int rowSize)
{
// For now, assign one block to each row.
const int tid = threadIdx.x;
const int row = blockIdx.x;
const int stride = blockDim.x;
// Logical index offset for this flattened row
const int rowStart = row*rowSize;
// Hack to get around nvcc complaining when an smem array is declared with the same name
// but different types in different kernels (in this case different instantiations)
// extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[i+rowStart]);
accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[i+rowStart]);
thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here
}
reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
accscalar_t result = s[0];
// Could choose to save reciprocal of norm instead I suppose, but norms is probably
// more handy to keep around.
// Broadcast load; could use shared memory instead.
accscalar_t rnorm = 1.f/saved_norms[row];
accscalar_t rnorm3 = rnorm*rnorm*rnorm;
// Write g gradients.
if(tid == 0)
grad_g[row] = scalar_cast<scalar_t>(result*rnorm);
// Broadcast load, could use shared memory instead.
accscalar_t g_this_row = scalar_cast<accscalar_t>(saved_g[row]);
// Write v gradients. We are reusing values that were loaded earlier, so there
// is an optimization opportunity here (store values persistently).
for(int j = tid; j < rowSize; j += stride )
{
accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[j+rowStart]);
accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[j+rowStart]);
accscalar_t grad_vj = g_this_row*(rnorm*grad_wj - rnorm3*saved_vj*result);
grad_v[j+rowStart] = scalar_cast<scalar_t>(grad_vj);
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_bwd_last_dim_kernel
(scalar_t* __restrict__ grad_v,
scalar_t* __restrict__ grad_g,
const scalar_t* __restrict__ grad_w,
const scalar_t* __restrict__ saved_v,
const scalar_t* __restrict__ saved_g,
const accscalar_t* __restrict__ saved_norms,
const int fast_dim_size,
const int slower_dims_size)
{
const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
int slower_dims_location = threadIdx.y;
int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[currentIdx]);
accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[currentIdx]);
thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());
accscalar_t result = s[threadIdx.x];
// Broadcast load; could use shared memory instead.
accscalar_t rnorm = 1.f/saved_norms[fast_dim_location];
accscalar_t rnorm3 = rnorm*rnorm*rnorm;
// Write g gradients.
if(threadIdx.y == 0)
grad_g[fast_dim_location] = scalar_cast<scalar_t>(result*rnorm);
// Entire block pulls these values, could use shared memory instead.
accscalar_t g_this_col = scalar_cast<accscalar_t>(saved_g[fast_dim_location]);
// Write v gradients.
slower_dims_location = threadIdx.y;
currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[currentIdx]);
accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[currentIdx]);
accscalar_t grad_vj = g_this_col*(rnorm*grad_wj - rnorm3*saved_vj*result);
grad_v[currentIdx] = scalar_cast<scalar_t>(grad_vj);
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
}
} // anonymous namespace
std::tuple<Tensor,Tensor> weight_norm_cuda
(const Tensor & v,
const Tensor & g,
int64_t dim)
{
auto w = at::empty_like(v);
// weight_norm_fused does have a derivative defined in derivatives.yaml, therefore, VariableType.cpp
// sends the unpacked g.data() as the argument. In other words, we expect "g" is a bare Tensor here.
// norms is only needed to stash for backward.
// g.scalar_type() may be at::ScalarType::Double, Float, or Half.
// If Half, stash norms as float.
at::ScalarType AccType = g.scalar_type() == at::ScalarType::Half ?
at::ScalarType::Float : g.scalar_type();
// Will this create norms on the same device as g, regardless of what the thread's default
// current device is? I believe so, because Type::* functions are DeviceGuard()ed.
auto norms = at::empty_strided(g.sizes(), g.strides(), g.options().dtype(AccType));
const int ndims = v.dim();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= v.size(i);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.scalar_type(),
"weight_norm_fwd_first_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
weight_norm_fwd_first_dim_kernel<scalar_t, accscalar_t>
<<<v.size(0),
BLOCK,
BLOCK*sizeof(accscalar_t),
stream>>>
(w.data<scalar_t>(),
norms.data<accscalar_t>(),
v.data<scalar_t>(),
g.data<scalar_t>(),
rowSize);
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= v.size(i);
int fast_dim_size = v.size(ndims-1);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.scalar_type(),
"weight_norm_fwd_last_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
weight_norm_fwd_last_dim_kernel<scalar_t, accscalar_t>
<<<(fast_dim_size+TILE_W-1)/TILE_W,
dim3(TILE_W,TILE_H),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream>>>
(w.data<scalar_t>(),
norms.data<accscalar_t>(),
v.data<scalar_t>(),
g.data<scalar_t>(),
fast_dim_size,
slower_dims_size);
});
}
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, this is the best we can do.
THCudaCheck(cudaGetLastError());
return std::tuple<Tensor, Tensor>{w, norms};
}
std::tuple<Tensor, Tensor> weight_norm_cuda_backward
(const Tensor & grad_w,
const Tensor & saved_v,
const Tensor & saved_g,
const Tensor & saved_norms,
int64_t dim)
{
// These checks should always succeed, because weight_norm_fused_backward should only
// ever be recorded in the autograd graph via weight_norm, which passes contiguous v and g.
TORCH_CHECK(saved_v.is_contiguous(), "saved_v must be contiguous");
TORCH_CHECK(saved_g.is_contiguous(), "saved_g must be contiguous");
TORCH_CHECK(saved_norms.is_contiguous(), "saved_norms must be contiguous");
TORCH_CHECK(dim == 0 || dim == saved_v.dim() - 1, "fused kernels can only be applied for first or last dim")
auto grad_v = at::empty_like(saved_v);
auto grad_g = at::empty_like(saved_g);
const int ndims = saved_v.dim();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= saved_v.size(i);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(saved_v.scalar_type(),
"weight_norm_bwd_first_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
weight_norm_bwd_first_dim_kernel<scalar_t, accscalar_t>
<<<grad_w.size(0),
BLOCK,
BLOCK*sizeof(accscalar_t),
stream>>>
(grad_v.data<scalar_t>(),
grad_g.data<scalar_t>(),
grad_w.data<scalar_t>(),
saved_v.data<scalar_t>(),
saved_g.data<scalar_t>(),
saved_norms.data<accscalar_t>(),
rowSize);
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size because they involve dynamically indexing an array.
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= saved_v.size(i);
int fast_dim_size = saved_v.size(ndims-1);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(saved_v.scalar_type(),
"weight_norm_bwd_last_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
weight_norm_bwd_last_dim_kernel<scalar_t, accscalar_t>
<<<(fast_dim_size+TILE_W-1)/TILE_W,
dim3(TILE_W,TILE_H),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream>>>
(grad_v.data<scalar_t>(),
grad_g.data<scalar_t>(),
grad_w.data<scalar_t>(),
saved_v.data<scalar_t>(),
saved_g.data<scalar_t>(),
saved_norms.data<accscalar_t>(),
fast_dim_size,
slower_dims_size);
});
}
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, this is the best we can do.
THCudaCheck(cudaGetLastError());
return std::tuple<Tensor, Tensor>{grad_v, grad_g};
}
#undef BLOCK
#undef TILE_W
#undef TILE_H
} // namespace native
} // namespace at
|
faea47effcdea2efc7eb2a68668e5d3dc7539b52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 1000000
#define ARRAY_SIZE 1000000
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
g[i] = g[i] + 1;
}
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(& g[i], 1);
}
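// With the constants above (NUM_THREADS == ARRAY_SIZE == 1000000) every element
// is written by exactly one thread, so both kernels leave the array full of 1s.
// The two versions only differ once several threads share an element, e.g. with
// a smaller array (a hypothetical variation, not the shipped configuration):
//
//   #define ARRAY_SIZE 100 // 10000 threads now collide on each element
//
// increment_naive then loses updates, because g[i] = g[i] + 1 is a separate
// read, add and write, while atomicAdd brings every element to exactly 10000.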
int main(int argc,char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
hipMalloc((void **) &d_array, ARRAY_BYTES);
hipMemset((void *) d_array, 0, ARRAY_BYTES);
// launch the kernel - comment out one of these
timer.Start();
// increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
hipLaunchKernelGGL(( increment_atomic), dim3(NUM_THREADS/BLOCK_WIDTH), dim3(BLOCK_WIDTH), 0, 0, d_array);
timer.Stop();
// copy back the array of sums from GPU and print
hipMemcpy(h_array, d_array, ARRAY_BYTES, hipMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
// free GPU memory allocation and exit
hipFree(d_array);
return 0;
} | faea47effcdea2efc7eb2a68668e5d3dc7539b52.cu | #include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 1000000
#define ARRAY_SIZE 1000000
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
g[i] = g[i] + 1;
}
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(& g[i], 1);
}
int main(int argc,char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
cudaMalloc((void **) &d_array, ARRAY_BYTES);
cudaMemset((void *) d_array, 0, ARRAY_BYTES);
// launch the kernel - comment out one of these
timer.Start();
// increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
increment_atomic<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
timer.Stop();
// copy back the array of sums from GPU and print
cudaMemcpy(h_array, d_array, ARRAY_BYTES, cudaMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
// free GPU memory allocation and exit
cudaFree(d_array);
return 0;
} |